author	Waldemar Brodkorb <wbx@openadk.org>	2015-08-01 10:41:19 +0200
committer	Waldemar Brodkorb <wbx@openadk.org>	2015-08-01 10:42:22 +0200
commit	8904b7659a3fff94637af328dae415110b6f46d7 (patch)
tree	f8388766fa798f5f328c09a584dd9bf248afb990 /target/arm/solidrun-imx6
parent	ce515b6da73e7b2cfbfb4de79b6c293b3257d47a (diff)
update to 4.1.3, which will be the base for stable branch
Diffstat (limited to 'target/arm/solidrun-imx6')
-rw-r--r--	target/arm/solidrun-imx6/patches/4.1.3/0001-xbian.patch	171697
1 file changed, 171697 insertions, 0 deletions
diff --git a/target/arm/solidrun-imx6/patches/4.1.3/0001-xbian.patch b/target/arm/solidrun-imx6/patches/4.1.3/0001-xbian.patch
new file mode 100644
index 000000000..6286c0868
--- /dev/null
+++ b/target/arm/solidrun-imx6/patches/4.1.3/0001-xbian.patch
@@ -0,0 +1,171697 @@
+diff -Nur linux-4.1.3/arch/arm/boot/dts/imx6dl.dtsi linux-xbian-imx6/arch/arm/boot/dts/imx6dl.dtsi
+--- linux-4.1.3/arch/arm/boot/dts/imx6dl.dtsi 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/boot/dts/imx6dl.dtsi 2015-07-27 23:13:00.299912248 +0200
+@@ -60,17 +60,103 @@
+ };
+
+ soc {
+- ocram: sram@00900000 {
++ busfreq { /* BUSFREQ */
++ compatible = "fsl,imx6_busfreq";
++ clocks = <&clks 171>, <&clks 6>, <&clks 11>, <&clks 104>, <&clks 172>, <&clks 58>,
++ <&clks 18>, <&clks 60>, <&clks 20>, <&clks 3>, <&clks 22>, <&clks 8>;
++ clock-names = "pll2_bus", "pll2_pfd2_396m", "pll2_198m", "arm", "pll3_usb_otg", "periph",
++ "periph_pre", "periph_clk2", "periph_clk2_sel", "osc", "axi_sel", "pll3_pfd1_540m";
++ interrupts = <0 107 0x04>, <0 112 0x4>;
++ interrupt-names = "irq_busfreq_0", "irq_busfreq_1";
++ fsl,max_ddr_freq = <400000000>;
++ };
++
++ gpu@00130000 {
++ compatible = "fsl,imx6dl-gpu", "fsl,imx6q-gpu";
++ reg = <0x00130000 0x4000>, <0x00134000 0x4000>,
++ <0x0 0x0>;
++ reg-names = "iobase_3d", "iobase_2d",
++ "phys_baseaddr";
++ interrupts = <0 9 0x04>, <0 10 0x04>;
++ interrupt-names = "irq_3d", "irq_2d";
++ clocks = <&clks 26>, <&clks 27>,
++ <&clks 121>, <&clks 122>,
++ <&clks 74>;
++ clock-names = "gpu2d_axi_clk", "gpu3d_axi_clk",
++ "gpu2d_clk", "gpu3d_clk",
++ "gpu3d_shader_clk";
++ resets = <&src 0>, <&src 3>;
++ reset-names = "gpu3d", "gpu2d";
++ power-domains = <&gpc 1>;
++ };
++
++ hdmi_core: hdmi_core@00120000 {
++ compatible = "fsl,imx6q-hdmi-core";
++ reg = <0x00120000 0x9000>;
++ clocks = <&clks 124>, <&clks 123>;
++ clock-names = "hdmi_isfr", "hdmi_iahb";
++ status = "disabled";
++ };
++
++ hdmi_video: hdmi_video@020e0000 {
++ compatible = "fsl,imx6q-hdmi-video";
++ reg = <0x020e0000 0x1000>;
++ reg-names = "hdmi_gpr";
++ interrupts = <0 115 0x04>;
++ clocks = <&clks 124>, <&clks 123>;
++ clock-names = "hdmi_isfr", "hdmi_iahb";
++ status = "disabled";
++ };
++
++ hdmi_audio: hdmi_audio@00120000 {
++ compatible = "fsl,imx6q-hdmi-audio";
++ clocks = <&clks 124>, <&clks 123>;
++ clock-names = "hdmi_isfr", "hdmi_iahb";
++ dmas = <&sdma 2 23 0>;
++ dma-names = "tx";
++ status = "disabled";
++ };
++
++ hdmi_cec: hdmi_cec@00120000 {
++ compatible = "fsl,imx6q-hdmi-cec";
++ interrupts = <0 115 0x04>;
++ status = "disabled";
++ };
++
++ ocrams: sram@00900000 {
++ compatible = "fsl,lpm-sram";
++ reg = <0x00900000 0x4000>;
++ clocks = <&clks IMX6QDL_CLK_OCRAM>;
++ };
++
++ ocrams_ddr: sram@00904000 {
++ compatible = "fsl,ddr-lpm-sram";
++ reg = <0x00904000 0x1000>;
++ clocks = <&clks IMX6QDL_CLK_OCRAM>;
++ };
++
++ ocram: sram@00905000 {
+ compatible = "mmio-sram";
+- reg = <0x00900000 0x20000>;
++ reg = <0x00905000 0x1B000>;
+ clocks = <&clks IMX6QDL_CLK_OCRAM>;
+ };
+
+ aips1: aips-bus@02000000 {
++ vpu@02040000 {
++ iramsize = <0>;
++ status = "okay";
++ };
++
+ iomuxc: iomuxc@020e0000 {
+ compatible = "fsl,imx6dl-iomuxc";
+ };
+
++ dcic2: dcic@020e8000 {
++ clocks = <&clks IMX6QDL_CLK_DCIC1>,
++ <&clks IMX6QDL_CLK_DCIC2>; /* DCIC2 depends on the DCIC1 clock on imx6dl */
++ clock-names = "dcic", "disp-axi";
++ };
++
+ pxp: pxp@020f0000 {
+ reg = <0x020f0000 0x4000>;
+ interrupts = <0 98 IRQ_TYPE_LEVEL_HIGH>;
+@@ -99,26 +185,13 @@
+ };
+ };
+ };
+-
+- display-subsystem {
+- compatible = "fsl,imx-display-subsystem";
+- ports = <&ipu1_di0>, <&ipu1_di1>;
+- };
+-};
+-
+-&hdmi {
+- compatible = "fsl,imx6dl-hdmi";
+ };
+
+ &ldb {
+- clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
+- <&clks IMX6QDL_CLK_IPU1_DI0_SEL>, <&clks IMX6QDL_CLK_IPU1_DI1_SEL>,
+- <&clks IMX6QDL_CLK_LDB_DI0>, <&clks IMX6QDL_CLK_LDB_DI1>;
++ clocks = <&clks 33>, <&clks 34>,
++ <&clks 39>, <&clks 40>,
++ <&clks 135>, <&clks 136>;
+ clock-names = "di0_pll", "di1_pll",
+ "di0_sel", "di1_sel",
+ "di0", "di1";
+ };
+-
+-&vpu {
+- compatible = "fsl,imx6dl-vpu", "cnm,coda960";
+-};
+diff -Nur linux-4.1.3/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi linux-xbian-imx6/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+--- linux-4.1.3/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi 2015-07-27 23:13:00.303898027 +0200
+@@ -45,11 +45,22 @@
+ #include <dt-bindings/gpio/gpio.h>
+
+ / {
++ chosen {
++ bootargs = "quiet console=ttymxc0,115200 root=/dev/mmcblk0p2 rw";
++ };
++
++ aliases {
++ mmc0 = &usdhc2;
++ mmc1 = &usdhc1;
++ mxcfb0 = &mxcfb1;
++ };
++
+ ir_recv: ir-receiver {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpio3 9 1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_cubox_i_ir>;
++ linux,rc-map-name = "rc-rc6-mce";
+ };
+
+ pwmleds {
+@@ -78,6 +89,8 @@
+
+ reg_usbh1_vbus: usb-h1-vbus {
+ compatible = "regulator-fixed";
++ regulator-boot-on;
++ regulator-always-on;
+ enable-active-high;
+ gpio = <&gpio1 0 0>;
+ pinctrl-names = "default";
+@@ -89,6 +102,8 @@
+
+ reg_usbotg_vbus: usb-otg-vbus {
+ compatible = "regulator-fixed";
++ regulator-boot-on;
++ regulator-always-on;
+ enable-active-high;
+ gpio = <&gpio3 22 0>;
+ pinctrl-names = "default";
+@@ -101,8 +116,7 @@
+
+ sound-spdif {
+ compatible = "fsl,imx-audio-spdif";
+- model = "Integrated SPDIF";
+- /* IMX6 doesn't implement this yet */
++ model = "imx-spdif";
+ spdif-controller = <&spdif>;
+ spdif-out;
+ };
+@@ -118,12 +132,45 @@
+ linux,code = <BTN_0>;
+ };
+ };
++
++ sound-hdmi {
++ compatible = "fsl,imx6q-audio-hdmi",
++ "fsl,imx-audio-hdmi";
++ model = "imx-audio-hdmi";
++ hdmi-controller = <&hdmi_audio>;
++ };
++
++ mxcfb1: fb@0 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "hdmi";
++ interface_pix_fmt = "RGB24";
++ mode_str = "1920x1080M@60";
++ default_bpp = <32>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "okay";
++ };
++};
++
++&hdmi_core {
++ ipu_id = <0>;
++ disp_id = <0>;
++ status = "okay";
++};
++
++&hdmi_video {
++ fsl,phy_reg_vlev = <0x0294>;
++ fsl,phy_reg_cksymtx = <0x800d>;
++ status = "okay";
++};
++
++&hdmi_audio {
++ status = "okay";
+ };
+
+-&hdmi {
++&hdmi_cec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_cubox_i_hdmi>;
+- ddc-i2c-bus = <&i2c2>;
+ status = "okay";
+ };
+
+@@ -131,7 +178,13 @@
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_cubox_i_i2c2>;
++
+ status = "okay";
++
++ ddc: imx6_hdmi_i2c@50 {
++ compatible = "fsl,imx6-hdmi-i2c";
++ reg = <0x50>;
++ };
+ };
+
+ &i2c3 {
+@@ -228,6 +281,28 @@
+ MX6QDL_PAD_EIM_DA8__GPIO3_IO08 0x17059
+ >;
+ };
++
++ pinctrl_cubox_i_usdhc2_100mhz: cubox-i-usdhc2-100mhz {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170b9
++ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100b9
++ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170b9
++ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170b9
++ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170b9
++ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x130b9
++ >;
++ };
++
++ pinctrl_cubox_i_usdhc2_200mhz: cubox-i-usdhc2-200mhz {
++ fsl,pins = <
++ MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170f9
++ MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100f9
++ MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170f9
++ MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170f9
++ MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170f9
++ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x130f9
++ >;
++ };
+ };
+ };
+
+@@ -256,9 +331,24 @@
+ };
+
+ &usdhc2 {
+- pinctrl-names = "default";
++ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>;
++ pinctrl-1 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2_100mhz>;
++ pinctrl-2 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2_200mhz>;
+ vmmc-supply = <&reg_3p3v>;
+ cd-gpios = <&gpio1 4 0>;
+ status = "okay";
++ no-1-8-v;
++};
++
++&dcic1 {
++ dcic_id = <0>;
++ dcic_mux = "dcic-hdmi";
++ status = "okay";
++};
++
++&dcic2 {
++ dcic_id = <1>;
++ dcic_mux = "dcic-lvds1";
++ status = "okay";
+ };
+diff -Nur linux-4.1.3/arch/arm/boot/dts/imx6qdl.dtsi linux-xbian-imx6/arch/arm/boot/dts/imx6qdl.dtsi
+--- linux-4.1.3/arch/arm/boot/dts/imx6qdl.dtsi 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/boot/dts/imx6qdl.dtsi 2015-07-27 23:13:00.303898027 +0200
+@@ -14,6 +14,7 @@
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ #include "skeleton.dtsi"
++#include <dt-bindings/gpio/gpio.h>
+
+ / {
+ aliases {
+@@ -30,6 +31,7 @@
+ i2c0 = &i2c1;
+ i2c1 = &i2c2;
+ i2c2 = &i2c3;
++ ipu0 = &ipu1;
+ mmc0 = &usdhc1;
+ mmc1 = &usdhc2;
+ mmc2 = &usdhc3;
+@@ -79,6 +81,10 @@
+ };
+ };
+
++ pu_dummy: pudummy_reg {
++ compatible = "fsl,imx6-dummy-pureg"; /* only used in ldo-bypass */
++ };
++
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+@@ -86,6 +92,11 @@
+ interrupt-parent = <&gpc>;
+ ranges;
+
++ caam_sm: caam-sm@00100000 {
++ compatible = "fsl,imx6q-caam-sm";
++ reg = <0x00100000 0x3fff>;
++ };
++
+ dma_apbh: dma-apbh@00110000 {
+ compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh";
+ reg = <0x00110000 0x2000>;
+@@ -99,6 +110,12 @@
+ clocks = <&clks IMX6QDL_CLK_APBH_DMA>;
+ };
+
++ irq_sec_vio: caam_secvio {
++ compatible = "fsl,imx6q-caam-secvio";
++ interrupts = <0 20 0x04>;
++ secvio_src = <0x8000001d>;
++ };
++
+ gpmi: gpmi-nand@00112000 {
+ compatible = "fsl,imx6q-gpmi-nand";
+ #address-cells = <1>;
+@@ -190,16 +207,16 @@
+ dmas = <&sdma 14 18 0>,
+ <&sdma 15 18 0>;
+ dma-names = "rx", "tx";
+- clocks = <&clks IMX6QDL_CLK_SPDIF>, <&clks IMX6QDL_CLK_OSC>,
+- <&clks IMX6QDL_CLK_SPDIF>, <&clks IMX6QDL_CLK_DUMMY>,
+- <&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_DUMMY>,
+- <&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_DUMMY>,
+- <&clks IMX6QDL_CLK_DUMMY>;
++ clocks = <&clks IMX6QDL_CLK_SPDIF_GCLK>, <&clks IMX6QDL_CLK_OSC>,
++ <&clks IMX6QDL_CLK_SPDIF>, <&clks IMX6QDL_CLK_ASRC>,
++ <&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_ESAI_EXTAL>,
++ <&clks IMX6QDL_CLK_IPG>, <&clks IMX6QDL_CLK_MLB>,
++ <&clks IMX6QDL_CLK_DUMMY>, <&clks IMX6QDL_CLK_SPBA>;
+ clock-names = "core", "rxtx0",
+ "rxtx1", "rxtx2",
+ "rxtx3", "rxtx4",
+ "rxtx5", "rxtx6",
+- "rxtx7";
++ "rxtx7", "dma";
+ status = "disabled";
+ };
+
+@@ -274,7 +291,12 @@
+ esai: esai@02024000 {
+ reg = <0x02024000 0x4000>;
+ interrupts = <0 51 IRQ_TYPE_LEVEL_HIGH>;
+- };
++ compatible = "fsl,imx6q-esai";
++ clocks = <&clks 118>;
++ fsl,esai-dma-events = <24 23>;
++ fsl,flags = <1>;
++ status = "disabled";
++ };
+
+ ssi1: ssi@02028000 {
+ #sound-dai-cells = <0>;
+@@ -325,8 +347,30 @@
+ };
+
+ asrc: asrc@02034000 {
++ compatible = "fsl,imx53-asrc";
+ reg = <0x02034000 0x4000>;
+ interrupts = <0 50 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks IMX6QDL_CLK_ASRC_MEM>,
++ <&clks IMX6QDL_CLK_ASRC_IPG>,
++ <&clks IMX6QDL_CLK_SPDIF>,
++ <&clks IMX6QDL_CLK_SPBA>;
++ clock-names = "mem", "ipg", "asrck_0", "dma";
++ dmas = <&sdma 17 20 1>, <&sdma 18 20 1>, <&sdma 19 20 1>,
++ <&sdma 20 20 1>, <&sdma 21 20 1>, <&sdma 22 20 1>;
++ dma-names = "rxa", "rxb", "rxc",
++ "txa", "txb", "txc";
++ fsl,asrc-rate = <48000>;
++ fsl,asrc-width = <16>;
++ status = "okay";
++ };
++
++ asrc_p2p: asrc_p2p {
++ compatible = "fsl,imx6q-asrc-p2p";
++ fsl,output-rate = <48000>;
++ fsl,output-width = <16>;
++ fsl,asrc-dma-rx-events = <17 18 19>;
++ fsl,asrc-dma-tx-events = <20 21 22>;
++ status = "okay";
+ };
+
+ spba@0203c000 {
+@@ -335,16 +379,20 @@
+ };
+
+ vpu: vpu@02040000 {
+- compatible = "cnm,coda960";
++ compatible = "cnm,coda960", "fsl,imx6-vpu";
+ reg = <0x02040000 0x3c000>;
++ reg-names = "vpu_regs";
+ interrupts = <0 12 IRQ_TYPE_LEVEL_HIGH>,
+ <0 3 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "bit", "jpeg";
+ clocks = <&clks IMX6QDL_CLK_VPU_AXI>,
+- <&clks IMX6QDL_CLK_MMDC_CH0_AXI>;
+- clock-names = "per", "ahb";
+- resets = <&src 1>;
++ <&clks IMX6QDL_CLK_MMDC_CH0_AXI>,
++ <&clks IMX6QDL_CLK_OCRAM>;
++ clock-names = "per", "ahb", "ocram";
++ iramsize = <0x21000>;
+ iram = <&ocram>;
++ resets = <&src 1>;
++ power-domains = <&gpc 1>;
+ };
+
+ aipstz@0207c000 { /* AIPSTZ1 */
+@@ -552,20 +600,21 @@
+ anatop-min-bit-val = <4>;
+ anatop-min-voltage = <800000>;
+ anatop-max-voltage = <1375000>;
++ anatop-enable-bit = <0>;
+ };
+
+- regulator-3p0@120 {
++ reg_3p0: regulator-3p0@120 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "vdd3p0";
+- regulator-min-microvolt = <2800000>;
+- regulator-max-microvolt = <3150000>;
+- regulator-always-on;
++ regulator-min-microvolt = <2625000>;
++ regulator-max-microvolt = <3400000>;
+ anatop-reg-offset = <0x120>;
+ anatop-vol-bit-shift = <8>;
+ anatop-vol-bit-width = <5>;
+ anatop-min-bit-val = <0>;
+ anatop-min-voltage = <2625000>;
+ anatop-max-voltage = <3400000>;
++ anatop-enable-bit = <0>;
+ };
+
+ regulator-2p5@130 {
+@@ -580,6 +629,7 @@
+ anatop-min-bit-val = <0>;
+ anatop-min-voltage = <2000000>;
+ anatop-max-voltage = <2750000>;
++ anatop-enable-bit = <0>;
+ };
+
+ reg_arm: regulator-vddcore@140 {
+@@ -647,6 +697,7 @@
+ reg = <0x020c9000 0x1000>;
+ interrupts = <0 44 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6QDL_CLK_USBPHY1>;
++ phy-3p0-supply = <&reg_3p0>;
+ fsl,anatop = <&anatop>;
+ };
+
+@@ -655,9 +706,15 @@
+ reg = <0x020ca000 0x1000>;
+ interrupts = <0 45 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6QDL_CLK_USBPHY2>;
++ phy-3p0-supply = <&reg_3p0>;
+ fsl,anatop = <&anatop>;
+ };
+
++ caam_snvs: caam-snvs@020cc000 {
++ compatible = "fsl,imx6q-caam-snvs";
++ reg = <0x020cc000 0x4000>;
++ };
++
+ snvs@020cc000 {
+ compatible = "fsl,sec-v4.0-mon", "simple-bus";
+ #address-cells = <1>;
+@@ -704,14 +761,12 @@
+ interrupts = <0 89 IRQ_TYPE_LEVEL_HIGH>,
+ <0 90 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&intc>;
+- pu-supply = <&reg_pu>;
+- clocks = <&clks IMX6QDL_CLK_GPU3D_CORE>,
+- <&clks IMX6QDL_CLK_GPU3D_SHADER>,
+- <&clks IMX6QDL_CLK_GPU2D_CORE>,
+- <&clks IMX6QDL_CLK_GPU2D_AXI>,
+- <&clks IMX6QDL_CLK_OPENVG_AXI>,
+- <&clks IMX6QDL_CLK_VPU_AXI>;
+ #power-domain-cells = <1>;
++ clocks = <&clks 122>, <&clks 74>, <&clks 121>,
++ <&clks 26>, <&clks 143>, <&clks 168>;
++ clock-names = "gpu3d_core", "gpu3d_shader", "gpu2d_core",
++ "gpu2d_axi", "openvg_axi", "vpu_axi";
++ pu-supply = <&reg_pu>;
+ };
+
+ gpr: iomuxc-gpr@020e0000 {
+@@ -736,22 +791,6 @@
+ #size-cells = <0>;
+ reg = <0>;
+ status = "disabled";
+-
+- port@0 {
+- reg = <0>;
+-
+- lvds0_mux_0: endpoint {
+- remote-endpoint = <&ipu1_di0_lvds0>;
+- };
+- };
+-
+- port@1 {
+- reg = <1>;
+-
+- lvds0_mux_1: endpoint {
+- remote-endpoint = <&ipu1_di1_lvds0>;
+- };
+- };
+ };
+
+ lvds-channel@1 {
+@@ -759,22 +798,6 @@
+ #size-cells = <0>;
+ reg = <1>;
+ status = "disabled";
+-
+- port@0 {
+- reg = <0>;
+-
+- lvds1_mux_0: endpoint {
+- remote-endpoint = <&ipu1_di0_lvds1>;
+- };
+- };
+-
+- port@1 {
+- reg = <1>;
+-
+- lvds1_mux_1: endpoint {
+- remote-endpoint = <&ipu1_di1_lvds1>;
+- };
+- };
+ };
+ };
+
+@@ -788,32 +811,26 @@
+ <&clks IMX6QDL_CLK_HDMI_ISFR>;
+ clock-names = "iahb", "isfr";
+ status = "disabled";
+-
+- port@0 {
+- reg = <0>;
+-
+- hdmi_mux_0: endpoint {
+- remote-endpoint = <&ipu1_di0_hdmi>;
+- };
+- };
+-
+- port@1 {
+- reg = <1>;
+-
+- hdmi_mux_1: endpoint {
+- remote-endpoint = <&ipu1_di1_hdmi>;
+- };
+- };
+ };
+
+ dcic1: dcic@020e4000 {
++ compatible = "fsl,imx6q-dcic";
+ reg = <0x020e4000 0x4000>;
+ interrupts = <0 124 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks IMX6QDL_CLK_DCIC1>, <&clks IMX6QDL_CLK_DCIC1>;
++ clock-names = "dcic", "disp-axi";
++ gpr = <&gpr>;
++ status = "disabled";
+ };
+
+ dcic2: dcic@020e8000 {
++ compatible = "fsl,imx6q-dcic";
+ reg = <0x020e8000 0x4000>;
+ interrupts = <0 125 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks IMX6QDL_CLK_DCIC2>, <&clks IMX6QDL_CLK_DCIC2>;
++ clock-names = "dcic", "disp-axi";
++ gpr = <&gpr>;
++ status = "disabled";
+ };
+
+ sdma: sdma@020ec000 {
+@@ -824,6 +841,7 @@
+ <&clks IMX6QDL_CLK_SDMA>;
+ clock-names = "ipg", "ahb";
+ #dma-cells = <3>;
++ iram = <&ocram>;
+ fsl,sdma-ram-script-name = "imx/sdma/sdma-imx6q.bin";
+ };
+ };
+@@ -835,10 +853,30 @@
+ reg = <0x02100000 0x100000>;
+ ranges;
+
+- caam@02100000 {
+- reg = <0x02100000 0x40000>;
+- interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>,
+- <0 106 IRQ_TYPE_LEVEL_HIGH>;
++ crypto: caam@2100000 {
++ compatible = "fsl,sec-v4.0";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ reg = <0x2100000 0x40000>;
++ ranges = <0 0x2100000 0x40000>;
++ interrupt-parent = <&intc>; /* interrupts = <0 92 0x4>; */
++ interrupts = <0 92 0x4>;
++ clocks = <&clks 213>, <&clks 214>, <&clks 215>, <&clks 196>;
++ clock-names = "caam_mem", "caam_aclk", "caam_ipg", "caam_emi_slow";
++
++ sec_jr0: jr0@1000 {
++ compatible = "fsl,sec-v4.0-job-ring";
++ reg = <0x1000 0x1000>;
++ interrupt-parent = <&intc>;
++ interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ sec_jr1: jr1@2000 {
++ compatible = "fsl,sec-v4.0-job-ring";
++ reg = <0x2000 0x1000>;
++ interrupt-parent = <&intc>;
++ interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
++ };
+ };
+
+ aipstz@0217c000 { /* AIPSTZ2 */
+@@ -852,6 +890,7 @@
+ clocks = <&clks IMX6QDL_CLK_USBOH3>;
+ fsl,usbphy = <&usbphy1>;
+ fsl,usbmisc = <&usbmisc 0>;
++ fsl,anatop = <&anatop>;
+ status = "disabled";
+ };
+
+@@ -903,14 +942,21 @@
+ <&clks IMX6QDL_CLK_ENET>,
+ <&clks IMX6QDL_CLK_ENET_REF>;
+ clock-names = "ipg", "ahb", "ptp";
+- status = "disabled";
++ phy-mode = "rgmii";
++ fsl,magic-packet;
++ status = "okay";
+ };
+
+- mlb@0218c000 {
++ mlb: mlb@0218c000 {
+ reg = <0x0218c000 0x4000>;
+ interrupts = <0 53 IRQ_TYPE_LEVEL_HIGH>,
+ <0 117 IRQ_TYPE_LEVEL_HIGH>,
+ <0 126 IRQ_TYPE_LEVEL_HIGH>;
++ compatible = "fsl,imx6q-mlb150";
++ clocks = <&clks 139>, <&clks 175>;
++ clock-names = "mlb", "pll8_mlb";
++ iram = <&ocram>;
++ status = "disabled";
+ };
+
+ usdhc1: usdhc@02190000 {
+@@ -995,6 +1041,11 @@
+ reg = <0x021ac000 0x4000>;
+ };
+
++ mmdc0-1@021b0000 {
++ compatible = "fsl,imx6q-mmdc-combine";
++ reg = <0x021b0000 0x8000>;
++ };
++
+ mmdc0: mmdc@021b0000 { /* MMDC0 */
+ compatible = "fsl,imx6q-mmdc";
+ reg = <0x021b0000 0x4000>;
+@@ -1011,11 +1062,17 @@
+ clocks = <&clks IMX6QDL_CLK_EIM_SLOW>;
+ };
+
+- ocotp: ocotp@021bc000 {
+- compatible = "fsl,imx6q-ocotp", "syscon";
++ ocotp: ocotp-ctrl@021bc000 {
++ compatible = "syscon";
+ reg = <0x021bc000 0x4000>;
+ };
+
++ ocotp-fuse@021bc000 {
++ compatible = "fsl,imx6q-ocotp";
++ reg = <0x021bc000 0x4000>;
++ clocks = <&clks 128>;
++ };
++
+ tzasc@021d0000 { /* TZASC1 */
+ reg = <0x021d0000 0x4000>;
+ interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>;
+@@ -1034,39 +1091,38 @@
+
+ mipi_csi: mipi@021dc000 {
+ reg = <0x021dc000 0x4000>;
++ compatible = "fsl,imx6q-mipi-csi2";
++ interrupts = <0 100 0x04>, <0 101 0x04>;
++ clocks = <&clks IMX6QDL_CLK_HSI_TX>,
++ <&clks IMX6QDL_CLK_EIM_SEL>,
++ <&clks IMX6QDL_CLK_LVDS2_IN>;
++ /* Note: clks 138 is hsi_tx; however, dphy_clk,
++ * hsi_tx and pll_refclk share the same clock gate.
++ * The current clk driver opens/closes that gate
++ * via hsi_tx as a temporary debug measure.
++ */
++ clock-names = "dphy_clk", "pixel_clk", "cfg_clk";
++ status = "disabled";
+ };
+
+ mipi_dsi: mipi@021e0000 {
++ compatible = "fsl,imx6q-mipi-dsi";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x021e0000 0x4000>;
+ status = "disabled";
+-
+- ports {
+- #address-cells = <1>;
+- #size-cells = <0>;
+-
+- port@0 {
+- reg = <0>;
+-
+- mipi_mux_0: endpoint {
+- remote-endpoint = <&ipu1_di0_mipi>;
+- };
+- };
+-
+- port@1 {
+- reg = <1>;
+-
+- mipi_mux_1: endpoint {
+- remote-endpoint = <&ipu1_di1_mipi>;
+- };
+- };
+- };
++ interrupts = <0 102 0x04>;
++ gpr = <&gpr>;
++ clocks = <&clks IMX6QDL_CLK_HSI_TX>, <&clks IMX6QDL_CLK_VIDEO_27M>;
++ clock-names = "mipi_pllref_clk", "mipi_cfg_clk";
+ };
+
+ vdoa@021e4000 {
++ compatible = "fsl,imx6q-vdoa";
+ reg = <0x021e4000 0x4000>;
+ interrupts = <0 18 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clks 202>;
++ iram = <&ocram>;
+ };
+
+ uart2: serial@021e8000 {
+@@ -1127,67 +1183,14 @@
+ <0 5 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6QDL_CLK_IPU1>,
+ <&clks IMX6QDL_CLK_IPU1_DI0>,
+- <&clks IMX6QDL_CLK_IPU1_DI1>;
+- clock-names = "bus", "di0", "di1";
++ <&clks IMX6QDL_CLK_IPU1_DI1>,
++ <&clks 39>, <&clks 40>,
++ <&clks 135>, <&clks 136>;
++ clock-names = "bus", "di0", "di1",
++ "di0_sel", "di1_sel",
++ "ldb_di0", "ldb_di1";
+ resets = <&src 2>;
+-
+- ipu1_csi0: port@0 {
+- reg = <0>;
+- };
+-
+- ipu1_csi1: port@1 {
+- reg = <1>;
+- };
+-
+- ipu1_di0: port@2 {
+- #address-cells = <1>;
+- #size-cells = <0>;
+- reg = <2>;
+-
+- ipu1_di0_disp0: endpoint@0 {
+- };
+-
+- ipu1_di0_hdmi: endpoint@1 {
+- remote-endpoint = <&hdmi_mux_0>;
+- };
+-
+- ipu1_di0_mipi: endpoint@2 {
+- remote-endpoint = <&mipi_mux_0>;
+- };
+-
+- ipu1_di0_lvds0: endpoint@3 {
+- remote-endpoint = <&lvds0_mux_0>;
+- };
+-
+- ipu1_di0_lvds1: endpoint@4 {
+- remote-endpoint = <&lvds1_mux_0>;
+- };
+- };
+-
+- ipu1_di1: port@3 {
+- #address-cells = <1>;
+- #size-cells = <0>;
+- reg = <3>;
+-
+- ipu1_di0_disp1: endpoint@0 {
+- };
+-
+- ipu1_di1_hdmi: endpoint@1 {
+- remote-endpoint = <&hdmi_mux_1>;
+- };
+-
+- ipu1_di1_mipi: endpoint@2 {
+- remote-endpoint = <&mipi_mux_1>;
+- };
+-
+- ipu1_di1_lvds0: endpoint@3 {
+- remote-endpoint = <&lvds0_mux_1>;
+- };
+-
+- ipu1_di1_lvds1: endpoint@4 {
+- remote-endpoint = <&lvds1_mux_1>;
+- };
+- };
++ bypass_reset = <0>;
+ };
+ };
+ };
+diff -Nur linux-4.1.3/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi linux-xbian-imx6/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
+--- linux-4.1.3/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi 2015-07-27 23:13:00.303898027 +0200
+@@ -43,8 +43,10 @@
+ #include "imx6qdl-microsom-ar8035.dtsi"
+
+ / {
+- chosen {
+- stdout-path = &uart1;
++ aliases {
++ mmc0 = &usdhc2;
++ mmc1 = &usdhc1;
++ mxcfb0 = &mxcfb1;
+ };
+
+ ir_recv: ir-receiver {
+@@ -52,6 +54,7 @@
+ gpios = <&gpio3 5 1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hummingboard_gpio3_5>;
++ linux,rc-map-name = "rc-rc6-mce";
+ };
+
+ regulators {
+@@ -98,32 +101,70 @@
+ model = "On-board Codec";
+ mux-ext-port = <5>;
+ mux-int-port = <1>;
++ cpu-dai = <&ssi1>;
+ ssi-controller = <&ssi1>;
+ };
+
+ sound-spdif {
+ compatible = "fsl,imx-audio-spdif";
+- model = "On-board SPDIF";
++ model = "imx-spdif";
+ /* IMX6 doesn't implement this yet */
+ spdif-controller = <&spdif>;
+ spdif-out;
+ };
++
++ sound-hdmi {
++ compatible = "fsl,imx6q-audio-hdmi",
++ "fsl,imx-audio-hdmi";
++ model = "imx-audio-hdmi";
++ hdmi-controller = <&hdmi_audio>;
++ };
++
++ mxcfb1: fb@0 {
++ compatible = "fsl,mxc_sdc_fb";
++ disp_dev = "hdmi";
++ interface_pix_fmt = "RGB24";
++ mode_str = "1920x1080M@60";
++ default_bpp = <32>;
++ int_clk = <0>;
++ late_init = <0>;
++ status = "okay";
++ };
+ };
+
+ &audmux {
+ status = "okay";
+ };
+
+-&can1 {
++/*&can1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hummingboard_flexcan1>;
+ status = "okay";
+ };
++*/
++&hdmi_core {
++ ipu_id = <0>;
++ disp_id = <0>;
++ status = "okay";
++};
++
++&hdmi_video {
++ fsl,phy_reg_vlev = <0x0294>;
++ fsl,phy_reg_cksymtx = <0x800d>;
++ status = "okay";
++};
++
++&hdmi_audio {
++ status = "okay";
++};
++
++&ocram {
++ status = "okay";
++};
+
+-&hdmi {
++&hdmi_cec {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hummingboard_hdmi>;
+- ddc-i2c-bus = <&i2c2>;
+ status = "okay";
+ };
+
+@@ -136,6 +177,7 @@
+ rtc: pcf8523@68 {
+ compatible = "nxp,pcf8523";
+ reg = <0x68>;
++ nxp,12p5_pf;
+ };
+
+ /* Pro baseboard model */
+@@ -155,20 +197,57 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hummingboard_i2c2>;
+ status = "okay";
++
++ ddc: imx6_hdmi_i2c@50 {
++ compatible = "fsl,imx6-hdmi-i2c";
++ reg = <0x50>;
++ };
+ };
+
+ &iomuxc {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hog>;
+ hummingboard {
+- pinctrl_hummingboard_flexcan1: hummingboard-flexcan1 {
++ pinctrl_hog: hoggrp {
++ fsl,pins = <
++ /*
++ * 26-pin header GPIO description. The pin
++ * numbering is as follows -
++ * GPIO number | GPIO (bank,num) | PIN number
++ * ------------+-----------------+------------
++ * gpio1 | (1,1) | IO7
++ * gpio73 | (3,9) | IO11
++ * gpio72 | (3,8) | IO12
++ * gpio71 | (3,7) | IO13
++ * gpio70 | (3,6) | IO15
++ * gpio194 | (7,2) | IO16
++ * gpio195 | (7,3) | IO18
++ * gpio67 | (3,3) | IO22
++ *
++ * Note the gpioX to GPIO (Y,Z) mapping formula:
++ * X = (Y-1) * 32 + Z
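++ * e.g. GPIO (3,9): X = (3-1) * 32 + 9 = 73, i.e. gpio73 on pin IO11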
++ */
++ MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x400130b1
++ MX6QDL_PAD_EIM_DA9__GPIO3_IO09 0x400130b1
++ MX6QDL_PAD_EIM_DA8__GPIO3_IO08 0x400130b1
++ MX6QDL_PAD_EIM_DA7__GPIO3_IO07 0x400130b1
++ MX6QDL_PAD_EIM_DA6__GPIO3_IO06 0x400130b1
++ MX6QDL_PAD_SD3_CMD__GPIO7_IO02 0x400130b1
++ MX6QDL_PAD_SD3_CLK__GPIO7_IO03 0x400130b1
++ MX6QDL_PAD_EIM_DA3__GPIO3_IO03 0x400130b1
++ >;
++ };
++
++/* pinctrl_hummingboard_flexcan1: hummingboard-flexcan1 {
+ fsl,pins = <
+ MX6QDL_PAD_SD3_CLK__FLEXCAN1_RX 0x80000000
+ MX6QDL_PAD_SD3_CMD__FLEXCAN1_TX 0x80000000
+ >;
+ };
+-
++*/
+ pinctrl_hummingboard_gpio3_5: hummingboard-gpio3_5 {
+ fsl,pins = <
+- MX6QDL_PAD_EIM_DA5__GPIO3_IO05 0x1b0b1
++ MX6QDL_PAD_EIM_DA5__GPIO3_IO05 0x80000000
+ >;
+ };
+
+@@ -198,10 +277,10 @@
+
+ pinctrl_hummingboard_sgtl5000: hummingboard-sgtl5000 {
+ fsl,pins = <
+- MX6QDL_PAD_DISP0_DAT19__AUD5_RXD 0x130b0
+- MX6QDL_PAD_KEY_COL0__AUD5_TXC 0x130b0
+- MX6QDL_PAD_KEY_ROW0__AUD5_TXD 0x110b0
+- MX6QDL_PAD_KEY_COL1__AUD5_TXFS 0x130b0
++ MX6QDL_PAD_DISP0_DAT19__AUD5_RXD 0x130b0 /*brk*/
++ MX6QDL_PAD_KEY_COL0__AUD5_TXC 0x130b0 /*ok*/
++ MX6QDL_PAD_KEY_ROW0__AUD5_TXD 0x110b0 /*brk*/
++ MX6QDL_PAD_KEY_COL1__AUD5_TXFS 0x130b0 /*ok*/
+ MX6QDL_PAD_GPIO_5__CCM_CLKO1 0x130b0
+ >;
+ };
+@@ -219,7 +298,7 @@
+ * Similar to pinctrl_usbotg_2, but we want it
+ * pulled down for a fixed host connection.
+ */
+- fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
++ fsl,pins = <MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x13059>;
+ };
+
+ pinctrl_hummingboard_usbotg_vbus: hummingboard-usbotg-vbus {
+@@ -242,6 +321,13 @@
+ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x13059
+ >;
+ };
++
++ pinctrl_hummingboard_pcie_reset: hummingboard-pcie-reset {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_DA4__GPIO3_IO04 0x80000000
++ >;
++ };
++
+ };
+ };
+
+@@ -256,6 +342,14 @@
+ status = "okay";
+ };
+
++&pwm3 {
++ status = "disabled";
++};
++
++&pwm4 {
++ status = "disabled";
++};
++
+ &spdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hummingboard_spdif>;
+@@ -291,3 +385,48 @@
+ cd-gpios = <&gpio1 4 0>;
+ status = "okay";
+ };
++
++&gpc {
++ fsl,cpu_pupscr_sw2iso = <0xf>;
++ fsl,cpu_pupscr_sw = <0xf>;
++ fsl,cpu_pdnscr_iso2sw = <0x1>;
++ fsl,cpu_pdnscr_iso = <0x1>;
++ status = "okay";
++};
++
++&pcie {
++ pinctrl-names = "default";
++ pinctrl-0 = <
++ &pinctrl_hummingboard_pcie_reset
++ >;
++ reset-gpio = <&gpio3 4 0>;
++ status = "okay";
++ no-msi;
++};
++
++&ecspi1 {
++ status = "okay";
++ fsl,spi-num-chipselects = <1>;
++};
++
++&ecspi2 {
++ status = "okay";
++ fsl,spi-num-chipselects = <2>;
++};
++
++&ecspi3 {
++ status = "okay";
++ fsl,spi-num-chipselects = <3>;
++};
++
++&dcic1 {
++ dcic_id = <0>;
++ dcic_mux = "dcic-hdmi";
++ status = "okay";
++};
++
++&dcic2 {
++ dcic_id = <1>;
++ dcic_mux = "dcic-lvds1";
++ status = "okay";
++};
+diff -Nur linux-4.1.3/arch/arm/boot/dts/imx6qdl-microsom.dtsi linux-xbian-imx6/arch/arm/boot/dts/imx6qdl-microsom.dtsi
+--- linux-4.1.3/arch/arm/boot/dts/imx6qdl-microsom.dtsi 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/boot/dts/imx6qdl-microsom.dtsi 2015-07-27 23:13:00.303898027 +0200
+@@ -39,15 +39,98 @@
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
++#include <dt-bindings/gpio/gpio.h>
++/ {
++ clk_sdio: sdio-clock {
++ compatible = "gpio-gate-clock";
++ #clock-cells = <0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_microsom_brcm_osc>;
++ enable-gpios = <&gpio5 5 GPIO_ACTIVE_HIGH>;
++ };
++
++ regulators {
++ compatible = "simple-bus";
++
++ reg_brcm: brcm-reg {
++ compatible = "regulator-fixed";
++ enable-active-high;
++ gpio = <&gpio3 19 0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_microsom_brcm_reg>;
++ regulator-name = "brcm_reg";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ startup-delay-us = <200000>;
++ };
++ };
++
++ usdhc1_pwrseq: usdhc1_pwrseq {
++ compatible = "mmc-pwrseq-simple";
++ reset-gpios = <&gpio5 26 GPIO_ACTIVE_LOW>,
++ <&gpio6 0 GPIO_ACTIVE_LOW>;
++ clocks = <&clk_sdio>;
++ clock-names = "ext_clock";
++ };
++};
+
+ &iomuxc {
+ microsom {
++ pinctrl_microsom_brcm_bt: microsom-brcm-bt {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT14__GPIO6_IO00 0x40013070
++ MX6QDL_PAD_CSI0_DAT15__GPIO6_IO01 0x40013070
++ MX6QDL_PAD_CSI0_DAT18__GPIO6_IO04 0x40013070
++ >;
++ };
++
++ pinctrl_microsom_brcm_osc: microsom-brcm-osc {
++ fsl,pins = <
++ MX6QDL_PAD_DISP0_DAT11__GPIO5_IO05 0x40013070
++ >;
++ };
++
++ pinctrl_microsom_brcm_reg: microsom-brcm-reg {
++ fsl,pins = <
++ MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x40013070
++ >;
++ };
++
++ pinctrl_microsom_brcm_wifi: microsom-brcm-wifi {
++ fsl,pins = <
++ MX6QDL_PAD_GPIO_8__XTALOSC_REF_CLK_32K 0x1b0b0
++ MX6QDL_PAD_CSI0_DATA_EN__GPIO5_IO20 0x40013070
++ MX6QDL_PAD_CSI0_DAT8__GPIO5_IO26 0x40013070
++ MX6QDL_PAD_CSI0_DAT9__GPIO5_IO27 0x40013070
++ >;
++ };
++
+ pinctrl_microsom_uart1: microsom-uart1 {
+ fsl,pins = <
+ MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1
+ MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
+ >;
+ };
++
++ pinctrl_microsom_uart4: microsom-uart4 {
++ fsl,pins = <
++ MX6QDL_PAD_CSI0_DAT12__UART4_TX_DATA 0x1b0b1
++ MX6QDL_PAD_CSI0_DAT13__UART4_RX_DATA 0x1b0b1
++ MX6QDL_PAD_CSI0_DAT16__UART4_RTS_B 0x1b0b1
++ MX6QDL_PAD_CSI0_DAT17__UART4_CTS_B 0x1b0b1
++ >;
++ };
++
++ pinctrl_microsom_usdhc1: microsom-usdhc1 {
++ fsl,pins = <
++ MX6QDL_PAD_SD1_CMD__SD1_CMD 0x17059
++ MX6QDL_PAD_SD1_CLK__SD1_CLK 0x10059
++ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17059
++ MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17059
++ MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17059
++ MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17059
++ >;
++ };
+ };
+ };
+
+@@ -56,3 +139,23 @@
+ pinctrl-0 = <&pinctrl_microsom_uart1>;
+ status = "okay";
+ };
++
++/* UART4 - Connected to optional BRCM Wifi/BT/FM */
++&uart4 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_microsom_brcm_bt &pinctrl_microsom_uart4>;
++ fsl,uart-has-rtscts;
++ status = "okay";
++};
++
++/* USDHC1 - Connected to optional BRCM Wifi/BT/FM */
++&usdhc1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_microsom_brcm_wifi &pinctrl_microsom_usdhc1>;
++ bus-width = <4>;
++ mmc-pwrseq = <&usdhc1_pwrseq>;
++ keep-power-in-suspend;
++ non-removable;
++ vmmc-supply = <&reg_brcm>;
++ status = "okay";
++};
+diff -Nur linux-4.1.3/arch/arm/boot/dts/imx6q.dtsi linux-xbian-imx6/arch/arm/boot/dts/imx6q.dtsi
+--- linux-4.1.3/arch/arm/boot/dts/imx6q.dtsi 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/boot/dts/imx6q.dtsi 2015-07-27 23:13:00.303898027 +0200
+@@ -14,6 +14,7 @@
+
+ / {
+ aliases {
++ ipu1 = &ipu2;
+ spi4 = &ecspi5;
+ };
+
+@@ -47,9 +48,12 @@
+ <&clks IMX6QDL_CLK_PLL2_PFD2_396M>,
+ <&clks IMX6QDL_CLK_STEP>,
+ <&clks IMX6QDL_CLK_PLL1_SW>,
+- <&clks IMX6QDL_CLK_PLL1_SYS>;
++ <&clks IMX6QDL_CLK_PLL1_SYS>,
++ <&clks IMX6QDL_PLL1_BYPASS>,
++ <&clks IMX6QDL_CLK_PLL1>,
++ <&clks IMX6QDL_PLL1_BYPASS_SRC>;
+ clock-names = "arm", "pll2_pfd2_396m", "step",
+- "pll1_sw", "pll1_sys";
++ "pll1_sw", "pll1_sys", "pll1_bypass", "pll1", "pll1_bypass_src";
+ arm-supply = <&reg_arm>;
+ pu-supply = <&reg_pu>;
+ soc-supply = <&reg_soc>;
+@@ -78,9 +82,85 @@
+ };
+
+ soc {
+- ocram: sram@00900000 {
++
++ busfreq { /* BUSFREQ */
++ compatible = "fsl,imx6_busfreq";
++ clocks = <&clks 171>, <&clks 6>, <&clks 11>, <&clks 104>, <&clks 172>, <&clks 58>,
++ <&clks 18>, <&clks 60>, <&clks 20>, <&clks 3>;
++ clock-names = "pll2_bus", "pll2_pfd2_396m", "pll2_198m", "arm", "pll3_usb_otg", "periph",
++ "periph_pre", "periph_clk2", "periph_clk2_sel", "osc";
++ interrupts = <0 107 0x04>, <0 112 0x4>, <0 113 0x4>, <0 114 0x4>;
++ interrupt-names = "irq_busfreq_0", "irq_busfreq_1", "irq_busfreq_2", "irq_busfreq_3";
++ fsl,max_ddr_freq = <528000000>;
++ };
++
++ gpu@00130000 {
++ compatible = "fsl,imx6q-gpu";
++ reg = <0x00130000 0x4000>, <0x00134000 0x4000>,
++ <0x02204000 0x4000>, <0x0 0x0>;
++ reg-names = "iobase_3d", "iobase_2d",
++ "iobase_vg", "phys_baseaddr";
++ interrupts = <0 9 0x04>, <0 10 0x04>,<0 11 0x04>;
++ interrupt-names = "irq_3d", "irq_2d", "irq_vg";
++ clocks = <&clks 26>, <&clks 143>,
++ <&clks 27>, <&clks 121>,
++ <&clks 122>, <&clks 74>;
++ clock-names = "gpu2d_axi_clk", "openvg_axi_clk",
++ "gpu3d_axi_clk", "gpu2d_clk",
++ "gpu3d_clk", "gpu3d_shader_clk";
++ resets = <&src 0>, <&src 3>, <&src 3>;
++ reset-names = "gpu3d", "gpu2d", "gpuvg";
++ power-domains = <&gpc 1>;
++ };
++
++ hdmi_core: hdmi_core@00120000 {
++ compatible = "fsl,imx6q-hdmi-core";
++ reg = <0x00120000 0x9000>;
++ clocks = <&clks 124>, <&clks 123>;
++ clock-names = "hdmi_isfr", "hdmi_iahb";
++ status = "disabled";
++ };
++
++ hdmi_video: hdmi_video@020e0000 {
++ compatible = "fsl,imx6q-hdmi-video";
++ reg = <0x020e0000 0x1000>;
++ reg-names = "hdmi_gpr";
++ interrupts = <0 115 0x04>;
++ clocks = <&clks 124>, <&clks 123>;
++ clock-names = "hdmi_isfr", "hdmi_iahb";
++ status = "disabled";
++ };
++
++ hdmi_audio: hdmi_audio@00120000 {
++ compatible = "fsl,imx6q-hdmi-audio";
++ clocks = <&clks 124>, <&clks 123>;
++ clock-names = "hdmi_isfr", "hdmi_iahb";
++ dmas = <&sdma 2 23 0>;
++ dma-names = "tx";
++ status = "disabled";
++ };
++
++ hdmi_cec: hdmi_cec@00120000 {
++ compatible = "fsl,imx6q-hdmi-cec";
++ interrupts = <0 115 0x04>;
++ status = "disabled";
++ };
++
++ ocrams: sram@00900000 {
++ compatible = "fsl,lpm-sram";
++ reg = <0x00900000 0x4000>;
++ clocks = <&clks IMX6QDL_CLK_OCRAM>;
++ };
++
++ ocrams_ddr: sram@00904000 {
++ compatible = "fsl,ddr-lpm-sram";
++ reg = <0x00904000 0x1000>;
++ clocks = <&clks IMX6QDL_CLK_OCRAM>;
++ };
++
++ ocram: sram@00905000 {
+ compatible = "mmio-sram";
+- reg = <0x00900000 0x40000>;
++ reg = <0x00905000 0x3B000>;
+ clocks = <&clks IMX6QDL_CLK_OCRAM>;
+ };
+
+@@ -101,6 +181,10 @@
+ };
+ };
+
++ vpu@02040000 {
++ status = "okay";
++ };
++
+ iomuxc: iomuxc@020e0000 {
+ compatible = "fsl,imx6q-iomuxc";
+
+@@ -154,165 +238,33 @@
+ };
+
+ ipu2: ipu@02800000 {
+- #address-cells = <1>;
+- #size-cells = <0>;
+ compatible = "fsl,imx6q-ipu";
+ reg = <0x02800000 0x400000>;
+ interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>,
+ <0 7 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&clks IMX6QDL_CLK_IPU2>,
+- <&clks IMX6QDL_CLK_IPU2_DI0>,
+- <&clks IMX6QDL_CLK_IPU2_DI1>;
+- clock-names = "bus", "di0", "di1";
++ clocks = <&clks 133>, <&clks 134>, <&clks 137>,
++ <&clks 41>, <&clks 42>,
++ <&clks 135>, <&clks 136>;
++ clock-names = "bus", "di0", "di1",
++ "di0_sel", "di1_sel",
++ "ldb_di0", "ldb_di1";
+ resets = <&src 4>;
+-
+- ipu2_csi0: port@0 {
+- reg = <0>;
+- };
+-
+- ipu2_csi1: port@1 {
+- reg = <1>;
+- };
+-
+- ipu2_di0: port@2 {
+- #address-cells = <1>;
+- #size-cells = <0>;
+- reg = <2>;
+-
+- ipu2_di0_disp0: endpoint@0 {
+- };
+-
+- ipu2_di0_hdmi: endpoint@1 {
+- remote-endpoint = <&hdmi_mux_2>;
+- };
+-
+- ipu2_di0_mipi: endpoint@2 {
+- };
+-
+- ipu2_di0_lvds0: endpoint@3 {
+- remote-endpoint = <&lvds0_mux_2>;
+- };
+-
+- ipu2_di0_lvds1: endpoint@4 {
+- remote-endpoint = <&lvds1_mux_2>;
+- };
+- };
+-
+- ipu2_di1: port@3 {
+- #address-cells = <1>;
+- #size-cells = <0>;
+- reg = <3>;
+-
+- ipu2_di1_hdmi: endpoint@1 {
+- remote-endpoint = <&hdmi_mux_3>;
+- };
+-
+- ipu2_di1_mipi: endpoint@2 {
+- };
+-
+- ipu2_di1_lvds0: endpoint@3 {
+- remote-endpoint = <&lvds0_mux_3>;
+- };
+-
+- ipu2_di1_lvds1: endpoint@4 {
+- remote-endpoint = <&lvds1_mux_3>;
+- };
+- };
+- };
+- };
+-
+- display-subsystem {
+- compatible = "fsl,imx-display-subsystem";
+- ports = <&ipu1_di0>, <&ipu1_di1>, <&ipu2_di0>, <&ipu2_di1>;
+- };
+-};
+-
+-&hdmi {
+- compatible = "fsl,imx6q-hdmi";
+-
+- port@2 {
+- reg = <2>;
+-
+- hdmi_mux_2: endpoint {
+- remote-endpoint = <&ipu2_di0_hdmi>;
+- };
+- };
+-
+- port@3 {
+- reg = <3>;
+-
+- hdmi_mux_3: endpoint {
+- remote-endpoint = <&ipu2_di1_hdmi>;
++ bypass_reset = <0>;
+ };
+ };
+ };
+
+ &ldb {
+- clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
++ clocks = <&clks IMX6QDL_CLK_LDB_DI0>, <&clks IMX6QDL_CLK_LDB_DI1>,
+ <&clks IMX6QDL_CLK_IPU1_DI0_SEL>, <&clks IMX6QDL_CLK_IPU1_DI1_SEL>,
+ <&clks IMX6QDL_CLK_IPU2_DI0_SEL>, <&clks IMX6QDL_CLK_IPU2_DI1_SEL>,
+- <&clks IMX6QDL_CLK_LDB_DI0>, <&clks IMX6QDL_CLK_LDB_DI1>;
+- clock-names = "di0_pll", "di1_pll",
+- "di0_sel", "di1_sel", "di2_sel", "di3_sel",
+- "di0", "di1";
+-
+- lvds-channel@0 {
+- port@2 {
+- reg = <2>;
+-
+- lvds0_mux_2: endpoint {
+- remote-endpoint = <&ipu2_di0_lvds0>;
+- };
+- };
+-
+- port@3 {
+- reg = <3>;
+-
+- lvds0_mux_3: endpoint {
+- remote-endpoint = <&ipu2_di1_lvds0>;
+- };
+- };
+- };
+-
+- lvds-channel@1 {
+- port@2 {
+- reg = <2>;
+-
+- lvds1_mux_2: endpoint {
+- remote-endpoint = <&ipu2_di0_lvds1>;
+- };
+- };
+-
+- port@3 {
+- reg = <3>;
+-
+- lvds1_mux_3: endpoint {
+- remote-endpoint = <&ipu2_di1_lvds1>;
+- };
+- };
+- };
+-};
+-
+-&mipi_dsi {
+- ports {
+- port@2 {
+- reg = <2>;
+-
+- mipi_mux_2: endpoint {
+- remote-endpoint = <&ipu2_di0_mipi>;
+- };
+- };
+-
+- port@3 {
+- reg = <3>;
+-
+- mipi_mux_3: endpoint {
+- remote-endpoint = <&ipu2_di1_mipi>;
+- };
+- };
+- };
+-};
+-
+-&vpu {
+- compatible = "fsl,imx6q-vpu", "cnm,coda960";
++ <&clks IMX6QDL_CLK_LDB_DI0_DIV_3_5>, <&clks IMX6QDL_CLK_LDB_DI1_DIV_3_5>,
++ <&clks IMX6QDL_CLK_LDB_DI0_DIV_7>, <&clks IMX6QDL_CLK_LDB_DI1_DIV_7>,
++ <&clks IMX6QDL_CLK_LDB_DI0_DIV_SEL>, <&clks IMX6QDL_CLK_LDB_DI1_DIV_SEL>;
++ clock-names = "ldb_di0", "ldb_di1",
++ "di0_sel", "di1_sel",
++ "di2_sel", "di3_sel",
++ "ldb_di0_div_3_5", "ldb_di1_div_3_5",
++ "ldb_di0_div_7", "ldb_di1_div_7",
++ "ldb_di0_div_sel", "ldb_di1_div_sel";
+ };
+diff -Nur linux-4.1.3/arch/arm/boot/dts/imx6q-hummingboard.dts linux-xbian-imx6/arch/arm/boot/dts/imx6q-hummingboard.dts
+--- linux-4.1.3/arch/arm/boot/dts/imx6q-hummingboard.dts 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/boot/dts/imx6q-hummingboard.dts 2015-07-27 23:13:00.303898027 +0200
+@@ -57,3 +57,7 @@
+ fsl,transmit-atten-16ths = <9>;
+ fsl,receive-eq-mdB = <3000>;
+ };
++
++&sgtl5000 {
++ status = "okay";
++};
+diff -Nur linux-4.1.3/arch/arm/boot/dts/imx6sl.dtsi linux-xbian-imx6/arch/arm/boot/dts/imx6sl.dtsi
+--- linux-4.1.3/arch/arm/boot/dts/imx6sl.dtsi 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/boot/dts/imx6sl.dtsi 2015-07-27 23:13:00.307883804 +0200
+@@ -457,20 +457,21 @@
+ anatop-min-bit-val = <4>;
+ anatop-min-voltage = <800000>;
+ anatop-max-voltage = <1375000>;
++ anatop-enable-bit = <0>;
+ };
+
+- regulator-3p0@120 {
++ reg_3p0: regulator-3p0@120 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "vdd3p0";
+- regulator-min-microvolt = <2800000>;
+- regulator-max-microvolt = <3150000>;
+- regulator-always-on;
++ regulator-min-microvolt = <2625000>;
++ regulator-max-microvolt = <3400000>;
+ anatop-reg-offset = <0x120>;
+ anatop-vol-bit-shift = <8>;
+ anatop-vol-bit-width = <5>;
+ anatop-min-bit-val = <0>;
+ anatop-min-voltage = <2625000>;
+ anatop-max-voltage = <3400000>;
++ anatop-enable-bit = <0>;
+ };
+
+ regulator-2p5@130 {
+@@ -485,6 +486,7 @@
+ anatop-min-bit-val = <0>;
+ anatop-min-voltage = <2100000>;
+ anatop-max-voltage = <2850000>;
++ anatop-enable-bit = <0>;
+ };
+
+ reg_arm: regulator-vddcore@140 {
+@@ -552,6 +554,7 @@
+ reg = <0x020c9000 0x1000>;
+ interrupts = <0 44 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_USBPHY1>;
++ phy-3p0-supply = <&reg_3p0>;
+ fsl,anatop = <&anatop>;
+ };
+
+@@ -560,6 +563,7 @@
+ reg = <0x020ca000 0x1000>;
+ interrupts = <0 45 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SL_CLK_USBPHY2>;
++ phy-3p0-supply = <&reg_3p0>;
+ fsl,anatop = <&anatop>;
+ };
+
+diff -Nur linux-4.1.3/arch/arm/boot/dts/imx6sx.dtsi linux-xbian-imx6/arch/arm/boot/dts/imx6sx.dtsi
+--- linux-4.1.3/arch/arm/boot/dts/imx6sx.dtsi 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/boot/dts/imx6sx.dtsi 2015-07-27 23:13:00.307883804 +0200
+@@ -556,20 +556,21 @@
+ anatop-min-bit-val = <4>;
+ anatop-min-voltage = <800000>;
+ anatop-max-voltage = <1375000>;
++ anatop-enable-bit = <0>;
+ };
+
+- regulator-3p0@120 {
++ reg_3p0: regulator-3p0@120 {
+ compatible = "fsl,anatop-regulator";
+ regulator-name = "vdd3p0";
+- regulator-min-microvolt = <2800000>;
+- regulator-max-microvolt = <3150000>;
+- regulator-always-on;
++ regulator-min-microvolt = <2625000>;
++ regulator-max-microvolt = <3400000>;
+ anatop-reg-offset = <0x120>;
+ anatop-vol-bit-shift = <8>;
+ anatop-vol-bit-width = <5>;
+ anatop-min-bit-val = <0>;
+ anatop-min-voltage = <2625000>;
+ anatop-max-voltage = <3400000>;
++ anatop-enable-bit = <0>;
+ };
+
+ regulator-2p5@130 {
+@@ -584,6 +585,7 @@
+ anatop-min-bit-val = <0>;
+ anatop-min-voltage = <2100000>;
+ anatop-max-voltage = <2875000>;
++ anatop-enable-bit = <0>;
+ };
+
+ reg_arm: regulator-vddcore@140 {
+@@ -650,6 +652,7 @@
+ reg = <0x020c9000 0x1000>;
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SX_CLK_USBPHY1>;
++ phy-3p0-supply = <&reg_3p0>;
+ fsl,anatop = <&anatop>;
+ };
+
+@@ -658,6 +661,7 @@
+ reg = <0x020ca000 0x1000>;
+ interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SX_CLK_USBPHY2>;
++ phy-3p0-supply = <&reg_3p0>;
+ fsl,anatop = <&anatop>;
+ };
+
+diff -Nur linux-4.1.3/arch/arm/include/asm/glue-cache.h linux-xbian-imx6/arch/arm/include/asm/glue-cache.h
+--- linux-4.1.3/arch/arm/include/asm/glue-cache.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/include/asm/glue-cache.h 2015-07-27 23:13:00.746319518 +0200
+@@ -102,19 +102,19 @@
+ #endif
+
+ #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+-# ifdef _CACHE
++//# ifdef _CACHE
+ # define MULTI_CACHE 1
+-# else
+-# define _CACHE v6
+-# endif
++//# else
++//# define _CACHE v6
++//# endif
+ #endif
+
+ #if defined(CONFIG_CPU_V7)
+-# ifdef _CACHE
++//# ifdef _CACHE
+ # define MULTI_CACHE 1
+-# else
+-# define _CACHE v7
+-# endif
++//# else
++//# define _CACHE v7
++//# endif
+ #endif
+
+ #if defined(CONFIG_CPU_V7M)
+diff -Nur linux-4.1.3/arch/arm/Kconfig linux-xbian-imx6/arch/arm/Kconfig
+--- linux-4.1.3/arch/arm/Kconfig 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/Kconfig 2015-07-27 23:13:00.128523741 +0200
+@@ -1688,6 +1688,7 @@
+ range 11 64 if ARCH_SHMOBILE_LEGACY
+ default "12" if SOC_AM33XX
+ default "9" if SA1111 || ARCH_EFM32
++ default "14" if ARCH_MXC
+ default "11"
+ help
+ The kernel memory allocator divides physically contiguous memory
+diff -Nur linux-4.1.3/arch/arm/mach-imx/busfreq_ddr3.c linux-xbian-imx6/arch/arm/mach-imx/busfreq_ddr3.c
+--- linux-4.1.3/arch/arm/mach-imx/busfreq_ddr3.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/arch/arm/mach-imx/busfreq_ddr3.c 2015-07-27 23:13:01.073153409 +0200
+@@ -0,0 +1,514 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file busfreq_ddr3.c
++ *
++ * @brief iMX6 DDR3 frequency change specific file.
++ *
++ * @ingroup PM
++ */
++#define DEBUG
++
++#include <asm/cacheflush.h>
++#include <asm/fncpy.h>
++#include <asm/io.h>
++#include <asm/mach/map.h>
++#include <asm/mach-types.h>
++#include <asm/tlb.h>
++#include <linux/clk.h>
++#include <linux/cpumask.h>
++#include <linux/delay.h>
++#include <linux/genalloc.h>
++#include <linux/interrupt.h>
++#include <linux/irqchip/arm-gic.h>
++#include <linux/kernel.h>
++#include <linux/mutex.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include <linux/proc_fs.h>
++#include <linux/sched.h>
++#include <linux/smp.h>
++#include <linux/slab.h>
++
++#include "hardware.h"
++
++/* DDR settings */
++static unsigned long (*iram_ddr_settings)[2];
++static unsigned long (*normal_mmdc_settings)[2];
++static unsigned long (*iram_iomux_settings)[2];
++static void __iomem *mmdc_base;
++static void __iomem *iomux_base;
++static void __iomem *ccm_base;
++static void __iomem *l2_base;
++static void __iomem *gic_dist_base;
++static u32 *irqs_used;
++
++static void *ddr_freq_change_iram_base;
++static int ddr_settings_size;
++static int iomux_settings_size;
++static volatile unsigned int cpus_in_wfe;
++static volatile bool wait_for_ddr_freq_update;
++static int curr_ddr_rate;
++
++void (*mx6_change_ddr_freq)(u32 freq, void *ddr_settings,
++ bool dll_mode, void *iomux_offsets) = NULL;
++
++extern unsigned int ddr_med_rate;
++extern unsigned int ddr_normal_rate;
++extern int low_bus_freq_mode;
++extern int audio_bus_freq_mode;
++extern void mx6_ddr3_freq_change(u32 freq, void *ddr_settings,
++ bool dll_mode, void *iomux_offsets);
++extern unsigned long save_ttbr1(void);
++extern void restore_ttbr1(unsigned long ttbr1);
++
++#ifdef CONFIG_SMP
++extern void __iomem *imx_scu_base;
++static unsigned int online_cpus;
++#endif
++
++#define MIN_DLL_ON_FREQ 333000000
++#define MAX_DLL_OFF_FREQ 125000000
++#define DDR_FREQ_CHANGE_SIZE 0x2000
++
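++/* Each table row is an {MMDC register offset, value} pair; rows left as
++ * 0x0 are filled in at init time by reading back the boot-time settings
++ * (command-mode writes via offset 0x1C cannot be read back, so those
++ * rows stay hardcoded). */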
++unsigned long ddr3_dll_mx6q[][2] = {
++ {0x0c, 0x0},
++ {0x10, 0x0},
++ {0x1C, 0x04088032},
++ {0x1C, 0x0408803a},
++ {0x1C, 0x08408030},
++ {0x1C, 0x08408038},
++ {0x818, 0x0},
++};
++
++unsigned long ddr3_calibration[][2] = {
++ {0x83c, 0x0},
++ {0x840, 0x0},
++ {0x483c, 0x0},
++ {0x4840, 0x0},
++ {0x848, 0x0},
++ {0x4848, 0x0},
++ {0x850, 0x0},
++ {0x4850, 0x0},
++};
++
++unsigned long ddr3_dll_mx6dl[][2] = {
++ {0x0c, 0x0},
++ {0x10, 0x0},
++ {0x1C, 0x04008032},
++ {0x1C, 0x0400803a},
++ {0x1C, 0x07208030},
++ {0x1C, 0x07208038},
++ {0x818, 0x0},
++};
++
++unsigned long iomux_offsets_mx6q[][2] = {
++ {0x5A8, 0x0},
++ {0x5B0, 0x0},
++ {0x524, 0x0},
++ {0x51C, 0x0},
++ {0x518, 0x0},
++ {0x50C, 0x0},
++ {0x5B8, 0x0},
++ {0x5C0, 0x0},
++};
++
++unsigned long iomux_offsets_mx6dl[][2] = {
++ {0x4BC, 0x0},
++ {0x4C0, 0x0},
++ {0x4C4, 0x0},
++ {0x4C8, 0x0},
++ {0x4CC, 0x0},
++ {0x4D0, 0x0},
++ {0x4D4, 0x0},
++ {0x4D8, 0x0},
++};
++
++unsigned long ddr3_400[][2] = {
++ {0x83c, 0x42490249},
++ {0x840, 0x02470247},
++ {0x483c, 0x42570257},
++ {0x4840, 0x02400240},
++ {0x848, 0x4039363C},
++ {0x4848, 0x3A39333F},
++ {0x850, 0x38414441},
++ {0x4850, 0x472D4833}
++};
++
++int can_change_ddr_freq(void)
++{
++ return 0;
++}
++
++/*
++ * Each active core apart from the one changing
++ * the DDR frequency executes this function; those
++ * cores have to remain in WFE state until the
++ * frequency change is complete.
++ */
++irqreturn_t wait_in_wfe_irq(int irq, void *dev_id)
++{
++ u32 me = smp_processor_id();
++
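++ /* Each CPU owns one byte of the cpus_in_wfe word, so it can set and
++ * clear its flag without atomic operations. */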
++ *((char *)(&cpus_in_wfe) + (u8)me) = 0xff;
++
++ while (wait_for_ddr_freq_update)
++ wfe();
++
++ *((char *)(&cpus_in_wfe) + (u8)me) = 0;
++
++ return IRQ_HANDLED;
++}
++
++/* change the DDR frequency. */
++int update_ddr_freq(int ddr_rate)
++{
++ int i, j;
++ bool dll_off = false;
++ int me = 0;
++ unsigned long ttbr1;
++#ifdef CONFIG_SMP
++ unsigned int reg;
++ int cpu = 0;
++#endif
++
++ if (!can_change_ddr_freq())
++ return -1;
++
++ if (ddr_rate == curr_ddr_rate)
++ return 0;
++
++ printk(KERN_DEBUG "\nBus freq set to %d start...\n", ddr_rate);
++
++ if (low_bus_freq_mode || audio_bus_freq_mode)
++ dll_off = true;
++
++ iram_ddr_settings[0][0] = ddr_settings_size;
++ iram_iomux_settings[0][0] = iomux_settings_size;
++ if (ddr_rate == ddr_med_rate && cpu_is_imx6q() &&
++ ddr_med_rate != ddr_normal_rate) {
++ for (i = 0; i < ARRAY_SIZE(ddr3_dll_mx6q); i++) {
++ iram_ddr_settings[i + 1][0] =
++ normal_mmdc_settings[i][0];
++ iram_ddr_settings[i + 1][1] =
++ normal_mmdc_settings[i][1];
++ }
++ for (j = 0, i = ARRAY_SIZE(ddr3_dll_mx6q);
++ i < iram_ddr_settings[0][0]; j++, i++) {
++ iram_ddr_settings[i + 1][0] =
++ ddr3_400[j][0];
++ iram_ddr_settings[i + 1][1] =
++ ddr3_400[j][1];
++ }
++ } else if (ddr_rate == ddr_normal_rate) {
++ for (i = 0; i < iram_ddr_settings[0][0]; i++) {
++ iram_ddr_settings[i + 1][0] =
++ normal_mmdc_settings[i][0];
++ iram_ddr_settings[i + 1][1] =
++ normal_mmdc_settings[i][1];
++ }
++ }
++
++ /* ensure that all Cores are in WFE. */
++ local_irq_disable();
++
++#ifdef CONFIG_SMP
++ me = smp_processor_id();
++
++ /* Make sure all the online cores are active */
++ while (1) {
++ bool not_exited_busfreq = false;
++ for_each_online_cpu(cpu) {
++ u32 reg = __raw_readl(imx_scu_base + 0x08);
++ if (reg & (0x02 << (cpu * 8)))
++ not_exited_busfreq = true;
++ }
++ if (!not_exited_busfreq)
++ break;
++ }
++
++ wmb();
++ wait_for_ddr_freq_update = 1;
++ dsb();
++
++ online_cpus = readl_relaxed(imx_scu_base + 0x08);
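++ /* Build the SCU CPU-status pattern expected once every other online
++ * core is idling in WFE: one status byte per CPU, set to 0x02. */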
++ for_each_online_cpu(cpu) {
++ *((char *)(&online_cpus) + (u8)cpu) = 0x02;
++ if (cpu != me) {
++ /* set the interrupt to be pending in the GIC. */
++ reg = 1 << (irqs_used[cpu] % 32);
++ writel_relaxed(reg, gic_dist_base + GIC_DIST_PENDING_SET
++ + (irqs_used[cpu] / 32) * 4);
++ }
++ }
++ /* Wait for the other active CPUs to idle */
++ while (1) {
++ u32 reg = readl_relaxed(imx_scu_base + 0x08);
++ reg |= (0x02 << (me * 8));
++ if (reg == online_cpus)
++ break;
++ }
++#endif
++
++ /* Ensure iram_tlb_phys_addr is flushed to DDR. */
++ /*__cpuc_flush_dcache_area(&iram_tlb_phys_addr, sizeof(iram_tlb_phys_addr));
++ outer_clean_range(virt_to_phys(&iram_tlb_phys_addr), virt_to_phys(&iram_tlb_phys_addr + 1));*/
++
++ /*
++ * Flush the TLB, to ensure no TLB maintenance occurs
++ * when DDR is in self-refresh.
++ */
++ local_flush_tlb_all();
++
++ ttbr1 = save_ttbr1();
++ /* Now we can change the DDR frequency. */
++ mx6_change_ddr_freq(ddr_rate, iram_ddr_settings,
++ dll_off, iram_iomux_settings);
++ restore_ttbr1(ttbr1);
++ curr_ddr_rate = ddr_rate;
++
++#ifdef CONFIG_SMP
++ wmb();
++ /* DDR frequency change is done. */
++ wait_for_ddr_freq_update = 0;
++ dsb();
++
++ /* wake up all the cores. */
++ sev();
++#endif
++
++ local_irq_enable();
++
++ printk(KERN_DEBUG "Bus freq set to %d done! cpu=%d\n", ddr_rate, me);
++
++ return 0;
++}
++
++int init_mmdc_ddr3_settings(struct platform_device *busfreq_pdev)
++{
++ struct device *dev = &busfreq_pdev->dev;
++ struct platform_device *ocram_dev;
++ unsigned int iram_paddr;
++ int i, err;
++ u32 cpu;
++ struct device_node *node;
++ struct gen_pool *iram_pool;
++
++ node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-mmdc-combine");
++ if (!node) {
++ pr_err("failed to find imx6q-mmdc device tree data!\n");
++ return -EINVAL;
++ }
++ mmdc_base = of_iomap(node, 0);
++ WARN(!mmdc_base, "unable to map mmdc registers\n");
++
++ node = NULL;
++ if (cpu_is_imx6q())
++ node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-iomuxc");
++ if (cpu_is_imx6dl())
++ node = of_find_compatible_node(NULL, NULL,
++ "fsl,imx6dl-iomuxc");
++ if (!node) {
++ pr_err("failed to find imx6q-iomux device tree data!\n");
++ return -EINVAL;
++ }
++ iomux_base = of_iomap(node, 0);
++ WARN(!iomux_base, "unable to map iomux registers\n");
++
++ node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ccm");
++ if (!node) {
++ pr_err("failed to find imx6q-ccm device tree data!\n");
++ return -EINVAL;
++ }
++ ccm_base = of_iomap(node, 0);
++ WARN(!ccm_base, "unable to map ccm registers\n");
++
++ node = of_find_compatible_node(NULL, NULL, "arm,pl310-cache");
++ if (!node) {
++ pr_err("failed to find imx6q-pl310-cache device tree data!\n");
++ return -EINVAL;
++ }
++ l2_base = of_iomap(node, 0);
++ WARN(!l2_base, "unable to map pl310 registers\n");
++
++ node = NULL;
++ node = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-gic");
++ if (!node) {
++ pr_err("failed to find imx6q-a9-gic device tree data!\n");
++ return -EINVAL;
++ }
++ gic_dist_base = of_iomap(node, 0);
++ WARN(!gic_dist_base, "unable to map gic dist registers\n");
++
++ if (cpu_is_imx6q())
++ ddr_settings_size = ARRAY_SIZE(ddr3_dll_mx6q) +
++ ARRAY_SIZE(ddr3_calibration);
++ if (cpu_is_imx6dl())
++ ddr_settings_size = ARRAY_SIZE(ddr3_dll_mx6dl) +
++ ARRAY_SIZE(ddr3_calibration);
++
++ normal_mmdc_settings = kmalloc((ddr_settings_size * 8), GFP_KERNEL);
++ if (cpu_is_imx6q()) {
++ memcpy(normal_mmdc_settings, ddr3_dll_mx6q,
++ sizeof(ddr3_dll_mx6q));
++ memcpy(((char *)normal_mmdc_settings + sizeof(ddr3_dll_mx6q)),
++ ddr3_calibration, sizeof(ddr3_calibration));
++ }
++ if (cpu_is_imx6dl()) {
++ memcpy(normal_mmdc_settings, ddr3_dll_mx6dl,
++ sizeof(ddr3_dll_mx6dl));
++ memcpy(((char *)normal_mmdc_settings + sizeof(ddr3_dll_mx6dl)),
++ ddr3_calibration, sizeof(ddr3_calibration));
++ }
++ /* store the original DDR settings at boot. */
++ for (i = 0; i < ddr_settings_size; i++) {
++ /*
++ * Writes via the command-mode register cannot be read back,
++ * hence they are hardcoded in the initial static array.
++ * This may require modification on a per-customer basis.
++ */
++ if (normal_mmdc_settings[i][0] != 0x1C)
++ normal_mmdc_settings[i][1] =
++ readl_relaxed(mmdc_base
++ + normal_mmdc_settings[i][0]);
++ }
++
++ irqs_used = devm_kzalloc(dev, sizeof(u32) * num_present_cpus(),
++ GFP_KERNEL);
++
++ for_each_online_cpu(cpu) {
++ int irq;
++
++ /*
++ * set up a reserved interrupt to get all
++ * the active cores into a WFE state
++ * before changing the DDR frequency.
++ */
++ irq = platform_get_irq(busfreq_pdev, cpu);
++ err = request_irq(irq, wait_in_wfe_irq,
++ IRQF_PERCPU, "mmdc_1", NULL);
++ if (err) {
++ dev_err(dev,
++ "Busfreq:request_irq failed %d, err = %d\n",
++ irq, err);
++ return err;
++ }
++ err = irq_set_affinity(irq, cpumask_of(cpu));
++ if (err) {
++ dev_err(dev,
++ "Busfreq: Cannot set irq affinity irq=%d,\n",
++ irq);
++ return err;
++ }
++ irqs_used[cpu] = irq;
++ }
++
++ node = NULL;
++ node = of_find_compatible_node(NULL, NULL, "mmio-sram");
++ if (!node) {
++ dev_err(dev, "%s: failed to find ocram node\n",
++ __func__);
++ return -EINVAL;
++ }
++
++ ocram_dev = of_find_device_by_node(node);
++ if (!ocram_dev) {
++ dev_err(dev, "failed to find ocram device!\n");
++ return -EINVAL;
++ }
++
++ iram_pool = dev_get_gen_pool(&ocram_dev->dev);
++ if (!iram_pool) {
++ dev_err(dev, "iram pool unavailable!\n");
++ return -EINVAL;
++ }
++
++ iomux_settings_size = ARRAY_SIZE(iomux_offsets_mx6q);
++ iram_iomux_settings = (void*)gen_pool_alloc(iram_pool,
++ (iomux_settings_size * 8) + 8);
++ if (!iram_iomux_settings) {
++ dev_err(dev, "unable to alloc iram for IOMUX settings!\n");
++ return -ENOMEM;
++ }
++
++ /*
++ * Allocate extra space to store the number of entries in the
++ * ddr_settings plus 4 extra register addresses that need
++ * to be passed to the frequency change code.
++ * sizeof(iram_ddr_settings) = sizeof(ddr_settings) +
++ * entries in ddr_settings + 16.
++ * The last 4 entries store the addresses of the registers:
++ * CCM_BASE_ADDR
++ * MMDC_BASE_ADDR
++ * IOMUX_BASE_ADDR
++ * L2X0_BASE_ADDR
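++ * e.g. on i.MX6Q ddr_settings_size = 7 + 8 = 15, so 15*8 + 8 + 32 =
++ * 160 bytes are allocated: row 0 holds the entry count, rows 1-15 the
++ * settings, and rows 16-19 the four register addresses above.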
++ */
++ iram_ddr_settings = (void*)gen_pool_alloc(iram_pool,
++ (ddr_settings_size * 8) + 8 + 32);
++ if (!iram_ddr_settings) {
++ dev_err(dev, "unable to alloc iram for ddr settings!\n");
++ return -ENOMEM;
++ }
++ i = ddr_settings_size + 1;
++ iram_ddr_settings[i][0] = (unsigned long)mmdc_base;
++ iram_ddr_settings[i+1][0] = (unsigned long)ccm_base;
++ iram_ddr_settings[i+2][0] = (unsigned long)iomux_base;
++ iram_ddr_settings[i+3][0] = (unsigned long)l2_base;
++
++ if (cpu_is_imx6q()) {
++ /* store the IOMUX settings at boot. */
++ for (i = 0; i < iomux_settings_size; i++) {
++ iomux_offsets_mx6q[i][1] =
++ readl_relaxed(iomux_base +
++ iomux_offsets_mx6q[i][0]);
++ iram_iomux_settings[i+1][0] = iomux_offsets_mx6q[i][0];
++ iram_iomux_settings[i+1][1] = iomux_offsets_mx6q[i][1];
++ }
++ }
++
++ if (cpu_is_imx6dl()) {
++ for (i = 0; i < iomux_settings_size; i++) {
++ iomux_offsets_mx6dl[i][1] =
++ readl_relaxed(iomux_base +
++ iomux_offsets_mx6dl[i][0]);
++ iram_iomux_settings[i+1][0] = iomux_offsets_mx6dl[i][0];
++ iram_iomux_settings[i+1][1] = iomux_offsets_mx6dl[i][1];
++ }
++ }
++
++ ddr_freq_change_iram_base = (void*)gen_pool_alloc(iram_pool,
++ DDR_FREQ_CHANGE_SIZE);
++ if (!ddr_freq_change_iram_base) {
++ dev_err(dev, "Cannot alloc iram for ddr freq change code!\n");
++ return -ENOMEM;
++ }
++
++ iram_paddr = gen_pool_virt_to_phys(iram_pool,
++ (unsigned long)ddr_freq_change_iram_base);
++ /*
++ * Need to remap the area here since we want
++ * the memory region to be executable.
++ */
++ ddr_freq_change_iram_base = __arm_ioremap(iram_paddr,
++ DDR_FREQ_CHANGE_SIZE,
++ MT_MEMORY_RWX_NONCACHED);
++ mx6_change_ddr_freq = (void *)fncpy(ddr_freq_change_iram_base,
++ &mx6_ddr3_freq_change, DDR_FREQ_CHANGE_SIZE);
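++	/*
++	 * Note: fncpy() copies the function body into the executable
++	 * SRAM mapping, flushes the caches and returns a pointer that
++	 * is safe to call; the destination is assumed to meet fncpy()'s
++	 * 8-byte alignment requirement.
++	 */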
++
++ curr_ddr_rate = ddr_normal_rate;
++
++ return 0;
++}
+diff -Nur linux-4.1.3/arch/arm/mach-imx/busfreq-imx6.c linux-xbian-imx6/arch/arm/mach-imx/busfreq-imx6.c
+--- linux-4.1.3/arch/arm/mach-imx/busfreq-imx6.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/arch/arm/mach-imx/busfreq-imx6.c 2015-07-27 23:13:01.073153409 +0200
+@@ -0,0 +1,984 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++/*!
++ * @file busfreq-imx6.c
++ *
++ * @brief A common API for the Freescale Semiconductor i.MX6 busfreq driver
++ *
++ * The APIs set the bus frequency to different values based on the
++ * highest frequency requested.
++ *
++ * @ingroup PM
++ */
++
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <asm/cacheflush.h>
++#include <asm/io.h>
++#include <asm/mach/map.h>
++#include <asm/mach-types.h>
++#include <asm/tlb.h>
++#include <linux/busfreq-imx6.h>
++#include <linux/clk.h>
++#include <linux/clk-provider.h>
++#include <linux/delay.h>
++#include <linux/mutex.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/proc_fs.h>
++#include <linux/reboot.h>
++#include <linux/regulator/consumer.h>
++#include <linux/sched.h>
++#include <linux/suspend.h>
++#include "hardware.h"
++
++#define LPAPM_CLK 24000000
++#define DDR3_AUDIO_CLK 50000000
++#define LPDDR2_AUDIO_CLK 100000000
++
++int vpu352 = 0;
++
++int high_bus_freq_mode;
++int med_bus_freq_mode;
++int audio_bus_freq_mode;
++int low_bus_freq_mode;
++int ultra_low_bus_freq_mode;
++unsigned int ddr_med_rate;
++unsigned int ddr_normal_rate;
++
++#ifdef CONFIG_ARM_IMX6Q_CPUFREQ
++static int bus_freq_scaling_initialized;
++static struct device *busfreq_dev;
++static int busfreq_suspended;
++static u32 org_arm_rate;
++static int bus_freq_scaling_is_active;
++static int high_bus_count, med_bus_count, audio_bus_count, low_bus_count;
++static unsigned int ddr_low_rate;
++
++extern int init_mmdc_lpddr2_settings(struct platform_device *dev);
++extern int init_mmdc_ddr3_settings(struct platform_device *dev);
++extern int update_ddr_freq(int ddr_rate);
++extern int update_lpddr2_freq(int ddr_rate);
++
++DEFINE_MUTEX(bus_freq_mutex);
++static DEFINE_SPINLOCK(freq_lock);
++
++static struct clk *pll2_400;
++static struct clk *periph_clk;
++static struct clk *periph_pre_clk;
++static struct clk *periph_clk2_sel;
++static struct clk *periph_clk2;
++static struct clk *osc_clk;
++static struct clk *cpu_clk;
++static struct clk *pll3;
++static struct clk *pll2;
++static struct clk *pll2_200;
++static struct clk *pll1_sys;
++static struct clk *periph2_clk;
++static struct clk *ocram_clk;
++static struct clk *ahb_clk;
++static struct clk *pll1_sw_clk;
++static struct clk *periph2_pre_clk;
++static struct clk *periph2_clk2_sel;
++static struct clk *periph2_clk2;
++static struct clk *step_clk;
++static struct clk *axi_sel_clk;
++static struct clk *pll3_pfd1_540m;
++
++static u32 pll2_org_rate;
++static struct delayed_work low_bus_freq_handler;
++static struct delayed_work bus_freq_daemon;
++
++static void enter_lpm_imx6sl(void)
++{
++ unsigned long flags;
++
++ if (high_bus_freq_mode) {
++ pll2_org_rate = clk_get_rate(pll2);
++ /* Set periph_clk to be sourced from OSC_CLK */
++ clk_set_parent(periph_clk2_sel, osc_clk);
++ clk_set_parent(periph_clk, periph_clk2);
++ /* Ensure AHB/AXI clks are at 24MHz. */
++ clk_set_rate(ahb_clk, LPAPM_CLK);
++ clk_set_rate(ocram_clk, LPAPM_CLK);
++ }
++ if (audio_bus_count) {
++		/* Set AHB to 8MHz to lower power. */
++ clk_set_rate(ahb_clk, LPAPM_CLK / 3);
++
++ /* Set up DDR to 100MHz. */
++ spin_lock_irqsave(&freq_lock, flags);
++ update_lpddr2_freq(LPDDR2_AUDIO_CLK);
++ spin_unlock_irqrestore(&freq_lock, flags);
++
++ /* Fix the clock tree in kernel */
++ clk_set_rate(pll2, pll2_org_rate);
++ clk_set_parent(periph2_pre_clk, pll2_200);
++ clk_set_parent(periph2_clk, periph2_pre_clk);
++
++ if (low_bus_freq_mode || ultra_low_bus_freq_mode) {
++ /*
++			 * Switch ARM to run off PLL2_PFD2_400MHz
++ * since DDR is anyway at 100MHz.
++ */
++ clk_set_parent(step_clk, pll2_400);
++ clk_set_parent(pll1_sw_clk, step_clk);
++ /*
++ * Ensure that the clock will be
++ * at original speed.
++ */
++ clk_set_rate(cpu_clk, org_arm_rate);
++ }
++ low_bus_freq_mode = 0;
++ ultra_low_bus_freq_mode = 0;
++ audio_bus_freq_mode = 1;
++ } else {
++ u32 arm_div, pll1_rate;
++ org_arm_rate = clk_get_rate(cpu_clk);
++ if (low_bus_freq_mode && low_bus_count == 0) {
++ /*
++ * We are already in DDR @ 24MHz state, but
++ * no one but ARM needs the DDR. In this case,
++ * we can lower the DDR freq to 1MHz when ARM
++ * enters WFI in this state. Keep track of this state.
++ */
++ ultra_low_bus_freq_mode = 1;
++ low_bus_freq_mode = 0;
++ audio_bus_freq_mode = 0;
++ } else {
++ if (!ultra_low_bus_freq_mode && !low_bus_freq_mode) {
++ /*
++ * Set DDR to 24MHz.
++ * Since we are going to bypass PLL2,
++ * we need to move ARM clk off PLL2_PFD2
++ * to PLL1. Make sure the PLL1 is running
++ * at the lowest possible freq.
++ */
++ clk_set_rate(pll1_sys,
++ clk_round_rate(pll1_sys, org_arm_rate));
++ pll1_rate = clk_get_rate(pll1_sys);
++ arm_div = pll1_rate / org_arm_rate + 1;
++ /*
++ * Ensure ARM CLK is lower before
++ * changing the parent.
++ */
++ clk_set_rate(cpu_clk, org_arm_rate / arm_div);
++ /* Now set the ARM clk parent to PLL1_SYS. */
++ clk_set_parent(pll1_sw_clk, pll1_sys);
++
++ /*
++ * Set STEP_CLK back to OSC to save power and
++				 * also to maintain the parent. The WFI iram code
++				 * will switch step_clk to osc, but the clock API
++				 * is not aware of the change, and when a new request
++				 * to change the step_clk parent to pll2_pfd2_400M
++				 * is made sometime later, the change is ignored.
++ */
++ clk_set_parent(step_clk, osc_clk);
++ /* Now set DDR to 24MHz. */
++ spin_lock_irqsave(&freq_lock, flags);
++ update_lpddr2_freq(LPAPM_CLK);
++ spin_unlock_irqrestore(&freq_lock, flags);
++
++ /*
++ * Fix the clock tree in kernel.
++ * Make sure PLL2 rate is updated as it gets
++ * bypassed in the DDR freq change code.
++ */
++ clk_set_rate(pll2, LPAPM_CLK);
++ clk_set_parent(periph2_clk2_sel, pll2);
++ clk_set_parent(periph2_clk, periph2_clk2_sel);
++
++ }
++ if (low_bus_count == 0) {
++ ultra_low_bus_freq_mode = 1;
++ low_bus_freq_mode = 0;
++ } else {
++ ultra_low_bus_freq_mode = 0;
++ low_bus_freq_mode = 1;
++ }
++ audio_bus_freq_mode = 0;
++ }
++ }
++}
++
++static void exit_lpm_imx6sl(void)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&freq_lock, flags);
++ /* Change DDR freq in IRAM. */
++ update_lpddr2_freq(ddr_normal_rate);
++ spin_unlock_irqrestore(&freq_lock, flags);
++
++ /*
++ * Fix the clock tree in kernel.
++ * Make sure PLL2 rate is updated as it gets
++ * un-bypassed in the DDR freq change code.
++ */
++ clk_set_rate(pll2, pll2_org_rate);
++ clk_set_parent(periph2_pre_clk, pll2_400);
++ clk_set_parent(periph2_clk, periph2_pre_clk);
++
++ /* Ensure that periph_clk is sourced from PLL2_400. */
++ clk_set_parent(periph_pre_clk, pll2_400);
++ /*
++	 * Before switching the periph_clk, ensure that the
++ * AHB/AXI will not be too fast.
++ */
++ clk_set_rate(ahb_clk, LPAPM_CLK / 3);
++ clk_set_rate(ocram_clk, LPAPM_CLK / 2);
++ clk_set_parent(periph_clk, periph_pre_clk);
++
++ if (low_bus_freq_mode || ultra_low_bus_freq_mode) {
++ /* Move ARM from PLL1_SW_CLK to PLL2_400. */
++ clk_set_parent(step_clk, pll2_400);
++ clk_set_parent(pll1_sw_clk, step_clk);
++ clk_set_rate(cpu_clk, org_arm_rate);
++ ultra_low_bus_freq_mode = 0;
++ }
++}
++
++int reduce_bus_freq(void)
++{
++ int ret = 0;
++ clk_prepare_enable(pll3);
++ if (cpu_is_imx6sl())
++ enter_lpm_imx6sl();
++ else {
++ if (cpu_is_imx6dl() && (clk_get_parent(axi_sel_clk)
++ != periph_clk))
++ /* Set axi to periph_clk */
++ clk_set_parent(axi_sel_clk, periph_clk);
++
++ if (audio_bus_count) {
++ /* Need to ensure that PLL2_PFD_400M is kept ON. */
++ clk_prepare_enable(pll2_400);
++ update_ddr_freq(DDR3_AUDIO_CLK);
++ /* Make sure periph clk's parent also got updated */
++ ret = clk_set_parent(periph_clk2_sel, pll3);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ ret = clk_set_parent(periph_pre_clk, pll2_200);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ ret = clk_set_parent(periph_clk, periph_pre_clk);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ audio_bus_freq_mode = 1;
++ low_bus_freq_mode = 0;
++ } else {
++ update_ddr_freq(LPAPM_CLK);
++ /* Make sure periph clk's parent also got updated */
++ ret = clk_set_parent(periph_clk2_sel, osc_clk);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ /* Set periph_clk parent to OSC via periph_clk2_sel */
++ ret = clk_set_parent(periph_clk, periph_clk2);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ if (audio_bus_freq_mode)
++ clk_disable_unprepare(pll2_400);
++ low_bus_freq_mode = 1;
++ audio_bus_freq_mode = 0;
++ }
++ }
++ clk_disable_unprepare(pll3);
++
++ med_bus_freq_mode = 0;
++ high_bus_freq_mode = 0;
++
++ if (audio_bus_freq_mode)
++ dev_dbg(busfreq_dev, "Bus freq set to audio mode. Count:\
++ high %d, med %d, audio %d\n",
++ high_bus_count, med_bus_count, audio_bus_count);
++ if (low_bus_freq_mode)
++ dev_dbg(busfreq_dev, "Bus freq set to low mode. Count:\
++ high %d, med %d, audio %d\n",
++ high_bus_count, med_bus_count, audio_bus_count);
++
++ return ret;
++}
++
++static void reduce_bus_freq_handler(struct work_struct *work)
++{
++ mutex_lock(&bus_freq_mutex);
++
++ reduce_bus_freq();
++
++ mutex_unlock(&bus_freq_mutex);
++}
++
++/*
++ * Set the DDR, AHB to 24MHz.
++ * This mode will be activated only when none of the modules that
++ * need a higher DDR or AHB frequency are active.
++ */
++int set_low_bus_freq(void)
++{
++ if (busfreq_suspended)
++ return 0;
++
++ if (!bus_freq_scaling_initialized || !bus_freq_scaling_is_active)
++ return 0;
++
++ /*
++	 * Check to see if we need to go from
++ * low bus freq mode to audio bus freq mode.
++ * If so, the change needs to be done immediately.
++ */
++ if (audio_bus_count && (low_bus_freq_mode || ultra_low_bus_freq_mode))
++ reduce_bus_freq();
++ else
++ /*
++		 * Don't lower the frequency immediately. Instead,
++		 * schedule delayed work and drop the freq if
++		 * the conditions still remain the same.
++ */
++ schedule_delayed_work(&low_bus_freq_handler,
++ usecs_to_jiffies(3000000));
++ return 0;
++}
++
++/*
++ * Set the DDR to either 528MHz or 400MHz on i.MX6Q/D,
++ * or to 400MHz on i.MX6DL.
++ */
++int set_high_bus_freq(int high_bus_freq)
++{
++ int ret = 0;
++ struct clk *periph_clk_parent;
++
++ if (bus_freq_scaling_initialized && bus_freq_scaling_is_active)
++ cancel_delayed_work_sync(&low_bus_freq_handler);
++
++ if (busfreq_suspended)
++ return 0;
++
++ if (cpu_is_imx6q())
++ periph_clk_parent = pll2;
++ else
++ periph_clk_parent = pll2_400;
++
++ if (!bus_freq_scaling_initialized || !bus_freq_scaling_is_active)
++ return 0;
++
++ if (high_bus_freq_mode)
++ return 0;
++
++ /* medium bus freq is only supported for MX6DQ */
++ if (med_bus_freq_mode && !high_bus_freq)
++ return 0;
++
++ clk_prepare_enable(pll3);
++ if (cpu_is_imx6sl())
++ exit_lpm_imx6sl();
++ else {
++ if (high_bus_freq) {
++ update_ddr_freq(ddr_normal_rate);
++ /* Make sure periph clk's parent also got updated */
++ ret = clk_set_parent(periph_clk2_sel, pll3);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ ret = clk_set_parent(periph_pre_clk, periph_clk_parent);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ ret = clk_set_parent(periph_clk, periph_pre_clk);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ if (cpu_is_imx6dl() && (clk_get_parent(axi_sel_clk)
++ != pll3_pfd1_540m))
++ /* Set axi to pll3_pfd1_540m */
++ clk_set_parent(axi_sel_clk, pll3_pfd1_540m);
++ } else {
++ update_ddr_freq(ddr_med_rate);
++ /* Make sure periph clk's parent also got updated */
++ ret = clk_set_parent(periph_clk2_sel, pll3);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ ret = clk_set_parent(periph_pre_clk, pll2_400);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ ret = clk_set_parent(periph_clk, periph_pre_clk);
++ if (ret)
++ dev_WARN(busfreq_dev,
++ "%s: %d: clk set parent fail!\n",
++ __func__, __LINE__);
++ }
++ if (audio_bus_freq_mode)
++ clk_disable_unprepare(pll2_400);
++ }
++
++ high_bus_freq_mode = 1;
++ med_bus_freq_mode = 0;
++ low_bus_freq_mode = 0;
++ audio_bus_freq_mode = 0;
++
++ clk_disable_unprepare(pll3);
++
++ if (high_bus_freq_mode)
++ dev_dbg(busfreq_dev, "Bus freq set to high mode. Count:\
++ high %d, med %d, audio %d\n",
++ high_bus_count, med_bus_count, audio_bus_count);
++ if (med_bus_freq_mode)
++ dev_dbg(busfreq_dev, "Bus freq set to med mode. Count:\
++ high %d, med %d, audio %d\n",
++ high_bus_count, med_bus_count, audio_bus_count);
++
++ return 0;
++}
++#endif
++
++void request_bus_freq(enum bus_freq_mode mode)
++{
++#ifdef CONFIG_ARM_IMX6Q_CPUFREQ
++ mutex_lock(&bus_freq_mutex);
++
++ if (mode == BUS_FREQ_HIGH)
++ high_bus_count++;
++ else if (mode == BUS_FREQ_MED)
++ med_bus_count++;
++ else if (mode == BUS_FREQ_AUDIO)
++ audio_bus_count++;
++ else if (mode == BUS_FREQ_LOW)
++ low_bus_count++;
++
++ if (busfreq_suspended || !bus_freq_scaling_initialized ||
++ !bus_freq_scaling_is_active) {
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ cancel_delayed_work_sync(&low_bus_freq_handler);
++
++ if (cpu_is_imx6dl()) {
++ /* No support for medium setpoint on MX6DL. */
++ if (mode == BUS_FREQ_MED) {
++ high_bus_count++;
++ mode = BUS_FREQ_HIGH;
++ }
++ }
++
++ if ((mode == BUS_FREQ_HIGH) && (!high_bus_freq_mode)) {
++ set_high_bus_freq(1);
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++
++ if ((mode == BUS_FREQ_MED) && (!high_bus_freq_mode) &&
++ (!med_bus_freq_mode)) {
++ set_high_bus_freq(0);
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ if ((mode == BUS_FREQ_AUDIO) && (!high_bus_freq_mode) &&
++ (!med_bus_freq_mode) && (!audio_bus_freq_mode)) {
++ set_low_bus_freq();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ mutex_unlock(&bus_freq_mutex);
++#endif
++ return;
++}
++EXPORT_SYMBOL(request_bus_freq);
++
++void release_bus_freq(enum bus_freq_mode mode)
++{
++#ifdef CONFIG_ARM_IMX6Q_CPUFREQ
++ mutex_lock(&bus_freq_mutex);
++
++ if (mode == BUS_FREQ_HIGH) {
++ if (high_bus_count == 0) {
++ dev_err(busfreq_dev, "high bus count mismatch!\n");
++ dump_stack();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ high_bus_count--;
++ } else if (mode == BUS_FREQ_MED) {
++ if (med_bus_count == 0) {
++ dev_err(busfreq_dev, "med bus count mismatch!\n");
++ dump_stack();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ med_bus_count--;
++ } else if (mode == BUS_FREQ_AUDIO) {
++ if (audio_bus_count == 0) {
++ dev_err(busfreq_dev, "audio bus count mismatch!\n");
++ dump_stack();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ audio_bus_count--;
++ } else if (mode == BUS_FREQ_LOW) {
++ if (low_bus_count == 0) {
++ dev_err(busfreq_dev, "low bus count mismatch!\n");
++ dump_stack();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ low_bus_count--;
++ }
++
++ if (busfreq_suspended || !bus_freq_scaling_initialized ||
++ !bus_freq_scaling_is_active) {
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++
++ if (cpu_is_imx6dl()) {
++ /* No support for medium setpoint on MX6DL. */
++ if (mode == BUS_FREQ_MED) {
++ high_bus_count--;
++ mode = BUS_FREQ_HIGH;
++ }
++ }
++
++ if ((!audio_bus_freq_mode) && (high_bus_count == 0) &&
++ (med_bus_count == 0) && (audio_bus_count != 0)) {
++ set_low_bus_freq();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ if ((!low_bus_freq_mode) && (high_bus_count == 0) &&
++ (med_bus_count == 0) && (audio_bus_count == 0) &&
++ (low_bus_count != 0)) {
++ set_low_bus_freq();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++ if ((!ultra_low_bus_freq_mode) && (high_bus_count == 0) &&
++ (med_bus_count == 0) && (audio_bus_count == 0) &&
++ (low_bus_count == 0)) {
++ set_low_bus_freq();
++ mutex_unlock(&bus_freq_mutex);
++ return;
++ }
++
++ mutex_unlock(&bus_freq_mutex);
++#endif
++ return;
++}
++EXPORT_SYMBOL(release_bus_freq);
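++/*
++ * Usage sketch (illustration only, not part of this driver): a device
++ * driver that needs full DDR bandwidth while active would bracket its
++ * busy period with these calls; "foo" is a hypothetical driver.
++ */
++#if 0
++static int foo_start_streaming(void)
++{
++	request_bus_freq(BUS_FREQ_HIGH);
++	/* ... start DMA ... */
++	return 0;
++}
++
++static void foo_stop_streaming(void)
++{
++	/* ... quiesce DMA ... */
++	release_bus_freq(BUS_FREQ_HIGH);
++}
++#endif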
++
++#ifdef CONFIG_ARM_IMX6Q_CPUFREQ
++static void bus_freq_daemon_handler(struct work_struct *work)
++{
++ mutex_lock(&bus_freq_mutex);
++ if ((!low_bus_freq_mode) && (!ultra_low_bus_freq_mode) && (high_bus_count == 0) &&
++ (med_bus_count == 0) && (audio_bus_count == 0))
++ set_low_bus_freq();
++ mutex_unlock(&bus_freq_mutex);
++}
++
++static ssize_t bus_freq_scaling_enable_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ if (bus_freq_scaling_is_active)
++ return sprintf(buf, "Bus frequency scaling is enabled\n");
++ else
++ return sprintf(buf, "Bus frequency scaling is disabled\n");
++}
++
++static ssize_t vpu352_enable_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ if (vpu352)
++ return sprintf(buf, "VPU352M is enabled\n");
++ else
++ return sprintf(buf, "VPU352M is disabled\n");
++}
++
++static int vpu352_setup(char *options)
++{
++	return kstrtoint(options, 0, &vpu352);
++}
++
++static ssize_t bus_freq_scaling_enable_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t size)
++{
++ if (strncmp(buf, "1", 1) == 0) {
++ bus_freq_scaling_is_active = 1;
++ set_high_bus_freq(1);
++ /*
++ * We set bus freq to highest at the beginning,
++ * so we use this daemon thread to make sure system
++ * can enter low bus mode if
++ * there is no high bus request pending
++ */
++ schedule_delayed_work(&bus_freq_daemon,
++ usecs_to_jiffies(5000000));
++ } else if (strncmp(buf, "0", 1) == 0) {
++ if (bus_freq_scaling_is_active)
++ set_high_bus_freq(1);
++ bus_freq_scaling_is_active = 0;
++ }
++ return size;
++}
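++/*
++ * Usage sketch: the "enable" attribute lands under the busfreq platform
++ * device (the exact path depends on the board's device tree), e.g.:
++ *
++ *	echo 0 > /sys/bus/platform/devices/.../imx6_busfreq/enable
++ *	echo 1 > /sys/bus/platform/devices/.../imx6_busfreq/enable
++ *
++ * where "0" pins the bus at high frequency and "1" re-enables scaling.
++ */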
++
++static int bus_freq_pm_notify(struct notifier_block *nb, unsigned long event,
++ void *dummy)
++{
++ mutex_lock(&bus_freq_mutex);
++
++ if (event == PM_SUSPEND_PREPARE) {
++ high_bus_count++;
++ set_high_bus_freq(1);
++ busfreq_suspended = 1;
++ } else if (event == PM_POST_SUSPEND) {
++ busfreq_suspended = 0;
++ high_bus_count--;
++ schedule_delayed_work(&bus_freq_daemon,
++ usecs_to_jiffies(5000000));
++ }
++
++ mutex_unlock(&bus_freq_mutex);
++
++ return NOTIFY_OK;
++}
++
++static int busfreq_reboot_notifier_event(struct notifier_block *this,
++ unsigned long event, void *ptr)
++{
++ /* System is rebooting. Set the system into high_bus_freq_mode. */
++ request_bus_freq(BUS_FREQ_HIGH);
++
++ return 0;
++}
++
++static struct notifier_block imx_bus_freq_pm_notifier = {
++ .notifier_call = bus_freq_pm_notify,
++};
++
++static struct notifier_block imx_busfreq_reboot_notifier = {
++ .notifier_call = busfreq_reboot_notifier_event,
++};
++
++
++static DEVICE_ATTR(enable, 0644, bus_freq_scaling_enable_show,
++ bus_freq_scaling_enable_store);
++static DEVICE_ATTR(vpu352, 0444, vpu352_enable_show,
++ NULL);
++#endif
++
++/*!
++ * This is the probe routine for the bus frequency driver.
++ *
++ * @param pdev The platform device structure
++ *
++ * @return The function returns 0 on success
++ *
++ */
++
++static int busfreq_probe(struct platform_device *pdev)
++{
++#ifdef CONFIG_ARM_IMX6Q_CPUFREQ
++	int err;
++
++ busfreq_dev = &pdev->dev;
++
++ pll2_400 = devm_clk_get(&pdev->dev, "pll2_pfd2_396m");
++ if (IS_ERR(pll2_400)) {
++ dev_err(busfreq_dev, "%s: failed to get pll2_pfd2_396m\n",
++ __func__);
++ return PTR_ERR(pll2_400);
++ }
++
++ pll2_200 = devm_clk_get(&pdev->dev, "pll2_198m");
++ if (IS_ERR(pll2_200)) {
++ dev_err(busfreq_dev, "%s: failed to get pll2_198m\n",
++ __func__);
++ return PTR_ERR(pll2_200);
++ }
++
++ pll2 = devm_clk_get(&pdev->dev, "pll2_bus");
++ if (IS_ERR(pll2)) {
++ dev_err(busfreq_dev, "%s: failed to get pll2_bus\n",
++ __func__);
++ return PTR_ERR(pll2);
++ }
++
++ cpu_clk = devm_clk_get(&pdev->dev, "arm");
++ if (IS_ERR(cpu_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get cpu_clk\n",
++ __func__);
++ return PTR_ERR(cpu_clk);
++ }
++
++ pll3 = devm_clk_get(&pdev->dev, "pll3_usb_otg");
++ if (IS_ERR(pll3)) {
++ dev_err(busfreq_dev, "%s: failed to get pll3_usb_otg\n",
++ __func__);
++ return PTR_ERR(pll3);
++ }
++
++ periph_clk = devm_clk_get(&pdev->dev, "periph");
++ if (IS_ERR(periph_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get periph\n",
++ __func__);
++ return PTR_ERR(periph_clk);
++ }
++
++ periph_pre_clk = devm_clk_get(&pdev->dev, "periph_pre");
++ if (IS_ERR(periph_pre_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get periph_pre\n",
++ __func__);
++ return PTR_ERR(periph_pre_clk);
++ }
++
++ periph_clk2 = devm_clk_get(&pdev->dev, "periph_clk2");
++ if (IS_ERR(periph_clk2)) {
++ dev_err(busfreq_dev, "%s: failed to get periph_clk2\n",
++ __func__);
++ return PTR_ERR(periph_clk2);
++ }
++
++ periph_clk2_sel = devm_clk_get(&pdev->dev, "periph_clk2_sel");
++ if (IS_ERR(periph_clk2_sel)) {
++ dev_err(busfreq_dev, "%s: failed to get periph_clk2_sel\n",
++ __func__);
++ return PTR_ERR(periph_clk2_sel);
++ }
++
++ osc_clk = devm_clk_get(&pdev->dev, "osc");
++ if (IS_ERR(osc_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get osc_clk\n",
++ __func__);
++ return PTR_ERR(osc_clk);
++ }
++
++ if (cpu_is_imx6dl()) {
++ axi_sel_clk = devm_clk_get(&pdev->dev, "axi_sel");
++ if (IS_ERR(axi_sel_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get axi_sel_clk\n",
++ __func__);
++ return PTR_ERR(axi_sel_clk);
++ }
++
++ pll3_pfd1_540m = devm_clk_get(&pdev->dev, "pll3_pfd1_540m");
++ if (IS_ERR(pll3_pfd1_540m)) {
++ dev_err(busfreq_dev,
++ "%s: failed to get pll3_pfd1_540m\n", __func__);
++ return PTR_ERR(pll3_pfd1_540m);
++ }
++ }
++
++ if (cpu_is_imx6sl()) {
++ pll1_sys = devm_clk_get(&pdev->dev, "pll1_sys");
++ if (IS_ERR(pll1_sys)) {
++ dev_err(busfreq_dev, "%s: failed to get pll1_sys\n",
++ __func__);
++ return PTR_ERR(pll1_sys);
++ }
++
++ ahb_clk = devm_clk_get(&pdev->dev, "ahb");
++ if (IS_ERR(ahb_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get ahb_clk\n",
++ __func__);
++ return PTR_ERR(ahb_clk);
++ }
++
++ ocram_clk = devm_clk_get(&pdev->dev, "ocram");
++ if (IS_ERR(ocram_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get ocram_clk\n",
++ __func__);
++ return PTR_ERR(ocram_clk);
++ }
++
++ pll1_sw_clk = devm_clk_get(&pdev->dev, "pll1_sw");
++ if (IS_ERR(pll1_sw_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get pll1_sw_clk\n",
++ __func__);
++ return PTR_ERR(pll1_sw_clk);
++ }
++
++ periph2_clk = devm_clk_get(&pdev->dev, "periph2");
++ if (IS_ERR(periph2_clk)) {
++ dev_err(busfreq_dev, "%s: failed to get periph2\n",
++ __func__);
++ return PTR_ERR(periph2_clk);
++ }
++
++ periph2_pre_clk = devm_clk_get(&pdev->dev, "periph2_pre");
++ if (IS_ERR(periph2_pre_clk)) {
++ dev_err(busfreq_dev,
++ "%s: failed to get periph2_pre_clk\n",
++ __func__);
++ return PTR_ERR(periph2_pre_clk);
++ }
++
++ periph2_clk2 = devm_clk_get(&pdev->dev, "periph2_clk2");
++ if (IS_ERR(periph2_clk2)) {
++ dev_err(busfreq_dev,
++ "%s: failed to get periph2_clk2\n",
++ __func__);
++ return PTR_ERR(periph2_clk2);
++ }
++
++ periph2_clk2_sel = devm_clk_get(&pdev->dev, "periph2_clk2_sel");
++ if (IS_ERR(periph2_clk2_sel)) {
++ dev_err(busfreq_dev,
++ "%s: failed to get periph2_clk2_sel\n",
++ __func__);
++ return PTR_ERR(periph2_clk2_sel);
++ }
++
++ step_clk = devm_clk_get(&pdev->dev, "step");
++ if (IS_ERR(step_clk)) {
++ dev_err(busfreq_dev,
++ "%s: failed to get step_clk\n",
++ __func__);
++			return PTR_ERR(step_clk);
++ }
++
++ }
++
++ err = sysfs_create_file(&busfreq_dev->kobj, &dev_attr_enable.attr);
++ if (err) {
++ dev_err(busfreq_dev,
++ "Unable to register sysdev entry for BUSFREQ");
++ return err;
++ }
++ err = sysfs_create_file(&busfreq_dev->kobj, &dev_attr_vpu352.attr);
++ if (err) {
++ dev_err(busfreq_dev,
++ "Unable to register sysdev entry for BUSFREQ");
++ return err;
++ }
++
++ if (of_property_read_u32(pdev->dev.of_node, "fsl,max_ddr_freq",
++ &ddr_normal_rate)) {
++ dev_err(busfreq_dev, "max_ddr_freq entry missing\n");
++ return -EINVAL;
++ }
++#endif
++
++ high_bus_freq_mode = 1;
++ med_bus_freq_mode = 0;
++ low_bus_freq_mode = 0;
++ audio_bus_freq_mode = 0;
++ ultra_low_bus_freq_mode = 0;
++
++#ifdef CONFIG_ARM_IMX6Q_CPUFREQ
++ bus_freq_scaling_is_active = 1;
++ bus_freq_scaling_initialized = 1;
++
++ ddr_low_rate = LPAPM_CLK;
++ if (cpu_is_imx6q()) {
++ if (of_property_read_u32(pdev->dev.of_node, "fsl,med_ddr_freq",
++ &ddr_med_rate)) {
++ dev_info(busfreq_dev,
++ "DDR medium rate not supported.\n");
++ ddr_med_rate = ddr_normal_rate;
++ }
++ }
++
++ INIT_DELAYED_WORK(&low_bus_freq_handler, reduce_bus_freq_handler);
++ INIT_DELAYED_WORK(&bus_freq_daemon, bus_freq_daemon_handler);
++ register_pm_notifier(&imx_bus_freq_pm_notifier);
++ register_reboot_notifier(&imx_busfreq_reboot_notifier);
++
++ if (cpu_is_imx6sl())
++ err = init_mmdc_lpddr2_settings(pdev);
++ else
++ err = init_mmdc_ddr3_settings(pdev);
++ if (err) {
++ dev_err(busfreq_dev, "Busfreq init of MMDC failed\n");
++ return err;
++ }
++#endif
++ return 0;
++}
++
++static const struct of_device_id imx6_busfreq_ids[] = {
++ { .compatible = "fsl,imx6_busfreq", },
++ { /* sentinel */ }
++};
++
++static struct platform_driver busfreq_driver = {
++ .driver = {
++ .name = "imx6_busfreq",
++ .owner = THIS_MODULE,
++ .of_match_table = imx6_busfreq_ids,
++ },
++ .probe = busfreq_probe,
++};
++
++/*!
++ * Initialise the busfreq_driver.
++ *
++ * @return 0 on success, or -ENODEV if driver registration fails.
++ */
++
++static int __init busfreq_init(void)
++{
++ if (vpu352) {
++ printk(KERN_INFO "VPU@352Mhz activated. Bus freq driver module not loading\n");
++ return 0;
++ }
++
++ if (platform_driver_register(&busfreq_driver) != 0)
++ return -ENODEV;
++
++ printk(KERN_INFO "Bus freq driver module loaded\n");
++
++ return 0;
++}
++
++static void __exit busfreq_cleanup(void)
++{
++#ifdef CONFIG_ARM_IMX6Q_CPUFREQ
++ sysfs_remove_file(&busfreq_dev->kobj, &dev_attr_enable.attr);
++
++ bus_freq_scaling_initialized = 0;
++#endif
++ /* Unregister the device structure */
++ platform_driver_unregister(&busfreq_driver);
++}
++
++__setup("vpu352=", vpu352_setup);
++module_init(busfreq_init);
++module_exit(busfreq_cleanup);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("BusFreq driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-4.1.3/arch/arm/mach-imx/busfreq_lpddr2.c linux-xbian-imx6/arch/arm/mach-imx/busfreq_lpddr2.c
+--- linux-4.1.3/arch/arm/mach-imx/busfreq_lpddr2.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/arch/arm/mach-imx/busfreq_lpddr2.c 2015-07-27 23:13:01.073153409 +0200
+@@ -0,0 +1,183 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file busfreq_lpddr2.c
++ *
++ * @brief iMX6 LPDDR2 frequency change specific file.
++ *
++ * @ingroup PM
++ */
++#include <asm/cacheflush.h>
++#include <asm/fncpy.h>
++#include <asm/io.h>
++#include <asm/mach/map.h>
++#include <asm/mach-types.h>
++#include <asm/tlb.h>
++#include <linux/clk.h>
++#include <linux/cpumask.h>
++#include <linux/delay.h>
++#include <linux/genalloc.h>
++#include <linux/interrupt.h>
++#include <linux/irqchip/arm-gic.h>
++#include <linux/kernel.h>
++#include <linux/mutex.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include <linux/proc_fs.h>
++#include <linux/sched.h>
++#include <linux/smp.h>
++
++#include "hardware.h"
++
++/* DDR settings */
++static void __iomem *mmdc_base;
++static void __iomem *anatop_base;
++static void __iomem *ccm_base;
++static void __iomem *l2_base;
++static struct device *busfreq_dev;
++static void *ddr_freq_change_iram_base;
++static int curr_ddr_rate;
++
++unsigned long reg_addrs[4];
++
++void (*mx6_change_lpddr2_freq)(u32 ddr_freq, int bus_freq_mode,
++ void *iram_addr) = NULL;
++
++extern unsigned int ddr_normal_rate;
++extern int low_bus_freq_mode;
++extern int ultra_low_bus_freq_mode;
++extern void mx6_lpddr2_freq_change(u32 freq, int bus_freq_mode,
++ void *iram_addr);
++
++
++#define LPDDR2_FREQ_CHANGE_SIZE 0x1000
++
++
++/* change the DDR frequency. */
++int update_lpddr2_freq(int ddr_rate)
++{
++ if (ddr_rate == curr_ddr_rate)
++ return 0;
++
++ dev_dbg(busfreq_dev, "\nBus freq set to %d start...\n", ddr_rate);
++
++ /*
++ * Flush the TLB, to ensure no TLB maintenance occurs
++ * when DDR is in self-refresh.
++ */
++ local_flush_tlb_all();
++ /* Now change DDR frequency. */
++ mx6_change_lpddr2_freq(ddr_rate,
++ (low_bus_freq_mode | ultra_low_bus_freq_mode),
++ reg_addrs);
++
++ curr_ddr_rate = ddr_rate;
++
++ dev_dbg(busfreq_dev, "\nBus freq set to %d done...\n", ddr_rate);
++
++ return 0;
++}
++
++int init_mmdc_lpddr2_settings(struct platform_device *busfreq_pdev)
++{
++ struct platform_device *ocram_dev;
++ unsigned int iram_paddr;
++ struct device_node *node;
++ struct gen_pool *iram_pool;
++
++ busfreq_dev = &busfreq_pdev->dev;
++ node = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-mmdc");
++ if (!node) {
++ printk(KERN_ERR "failed to find imx6sl-mmdc device tree data!\n");
++ return -EINVAL;
++ }
++ mmdc_base = of_iomap(node, 0);
++ WARN(!mmdc_base, "unable to map mmdc registers\n");
++
++	node = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-ccm");
++ if (!node) {
++ printk(KERN_ERR "failed to find imx6sl-ccm device tree data!\n");
++ return -EINVAL;
++ }
++ ccm_base = of_iomap(node, 0);
++ WARN(!ccm_base, "unable to map ccm registers\n");
++
++ node = of_find_compatible_node(NULL, NULL, "arm,pl310-cache");
++ if (!node) {
++ printk(KERN_ERR "failed to find imx6sl-pl310-cache device tree data!\n");
++ return -EINVAL;
++ }
++ l2_base = of_iomap(node, 0);
++ WARN(!l2_base, "unable to map PL310 registers\n");
++
++ node = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-anatop");
++ if (!node) {
++ printk(KERN_ERR "failed to find imx6sl-pl310-cache device tree data!\n");
++ return -EINVAL;
++ }
++ anatop_base = of_iomap(node, 0);
++ WARN(!anatop_base, "unable to map anatop registers\n");
++
++	node = of_find_compatible_node(NULL, NULL, "mmio-sram");
++ if (!node) {
++ dev_err(busfreq_dev, "%s: failed to find ocram node\n",
++ __func__);
++ return -EINVAL;
++ }
++
++ ocram_dev = of_find_device_by_node(node);
++ if (!ocram_dev) {
++ dev_err(busfreq_dev, "failed to find ocram device!\n");
++ return -EINVAL;
++ }
++
++ iram_pool = dev_get_gen_pool(&ocram_dev->dev);
++ if (!iram_pool) {
++ dev_err(busfreq_dev, "iram pool unavailable!\n");
++ return -EINVAL;
++ }
++
++ reg_addrs[0] = (unsigned long)anatop_base;
++ reg_addrs[1] = (unsigned long)ccm_base;
++ reg_addrs[2] = (unsigned long)mmdc_base;
++ reg_addrs[3] = (unsigned long)l2_base;
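++	/*
++	 * The SRAM-resident change code receives these bases in this
++	 * fixed order (anatop, ccm, mmdc, pl310); mx6_lpddr2_freq_change()
++	 * is assumed to index the array the same way.
++	 */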
++
++ ddr_freq_change_iram_base = (void *)gen_pool_alloc(iram_pool,
++ LPDDR2_FREQ_CHANGE_SIZE);
++ if (!ddr_freq_change_iram_base) {
++ dev_err(busfreq_dev,
++ "Cannot alloc iram for ddr freq change code!\n");
++ return -ENOMEM;
++ }
++
++ iram_paddr = gen_pool_virt_to_phys(iram_pool,
++ (unsigned long)ddr_freq_change_iram_base);
++ /*
++ * Need to remap the area here since we want
++ * the memory region to be executable.
++ */
++ ddr_freq_change_iram_base = __arm_ioremap(iram_paddr,
++ LPDDR2_FREQ_CHANGE_SIZE,
++ MT_MEMORY_RWX_NONCACHED);
++ mx6_change_lpddr2_freq = (void *)fncpy(ddr_freq_change_iram_base,
++ &mx6_lpddr2_freq_change, LPDDR2_FREQ_CHANGE_SIZE);
++
++ curr_ddr_rate = ddr_normal_rate;
++
++ return 0;
++}
+diff -Nur linux-4.1.3/arch/arm/mach-imx/clk.h linux-xbian-imx6/arch/arm/mach-imx/clk.h
+--- linux-4.1.3/arch/arm/mach-imx/clk.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/mach-imx/clk.h 2015-07-27 23:13:01.085110746 +0200
+@@ -55,6 +55,34 @@
+ shift, 0, &imx_ccm_lock, share_count);
+ }
+
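++/*
++ * Logging wrappers around the basic clk operations: most callers in
++ * the clock init code ignore the return values, so report failures
++ * here instead of dropping them silently.
++ */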
++static inline void imx_clk_prepare_enable(struct clk *clk)
++{
++ int ret = clk_prepare_enable(clk);
++
++ if (ret)
++ pr_err("failed to prepare and enable clk %s: %d\n",
++ __clk_get_name(clk), ret);
++}
++
++static inline int imx_clk_set_parent(struct clk *clk, struct clk *parent)
++{
++ int ret = clk_set_parent(clk, parent);
++
++ if (ret)
++ pr_err("failed to set parent of clk %s to %s: %d\n",
++ __clk_get_name(clk), __clk_get_name(parent), ret);
++ return ret;
++}
++
++static inline void imx_clk_set_rate(struct clk *clk, unsigned long rate)
++{
++ int ret = clk_set_rate(clk, rate);
++
++ if (ret)
++ pr_err("failed to set rate of clk %s to %ld: %d\n",
++ __clk_get_name(clk), rate, ret);
++}
++
+ struct clk *imx_clk_pfd(const char *name, const char *parent_name,
+ void __iomem *reg, u8 idx);
+
+diff -Nur linux-4.1.3/arch/arm/mach-imx/clk-imx6q.c linux-xbian-imx6/arch/arm/mach-imx/clk-imx6q.c
+--- linux-4.1.3/arch/arm/mach-imx/clk-imx6q.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/mach-imx/clk-imx6q.c 2015-07-27 23:13:01.081124967 +0200
+@@ -24,7 +24,6 @@
+ #include "clk.h"
+ #include "common.h"
+ #include "hardware.h"
+-
+ static const char *step_sels[] = { "osc", "pll2_pfd2_396m", };
+ static const char *pll1_sw_sels[] = { "pll1_sys", "step", };
+ static const char *periph_pre_sels[] = { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
+@@ -41,6 +40,8 @@
+ static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
+ static const char *ldb_di_sels[] = { "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
+ static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
++static const char *ldb_di0_div_sels[] = { "ldb_di0_div_3_5", "ldb_di0_div_7", };
++static const char *ldb_di1_div_sels[] = { "ldb_di1_div_3_5", "ldb_di1_div_7", };
+ static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+ static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+ static const char *ipu2_di0_sels[] = { "ipu2_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
+@@ -119,8 +120,118 @@
+ static unsigned int share_count_ssi1;
+ static unsigned int share_count_ssi2;
+ static unsigned int share_count_ssi3;
++static unsigned int share_count_spdif;
+ static unsigned int share_count_mipi_core_cfg;
+
++static void __iomem *ccm_base;
++
++static void init_ldb_clks(struct clk *new_parent)
++{
++ u32 reg;
++
++ /*
++ * Need to follow a strict procedure when changing the LDB
++ * clock, else we can introduce a glitch. Things to keep in
++ * mind:
++ * 1. The current and new parent clocks must be disabled.
++ * 2. The default clock for ldb_di0_clk is mmdc_ch1 which has
++ * no CG bit.
++ * 3. In the RTL implementation of the LDB_DI_CLK_SEL mux
++ * the top four options are in one mux and the PLL3 option along
++ * with another option is in the second mux. There is third mux
++ * used to decide between the first and second mux.
++ * The code below switches the parent to the bottom mux first
++ * and then manipulates the top mux. This ensures that no glitch
++ * will enter the divider.
++ *
++ * Need to disable MMDC_CH1 clock manually as there is no CG bit
++ * for this clock. The only way to disable this clock is to move
++ * it to pll3_sw_clk and then disable pll3_sw_clk.
++ * Make sure periph2_clk2_sel is set to pll3_sw_clk
++ */
++ reg = readl_relaxed(ccm_base + 0x18);
++ reg &= ~(1 << 20);
++ writel_relaxed(reg, ccm_base + 0x18);
++
++ /*
++ * Set MMDC_CH1 mask bit.
++ */
++ reg = readl_relaxed(ccm_base + 0x4);
++ reg |= 1 << 16;
++ writel_relaxed(reg, ccm_base + 0x4);
++
++ /*
++ * Set the periph2_clk_sel to the top mux so that
++ * mmdc_ch1 is from pll3_sw_clk.
++ */
++ reg = readl_relaxed(ccm_base + 0x14);
++ reg |= 1 << 26;
++ writel_relaxed(reg, ccm_base + 0x14);
++
++ /*
++ * Wait for the clock switch.
++ */
++ while (readl_relaxed(ccm_base + 0x48))
++ ;
++
++ /*
++ * Disable pll3_sw_clk by selecting the bypass clock source.
++ */
++ reg = readl_relaxed(ccm_base + 0xc);
++ reg |= 1 << 0;
++ writel_relaxed(reg, ccm_base + 0xc);
++
++ /*
++ * Set the ldb_di0_clk and ldb_di1_clk to 111b.
++ */
++ reg = readl_relaxed(ccm_base + 0x2c);
++ reg |= ((7 << 9) | (7 << 12));
++ writel_relaxed(reg, ccm_base + 0x2c);
++
++ /*
++ * Set the ldb_di0_clk and ldb_di1_clk to 100b.
++ */
++ reg = readl_relaxed(ccm_base + 0x2c);
++ reg &= ~((7 << 9) | (7 << 12));
++ reg |= ((4 << 9) | (4 << 12));
++ writel_relaxed(reg, ccm_base + 0x2c);
++
++ /*
++ * Perform the LDB parent clock switch.
++ */
++ imx_clk_set_parent(clk[IMX6QDL_CLK_LDB_DI0_SEL], new_parent);
++ imx_clk_set_parent(clk[IMX6QDL_CLK_LDB_DI1_SEL], new_parent);
++
++ /*
++ * Unbypass pll3_sw_clk.
++ */
++ reg = readl_relaxed(ccm_base + 0xc);
++ reg &= ~(1 << 0);
++ writel_relaxed(reg, ccm_base + 0xc);
++
++ /*
++ * Set the periph2_clk_sel back to the bottom mux so that
++ * mmdc_ch1 is from its original parent.
++ */
++ reg = readl_relaxed(ccm_base + 0x14);
++ reg &= ~(1 << 26);
++ writel_relaxed(reg, ccm_base + 0x14);
++
++ /*
++ * Wait for the clock switch.
++ */
++ while (readl_relaxed(ccm_base + 0x48))
++ ;
++
++ /*
++ * Clear MMDC_CH1 mask bit.
++ */
++ reg = readl_relaxed(ccm_base + 0x4);
++ reg &= ~(1 << 16);
++ writel_relaxed(reg, ccm_base + 0x4);
++
++}
++
+ static void __init imx6q_clocks_init(struct device_node *ccm_node)
+ {
+ struct device_node *np;
+@@ -174,13 +285,13 @@
+ clk[IMX6QDL_PLL7_BYPASS] = imx_clk_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT);
+
+ /* Do not bypass PLLs initially */
+- clk_set_parent(clk[IMX6QDL_PLL1_BYPASS], clk[IMX6QDL_CLK_PLL1]);
+- clk_set_parent(clk[IMX6QDL_PLL2_BYPASS], clk[IMX6QDL_CLK_PLL2]);
+- clk_set_parent(clk[IMX6QDL_PLL3_BYPASS], clk[IMX6QDL_CLK_PLL3]);
+- clk_set_parent(clk[IMX6QDL_PLL4_BYPASS], clk[IMX6QDL_CLK_PLL4]);
+- clk_set_parent(clk[IMX6QDL_PLL5_BYPASS], clk[IMX6QDL_CLK_PLL5]);
+- clk_set_parent(clk[IMX6QDL_PLL6_BYPASS], clk[IMX6QDL_CLK_PLL6]);
+- clk_set_parent(clk[IMX6QDL_PLL7_BYPASS], clk[IMX6QDL_CLK_PLL7]);
++ imx_clk_set_parent(clk[IMX6QDL_PLL1_BYPASS], clk[IMX6QDL_CLK_PLL1]);
++ imx_clk_set_parent(clk[IMX6QDL_PLL2_BYPASS], clk[IMX6QDL_CLK_PLL2]);
++ imx_clk_set_parent(clk[IMX6QDL_PLL3_BYPASS], clk[IMX6QDL_CLK_PLL3]);
++ imx_clk_set_parent(clk[IMX6QDL_PLL4_BYPASS], clk[IMX6QDL_CLK_PLL4]);
++ imx_clk_set_parent(clk[IMX6QDL_PLL5_BYPASS], clk[IMX6QDL_CLK_PLL5]);
++ imx_clk_set_parent(clk[IMX6QDL_PLL6_BYPASS], clk[IMX6QDL_CLK_PLL6]);
++ imx_clk_set_parent(clk[IMX6QDL_PLL7_BYPASS], clk[IMX6QDL_CLK_PLL7]);
+
+ clk[IMX6QDL_CLK_PLL1_SYS] = imx_clk_gate("pll1_sys", "pll1_bypass", base + 0x00, 13);
+ clk[IMX6QDL_CLK_PLL2_BUS] = imx_clk_gate("pll2_bus", "pll2_bypass", base + 0x30, 13);
+@@ -259,7 +370,7 @@
+ clk[IMX6QDL_CLK_PLL5_VIDEO_DIV] = clk_register_divider_table(NULL, "pll5_video_div", "pll5_post_div", CLK_SET_RATE_PARENT, base + 0x170, 30, 2, 0, video_div_table, &imx_ccm_lock);
+
+ np = ccm_node;
+- base = of_iomap(np, 0);
++ ccm_base = base = of_iomap(np, 0);
+ WARN_ON(!base);
+
+ imx6q_pm_set_ccm_base(base);
+@@ -286,6 +397,8 @@
+ clk[IMX6QDL_CLK_IPU2_SEL] = imx_clk_mux("ipu2_sel", base + 0x3c, 14, 2, ipu_sels, ARRAY_SIZE(ipu_sels));
+ clk[IMX6QDL_CLK_LDB_DI0_SEL] = imx_clk_mux_flags("ldb_di0_sel", base + 0x2c, 9, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT);
+ clk[IMX6QDL_CLK_LDB_DI1_SEL] = imx_clk_mux_flags("ldb_di1_sel", base + 0x2c, 12, 3, ldb_di_sels, ARRAY_SIZE(ldb_di_sels), CLK_SET_RATE_PARENT);
++ clk[IMX6QDL_CLK_LDB_DI0_DIV_SEL] = imx_clk_mux_flags("ldb_di0_div_sel", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels), CLK_SET_RATE_PARENT);
++ clk[IMX6QDL_CLK_LDB_DI1_DIV_SEL] = imx_clk_mux_flags("ldb_di1_div_sel", base + 0x20, 11, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels), CLK_SET_RATE_PARENT);
+ clk[IMX6QDL_CLK_IPU1_DI0_PRE_SEL] = imx_clk_mux_flags("ipu1_di0_pre_sel", base + 0x34, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
+ clk[IMX6QDL_CLK_IPU1_DI1_PRE_SEL] = imx_clk_mux_flags("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
+ clk[IMX6QDL_CLK_IPU2_DI0_PRE_SEL] = imx_clk_mux_flags("ipu2_di0_pre_sel", base + 0x38, 6, 3, ipu_di_pre_sels, ARRAY_SIZE(ipu_di_pre_sels), CLK_SET_RATE_PARENT);
+@@ -335,9 +448,9 @@
+ clk[IMX6QDL_CLK_IPU1_PODF] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3);
+ clk[IMX6QDL_CLK_IPU2_PODF] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3);
+ clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
+- clk[IMX6QDL_CLK_LDB_DI0_PODF] = imx_clk_divider_flags("ldb_di0_podf", "ldb_di0_div_3_5", base + 0x20, 10, 1, 0);
++ clk[IMX6QDL_CLK_LDB_DI0_DIV_7] = imx_clk_fixed_factor("ldb_di0_div_7", "ldb_di0_sel", 1, 7);
+ clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
+- clk[IMX6QDL_CLK_LDB_DI1_PODF] = imx_clk_divider_flags("ldb_di1_podf", "ldb_di1_div_3_5", base + 0x20, 11, 1, 0);
++ clk[IMX6QDL_CLK_LDB_DI1_DIV_7] = imx_clk_fixed_factor("ldb_di1_div_7", "ldb_di1_sel", 1, 7);
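++	/*
++	 * The fixed factors above implement divide-by-3.5 (x2/7) and
++	 * divide-by-7. With ldb_di0_sel parented to pll2_pfd0_352m, as
++	 * init_ldb_clks() arranges below, the two options come out at
++	 * roughly 100.6MHz and 50.3MHz for the ldb_diN_div_sel muxes.
++	 */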
+ clk[IMX6QDL_CLK_IPU1_DI0_PRE] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3);
+ clk[IMX6QDL_CLK_IPU1_DI1_PRE] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3);
+ clk[IMX6QDL_CLK_IPU2_DI0_PRE] = imx_clk_divider("ipu2_di0_pre", "ipu2_di0_pre_sel", base + 0x38, 3, 3);
+@@ -378,6 +491,8 @@
+ clk[IMX6QDL_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_root", base + 0x68, 16);
+ clk[IMX6QDL_CLK_CAN2_IPG] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18);
+ clk[IMX6QDL_CLK_CAN2_SERIAL] = imx_clk_gate2("can2_serial", "can_root", base + 0x68, 20);
++ clk[IMX6QDL_CLK_DCIC1] = imx_clk_gate2("dcic1", "ipu1_podf", base + 0x68, 24);
++ clk[IMX6QDL_CLK_DCIC2] = imx_clk_gate2("dcic2", "ipu2_podf", base + 0x68, 26);
+ clk[IMX6QDL_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_root", base + 0x6c, 0);
+ clk[IMX6QDL_CLK_ECSPI2] = imx_clk_gate2("ecspi2", "ecspi_root", base + 0x6c, 2);
+ clk[IMX6QDL_CLK_ECSPI3] = imx_clk_gate2("ecspi3", "ecspi_root", base + 0x6c, 4);
+@@ -414,9 +529,9 @@
+ clk[IMX6QDL_CLK_IPU1_DI1] = imx_clk_gate2("ipu1_di1", "ipu1_di1_sel", base + 0x74, 4);
+ clk[IMX6QDL_CLK_IPU2] = imx_clk_gate2("ipu2", "ipu2_podf", base + 0x74, 6);
+ clk[IMX6QDL_CLK_IPU2_DI0] = imx_clk_gate2("ipu2_di0", "ipu2_di0_sel", base + 0x74, 8);
+- clk[IMX6QDL_CLK_LDB_DI0] = imx_clk_gate2("ldb_di0", "ldb_di0_podf", base + 0x74, 12);
+- clk[IMX6QDL_CLK_LDB_DI1] = imx_clk_gate2("ldb_di1", "ldb_di1_podf", base + 0x74, 14);
+ clk[IMX6QDL_CLK_IPU2_DI1] = imx_clk_gate2("ipu2_di1", "ipu2_di1_sel", base + 0x74, 10);
++ clk[IMX6QDL_CLK_LDB_DI0] = imx_clk_gate2("ldb_di0", "ldb_di0_div_sel", base + 0x74, 12);
++ clk[IMX6QDL_CLK_LDB_DI1] = imx_clk_gate2("ldb_di1", "ldb_di1_div_sel", base + 0x74, 14);
+ clk[IMX6QDL_CLK_HSI_TX] = imx_clk_gate2_shared("hsi_tx", "hsi_tx_podf", base + 0x74, 16, &share_count_mipi_core_cfg);
+ clk[IMX6QDL_CLK_MIPI_CORE_CFG] = imx_clk_gate2_shared("mipi_core_cfg", "video_27m", base + 0x74, 16, &share_count_mipi_core_cfg);
+ clk[IMX6QDL_CLK_MIPI_IPG] = imx_clk_gate2_shared("mipi_ipg", "ipg", base + 0x74, 16, &share_count_mipi_core_cfg);
+@@ -446,7 +561,8 @@
+ clk[IMX6QDL_CLK_SATA] = imx_clk_gate2("sata", "ahb", base + 0x7c, 4);
+ clk[IMX6QDL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
+ clk[IMX6QDL_CLK_SPBA] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
+- clk[IMX6QDL_CLK_SPDIF] = imx_clk_gate2("spdif", "spdif_podf", base + 0x7c, 14);
++ clk[IMX6QDL_CLK_SPDIF] = imx_clk_gate2_shared("spdif", "spdif_podf", base + 0x7c, 14, &share_count_spdif);
++ clk[IMX6QDL_CLK_SPDIF_GCLK] = imx_clk_gate2_shared("spdif_gclk", "ipg", base + 0x7c, 14, &share_count_spdif);
+ clk[IMX6QDL_CLK_SSI1_IPG] = imx_clk_gate2_shared("ssi1_ipg", "ipg", base + 0x7c, 18, &share_count_ssi1);
+ clk[IMX6QDL_CLK_SSI2_IPG] = imx_clk_gate2_shared("ssi2_ipg", "ipg", base + 0x7c, 20, &share_count_ssi2);
+ clk[IMX6QDL_CLK_SSI3_IPG] = imx_clk_gate2_shared("ssi3_ipg", "ipg", base + 0x7c, 22, &share_count_ssi3);
+@@ -479,54 +595,104 @@
+ clk_data.clk_num = ARRAY_SIZE(clk);
+ of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+
++ clk_register_clkdev(clk[IMX6QDL_CLK_GPT_3M], "gpt_3m", "imx-gpt.0");
+ clk_register_clkdev(clk[IMX6QDL_CLK_ENET_REF], "enet_ref", NULL);
+
+ if ((imx_get_soc_revision() != IMX_CHIP_REVISION_1_0) ||
+ cpu_is_imx6dl()) {
+- clk_set_parent(clk[IMX6QDL_CLK_LDB_DI0_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
+- clk_set_parent(clk[IMX6QDL_CLK_LDB_DI1_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
++ imx_clk_set_parent(clk[IMX6QDL_CLK_LDB_DI0_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
++ imx_clk_set_parent(clk[IMX6QDL_CLK_LDB_DI1_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
+ }
+
+- clk_set_parent(clk[IMX6QDL_CLK_IPU1_DI0_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
+- clk_set_parent(clk[IMX6QDL_CLK_IPU1_DI1_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
+- clk_set_parent(clk[IMX6QDL_CLK_IPU2_DI0_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
+- clk_set_parent(clk[IMX6QDL_CLK_IPU2_DI1_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
+- clk_set_parent(clk[IMX6QDL_CLK_IPU1_DI0_SEL], clk[IMX6QDL_CLK_IPU1_DI0_PRE]);
+- clk_set_parent(clk[IMX6QDL_CLK_IPU1_DI1_SEL], clk[IMX6QDL_CLK_IPU1_DI1_PRE]);
+- clk_set_parent(clk[IMX6QDL_CLK_IPU2_DI0_SEL], clk[IMX6QDL_CLK_IPU2_DI0_PRE]);
+- clk_set_parent(clk[IMX6QDL_CLK_IPU2_DI1_SEL], clk[IMX6QDL_CLK_IPU2_DI1_PRE]);
++ init_ldb_clks(clk[IMX6QDL_CLK_PLL2_PFD0_352M]);
++ imx_clk_set_parent(clk[IMX6QDL_CLK_IPU1_DI0_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
++ imx_clk_set_parent(clk[IMX6QDL_CLK_IPU1_DI1_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
++ imx_clk_set_parent(clk[IMX6QDL_CLK_IPU2_DI0_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
++ imx_clk_set_parent(clk[IMX6QDL_CLK_IPU2_DI1_PRE_SEL], clk[IMX6QDL_CLK_PLL5_VIDEO_DIV]);
++ imx_clk_set_parent(clk[IMX6QDL_CLK_IPU1_DI0_SEL], clk[IMX6QDL_CLK_IPU1_DI0_PRE]);
++ imx_clk_set_parent(clk[IMX6QDL_CLK_IPU1_DI1_SEL], clk[IMX6QDL_CLK_IPU1_DI1_PRE]);
++ imx_clk_set_parent(clk[IMX6QDL_CLK_IPU2_DI0_SEL], clk[IMX6QDL_CLK_IPU2_DI0_PRE]);
++ imx_clk_set_parent(clk[IMX6QDL_CLK_IPU2_DI1_SEL], clk[IMX6QDL_CLK_IPU2_DI1_PRE]);
++
++ if (cpu_is_imx6dl())
++ imx_clk_set_parent(clk[IMX6QDL_CLK_IPU1_SEL], clk[IMX6QDL_CLK_PLL3_PFD1_540M]);
+
+ /*
+ * The gpmi needs 100MHz frequency in the EDO/Sync mode,
+ * We can not get the 100MHz from the pll2_pfd0_352m.
+ * So choose pll2_pfd2_396m as enfc_sel's parent.
+ */
+- clk_set_parent(clk[IMX6QDL_CLK_ENFC_SEL], clk[IMX6QDL_CLK_PLL2_PFD2_396M]);
+-
+- for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
+- clk_prepare_enable(clk[clks_init_on[i]]);
++ imx_clk_set_parent(clk[IMX6QDL_CLK_ENFC_SEL], clk[IMX6QDL_CLK_PLL2_PFD2_396M]);
+
++	/* gpu clock initialization */
++ /*
++	 * On mx6dl, the 2d core clocks (sel, podf) are sourced from the
++	 * 3d shader core clock, but the 3d shader clock multiplexer on
++	 * mx6dl is different. For instance, the equivalent of
++	 * pll2_pfd_594M on mx6q is pll2_pfd_528M on mx6dl.
++ */
++#if 0
++ imx_clk_set_parent(clk[IMX6QDL_CLK_GPU3D_SHADER_SEL], clk[IMX6QDL_CLK_PLL2_PFD1_594M]);
++ if (cpu_is_imx6dl()) {
++ imx_clk_set_rate(clk[IMX6QDL_CLK_GPU3D_SHADER], 528000000);
++ /* for mx6dl, change gpu3d_core parent to 594_PFD*/
++ imx_clk_set_parent(clk[IMX6QDL_CLK_GPU3D_CORE_SEL], clk[IMX6QDL_CLK_PLL2_PFD1_594M]);
++ imx_clk_set_rate(clk[IMX6QDL_CLK_GPU3D_CORE], 528000000);
++ /* for mx6dl, change gpu2d_core parent to 594_PFD*/
++ imx_clk_set_parent(clk[IMX6QDL_CLK_GPU2D_CORE_SEL], clk[IMX6QDL_CLK_PLL2_PFD1_594M]);
++ imx_clk_set_rate(clk[IMX6QDL_CLK_GPU2D_CORE], 528000000);
++ } else if (cpu_is_imx6q()) {
++ imx_clk_set_rate(clk[IMX6QDL_CLK_GPU3D_SHADER], 594000000);
++ imx_clk_set_parent(clk[IMX6QDL_CLK_GPU3D_CORE_SEL], clk[IMX6QDL_CLK_MMDC_CH0_AXI]);
++ imx_clk_set_rate(clk[IMX6QDL_CLK_GPU3D_CORE], 528000000);
++ imx_clk_set_parent(clk[IMX6QDL_CLK_GPU2D_CORE_SEL], clk[IMX6QDL_CLK_PLL3_USB_OTG]);
++ imx_clk_set_rate(clk[IMX6QDL_CLK_GPU2D_CORE], 480000000);
++ }
++#endif
+ if (IS_ENABLED(CONFIG_USB_MXS_PHY)) {
+- clk_prepare_enable(clk[IMX6QDL_CLK_USBPHY1_GATE]);
+- clk_prepare_enable(clk[IMX6QDL_CLK_USBPHY2_GATE]);
++ imx_clk_prepare_enable(clk[IMX6QDL_CLK_USBPHY1_GATE]);
++ imx_clk_prepare_enable(clk[IMX6QDL_CLK_USBPHY2_GATE]);
+ }
+
+ /*
+ * Let's initially set up CLKO with OSC24M, since this configuration
+ * is widely used by imx6q board designs to clock audio codec.
+ */
+- ret = clk_set_parent(clk[IMX6QDL_CLK_CKO2_SEL], clk[IMX6QDL_CLK_OSC]);
++ ret = imx_clk_set_parent(clk[IMX6QDL_CLK_CKO2_SEL], clk[IMX6QDL_CLK_OSC]);
+ if (!ret)
+- ret = clk_set_parent(clk[IMX6QDL_CLK_CKO], clk[IMX6QDL_CLK_CKO2]);
++ ret = imx_clk_set_parent(clk[IMX6QDL_CLK_CKO], clk[IMX6QDL_CLK_CKO2]);
+ if (ret)
+ pr_warn("failed to set up CLKO: %d\n", ret);
+
+ /* Audio-related clocks configuration */
+- clk_set_parent(clk[IMX6QDL_CLK_SPDIF_SEL], clk[IMX6QDL_CLK_PLL3_PFD3_454M]);
++ imx_clk_set_parent(clk[IMX6QDL_CLK_SPDIF_SEL], clk[IMX6QDL_CLK_PLL3_PFD3_454M]);
+
+ /* All existing boards with PCIe use LVDS1 */
+ if (IS_ENABLED(CONFIG_PCI_IMX6))
+- clk_set_parent(clk[IMX6QDL_CLK_LVDS1_SEL], clk[IMX6QDL_CLK_SATA_REF_100M]);
++ imx_clk_set_parent(clk[IMX6QDL_CLK_LVDS1_SEL], clk[IMX6QDL_CLK_SATA_REF_100M]);
++
++ /*
++ * Enable clocks only after both parent and rate are all initialized
++ * as needed
++ */
++ for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
++ imx_clk_prepare_enable(clk[clks_init_on[i]]);
++
++ /*
++	 * If VPU 352M is enabled, PLL2_PFD2 needs to be set to 352M.
++	 * cpufreq is then disabled, since VDDSOC/PU must stay at the
++	 * highest voltage and scaling the cpu freq saves no power.
++	 * busfreq is disabled as well, since PLL2_PFD2 is no longer at
++	 * its default freq. In short, all modules sourcing a clock
++	 * from PLL2_PFD2 are affected.
++ */
++ if (vpu352) {
++ clk_set_rate(clk[IMX6QDL_CLK_PLL2_PFD2_396M], 352000000);
++ clk_set_parent(clk[IMX6QDL_CLK_VPU_AXI_SEL], clk[IMX6QDL_CLK_PLL2_PFD2_396M]);
++ pr_info("VPU 352M is enabled!\n");
++ }
+
+ /* Set initial power mode */
+ imx6q_set_lpm(WAIT_CLOCKED);
+diff -Nur linux-4.1.3/arch/arm/mach-imx/clk-pllv3.c linux-xbian-imx6/arch/arm/mach-imx/clk-pllv3.c
+--- linux-4.1.3/arch/arm/mach-imx/clk-pllv3.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/mach-imx/clk-pllv3.c 2015-07-27 23:13:01.085110746 +0200
+@@ -23,6 +23,7 @@
+ #define PLL_DENOM_OFFSET 0x20
+
+ #define BM_PLL_POWER (0x1 << 12)
++#define BM_PLL_BYPASS (0x1 << 16)
+ #define BM_PLL_LOCK (0x1 << 31)
+
+ /**
+@@ -237,9 +238,10 @@
+ struct clk_pllv3 *pll = to_clk_pllv3(hw);
+ unsigned long min_rate = parent_rate * 27;
+ unsigned long max_rate = parent_rate * 54;
+- u32 val, div;
++ u32 val, newval, div;
+ u32 mfn, mfd = 1000000;
+ s64 temp64;
++ int ret;
+
+ if (rate < min_rate || rate > max_rate)
+ return -EINVAL;
+@@ -251,13 +253,27 @@
+ mfn = temp64;
+
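++	/*
++	 * Worked example of the math above (illustration only): with
++	 * parent_rate = 24MHz and rate = 650MHz, div = 27 (648MHz) and
++	 * mfn = (2MHz * 1000000) / 24MHz = 83333, so the PLL settles at
++	 * 24MHz * (27 + 83333/1000000) ~= 650MHz.
++	 */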
+ val = readl_relaxed(pll->base);
+- val &= ~pll->div_mask;
+- val |= div;
+- writel_relaxed(val, pll->base);
++
++ /* set the PLL into bypass mode */
++ newval = val | BM_PLL_BYPASS;
++ writel_relaxed(newval, pll->base);
++
++ /* configure the new frequency */
++ newval &= ~pll->div_mask;
++ newval |= div;
++ writel_relaxed(newval, pll->base);
+ writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET);
+ writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET);
+
+- return clk_pllv3_wait_lock(pll);
++ ret = clk_pllv3_wait_lock(pll);
++ if (ret == 0 && val & BM_PLL_POWER) {
++ /* only if it locked can we switch back to the PLL */
++ newval &= ~BM_PLL_BYPASS;
++ newval |= val & BM_PLL_BYPASS;
++ writel_relaxed(newval, pll->base);
++ }
++
++ return ret;
+ }
+
+ static const struct clk_ops clk_pllv3_av_ops = {
+diff -Nur linux-4.1.3/arch/arm/mach-imx/common.h linux-xbian-imx6/arch/arm/mach-imx/common.h
+--- linux-4.1.3/arch/arm/mach-imx/common.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/mach-imx/common.h 2015-07-27 23:13:01.085110746 +0200
+@@ -56,6 +56,7 @@
+ void mxc_set_cpu_type(unsigned int type);
+ void mxc_restart(enum reboot_mode, const char *);
+ void mxc_arch_reset_init(void __iomem *);
++void mxc_arch_reset_init_dt(void);
+ int mx51_revision(void);
+ int mx53_revision(void);
+ void imx_set_aips(void __iomem *);
+@@ -86,6 +87,8 @@
+ MX3_SLEEP,
+ };
+
++extern int vpu352;
++
+ void mx3_cpu_lp_set(enum mx3_cpu_pwr_mode mode);
+ void imx_print_silicon_rev(const char *cpu, int srev);
+
+@@ -102,6 +105,7 @@
+ static inline void imx_smp_prepare(void) {}
+ #endif
+ void imx_src_init(void);
++
+ void imx_gpc_pre_suspend(bool arm_power_off);
+ void imx_gpc_post_resume(void);
+ void imx_gpc_mask_all(void);
+diff -Nur linux-4.1.3/arch/arm/mach-imx/ddr3_freq_imx6.S linux-xbian-imx6/arch/arm/mach-imx/ddr3_freq_imx6.S
+--- linux-4.1.3/arch/arm/mach-imx/ddr3_freq_imx6.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/arch/arm/mach-imx/ddr3_freq_imx6.S 2015-07-27 23:13:01.089096525 +0200
+@@ -0,0 +1,893 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/linkage.h>
++
++#define MMDC0_MDPDC 0x4
++#define MMDC0_MDCF0 0x0c
++#define MMDC0_MDCF1 0x10
++#define MMDC0_MDMISC 0x18
++#define MMDC0_MDSCR 0x1c
++#define MMDC0_MAPSR 0x404
++#define MMDC0_MADPCR0 0x410
++#define MMDC0_MPZQHWCTRL 0x800
++#define MMDC1_MPZQHWCTRL 0x4800
++#define MMDC0_MPODTCTRL 0x818
++#define MMDC1_MPODTCTRL 0x4818
++#define MMDC0_MPDGCTRL0 0x83c
++#define MMDC1_MPDGCTRL0 0x483c
++#define MMDC0_MPMUR0 0x8b8
++#define MMDC1_MPMUR0 0x48b8
++
++#define CCM_CBCDR 0x14
++#define CCM_CBCMR 0x18
++#define CCM_CSCMR1 0x1c
++#define CCM_CDHIPR 0x48
++
++#define L2_CACHE_SYNC 0x730
++
++ .align 3
++
++ .macro switch_to_528MHz
++
++ /* check if periph_clk_sel is already set */
++ ldr r0, [r6, #CCM_CBCDR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ beq set_ahb_podf_before_switch
++
++ /* change periph_clk to be sourced from pll3_clk. */
++ ldr r0, [r6, #CCM_CBCMR]
++ bic r0, r0, #(3 << 12)
++ str r0, [r6, #CCM_CBCMR]
++
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(0x38 << 20)
++ str r0, [r6, #CCM_CBCDR]
++
++ /*
++ * set the AHB dividers before the switch,
++ * don't change AXI clock divider,
++	 * set the MMDC_DIV=1, AXI_DIV=2, AHB_DIV=4.
++ */
++ ldr r0, [r6, #CCM_CBCDR]
++ ldr r2, =0x3f1f00
++ bic r0, r0, r2
++ orr r0, r0, #0xd00
++ orr r0, r0, #(1 << 16)
++ str r0, [r6, #CCM_CBCDR]
++
++wait_div_update528:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne wait_div_update528
++
++ /* now switch periph_clk to pll3_main_clk. */
++ ldr r0, [r6, #CCM_CBCDR]
++ orr r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch3:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch3
++
++ b switch_pre_periph_clk_528
++
++set_ahb_podf_before_switch:
++ /*
++ * set the MMDC_DIV=1, AXI_DIV = 2, AHB_DIV=4,
++ */
++ ldr r0, [r6, #CCM_CBCDR]
++ ldr r2, =0x3f1f00
++ bic r0, r0, r2
++ orr r0, r0, #0xd00
++ orr r0, r0, #(1 << 16)
++ str r0, [r6, #CCM_CBCDR]
++
++wait_div_update528_1:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne wait_div_update528_1
++
++switch_pre_periph_clk_528:
++
++ /* now switch pre_periph_clk to PLL2_528MHz. */
++ ldr r0, [r6, #CCM_CBCMR]
++ bic r0, r0, #(0xc << 16)
++ str r0, [r6, #CCM_CBCMR]
++
++ /* now switch periph_clk back. */
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch4:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch4
++
++ .endm
++
++ .macro switch_to_400MHz
++
++ /* check if periph_clk_sel is already set. */
++ ldr r0, [r6, #CCM_CBCDR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ beq set_ahb_podf_before_switch1
++
++ /* change periph_clk to be sourced from pll3_clk. */
++ ldr r0, [r6, #CCM_CBCMR]
++ bic r0, r0, #(3 << 12)
++ str r0, [r6, #CCM_CBCMR]
++
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(0x38 << 24)
++ str r0, [r6, #CCM_CBCDR]
++
++ /* now switch periph_clk to pll3_main_clk. */
++ ldr r0, [r6, #CCM_CBCDR]
++ orr r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch5:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch5
++
++ b switch_pre_periph_clk_400
++
++set_ahb_podf_before_switch1:
++ /*
++ * set the MMDC_DIV=1, AXI_DIV = 2, AHB_DIV=3,
++ */
++ ldr r0, [r6, #CCM_CBCDR]
++ ldr r2, =0x3f1f00
++ bic r0, r0, r2
++ orr r0, r0, #(0x9 << 8)
++ orr r0, r0, #(1 << 16)
++ str r0, [r6, #CCM_CBCDR]
++
++wait_div_update400_1:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne wait_div_update400_1
++
++switch_pre_periph_clk_400:
++
++ /* now switch pre_periph_clk to PFD_400MHz. */
++ ldr r0, [r6, #CCM_CBCMR]
++ bic r0, r0, #(0xc << 16)
++ orr r0, r0, #(0x4 << 16)
++ str r0, [r6, #CCM_CBCMR]
++
++ /* now switch periph_clk back. */
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch6:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch6
++
++ /*
++ * change AHB divider so that we are at 400/3=133MHz.
++ * don't change AXI clock divider.
++ * set the MMDC_DIV=1, AXI_DIV=2, AHB_DIV=3,
++ */
++ ldr r0, [r6, #CCM_CBCDR]
++ ldr r2, =0x3f1f00
++ bic r0, r0, r2
++ orr r0, r0, #(0x9 << 8)
++ orr r0, r0, #(1 << 16)
++ str r0, [r6, #CCM_CBCDR]
++
++wait_div_update400_2:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne wait_div_update400_2
++
++ .endm
++
++ .macro switch_to_50MHz
++
++ /* check if periph_clk_sel is already set. */
++ ldr r0, [r6, #CCM_CBCDR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ beq switch_pre_periph_clk_50
++
++ /*
++ * periph_clk will ultimately be sourced from PLL2_PFD_200M;
++ * first park periph_clk on pll3_clk and set the
++ * divider to 1 while the muxes are reprogrammed.
++ */
++ ldr r0, [r6, #CCM_CBCMR]
++ bic r0, r0, #(0x3 << 12)
++ str r0, [r6, #CCM_CBCMR]
++
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(0x38 << 24)
++ str r0, [r6, #CCM_CBCDR]
++
++ /* now switch periph_clk to pll3_main_clk. */
++ ldr r0, [r6, #CCM_CBCDR]
++ orr r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch_50:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch_50
++
++switch_pre_periph_clk_50:
++
++ /* now switch pre_periph_clk to PFD_200MHz. */
++ ldr r0, [r6, #CCM_CBCMR]
++ orr r0, r0, #(0xc << 16)
++ str r0, [r6, #CCM_CBCMR]
++
++ /*
++ * set the MMDC_DIV=4, AXI_DIV = 4, AHB_DIV=8,
++ */
++ ldr r0, [r6, #CCM_CBCDR]
++ ldr r2, =0x3f1f00
++ bic r0, r0, r2
++ orr r0, r0, #(0x18 << 16)
++ orr r0, r0, #(0x3 << 16)
++
++ /*
++ * if changing AHB divider remember to change
++ * the IPGPER divider too below.
++ */
++ orr r0, r0, #0x1d00
++ str r0, [r6, #CCM_CBCDR]
++
++wait_div_update_50:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne wait_div_update_50
++
++ /* now switch periph_clk back. */
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch2:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch2
++
++ .endm
++
++ .macro switch_to_24MHz
++ /*
++ * change the freq: set DDR to 24MHz, sourced
++ * from periph_clk2; ensure periph_clk2 is
++ * sourced from the 24MHz osc and that the
++ * divider is 1.
++ */
++
++ ldr r0, [r6, #CCM_CBCMR]
++ bic r0, r0, #(0x3 << 12)
++ orr r0, r0, #(1 << 12)
++ str r0, [r6, #CCM_CBCMR]
++
++ ldr r0, [r6, #CCM_CBCDR]
++ bic r0, r0, #(0x38 << 24)
++ str r0, [r6, #CCM_CBCDR]
++
++ /* now switch periph_clk to 24MHz. */
++ ldr r0, [r6, #CCM_CBCDR]
++ orr r0, r0, #(1 << 25)
++ str r0, [r6, #CCM_CBCDR]
++
++periph_clk_switch1:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne periph_clk_switch1
++
++ /* change all the dividers to 1. */
++ ldr r0, [r6, #CCM_CBCDR]
++ ldr r2, =0x3f1f00
++ bic r0, r0, r2
++ orr r0, r0, #(1 << 8)
++ str r0, [r6, #CCM_CBCDR]
++
++ /* Wait for the divider to change. */
++wait_div_update:
++ ldr r0, [r6, #CCM_CDHIPR]
++ cmp r0, #0
++ bne wait_div_update
++
++ .endm
++
++/*
++ * mx6_ddr3_freq_change
++ *
++ * idle the processor (e.g., wait for interrupt).
++ * make sure DDR is in self-refresh.
++ * IRQs are already disabled.
++ */
++ENTRY(mx6_ddr3_freq_change)
++
++ stmfd sp!, {r4-r12}
++
++ /*
++ * r5 -> mmdc_base
++ * r6 -> ccm_base
++ * r7 -> iomux_base
++ * r12 -> l2_base
++ */
++ mov r4, r0
++ mov r8, r1
++ mov r9, r2
++ mov r11, r3
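++
++ /*
++ * Incoming arguments (inferred from the busfreq callers;
++ * not documented in the original):
++ * r0 -> requested DDR frequency
++ * r1 -> ddr_settings table
++ * r2 -> current DLL state flag
++ * r3 -> iomux offsets table
++ */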
++
++ /*
++ * Get the addresses of the registers.
++ * They are the last few entries in the
++ * ddr_settings parameter.
++ * The first entry contains the count,
++ * and each entry is 2 words.
++ */
++ ldr r0, [r1]
++ add r0, r0, #1
++ lsl r0, r0, #3
++ add r1, r0, r1
++ /* mmdc_base. */
++ ldr r5, [r1]
++ add r1, #8
++ /* ccm_base */
++ ldr r6, [r1]
++ add r1, #8
++ /*iomux_base */
++ ldr r7, [r1]
++ add r1, #8
++ /*l2_base */
++ ldr r12, [r1]
++
++ddr_freq_change:
++ /*
++ * make sure no TLB miss will occur while
++ * the DDR is in self-refresh: touch each
++ * base address below so its translation
++ * is already resident in the TLB.
++ */
++
++ adr r10, ddr_freq_change
++
++ ldr r2, [r6]
++ ldr r2, [r5]
++ ldr r2, [r7]
++ ldr r2, [r8]
++ ldr r2, [r10]
++ ldr r2, [r11]
++ ldr r2, [r12]
++
++#ifdef CONFIG_CACHE_L2X0
++ /*
++ * Make sure the L2 buffers are drained.
++ * Sync operation on L2 drains the buffers.
++ */
++ mov r1, #0x0
++ str r1, [r12, #L2_CACHE_SYNC]
++#endif
++
++ /* disable automatic power saving. */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ orr r0, r0, #0x01
++ str r0, [r5, #MMDC0_MAPSR]
++
++ /* disable MMDC power down timer. */
++ ldr r0, [r5, #MMDC0_MDPDC]
++ bic r0, r0, #(0xff << 8)
++ str r0, [r5, #MMDC0_MDPDC]
++
++ /* delay for a while */
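++ /*
++ * (The loop below just re-reads the first four MMDC
++ * registers a fixed number of times to burn bus cycles;
++ * the values read are discarded.)
++ */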
++ ldr r1, =4
++delay1:
++ ldr r2, =0
++cont1:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont1
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay1
++
++ /* set CON_REG */
++ ldr r0, =0x8000
++ str r0, [r5, #MMDC0_MDSCR]
++poll_conreq_set_1:
++ ldr r0, [r5, #MMDC0_MDSCR]
++ and r0, r0, #(0x4 << 12)
++ cmp r0, #(0x4 << 12)
++ bne poll_conreq_set_1
++
++ ldr r0, =0x00008050
++ str r0, [r5, #MMDC0_MDSCR]
++ ldr r0, =0x00008058
++ str r0, [r5, #MMDC0_MDSCR]
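++
++ /*
++ * Note: each MDSCR command above and below is issued twice;
++ * bit 3 of the command word appears to select which chip
++ * select (CS0 vs CS1) the command is sent to.
++ */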
++
++ /*
++ * if the requested frequency is 300MHz or
++ * above, go to DLL-on mode.
++ */
++ ldr r1, =300000000
++ cmp r4, r1
++ bge dll_on_mode
++
++dll_off_mode:
++
++ /* if DLL is currently on, turn it off. */
++ cmp r9, #1
++ beq continue_dll_off_1
++
++ ldr r0, =0x00018031
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x00018039
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r1, =10
++delay1a:
++ ldr r2, =0
++cont1a:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont1a
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay1a
++
++continue_dll_off_1:
++ /* set DVFS - enter self refresh mode */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ orr r0, r0, #(1 << 21)
++ str r0, [r5, #MMDC0_MAPSR]
++
++ /* de-assert con_req */
++ mov r0, #0x0
++ str r0, [r5, #MMDC0_MDSCR]
++
++poll_dvfs_set_1:
++ ldr r0, [r5, #MMDC0_MAPSR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ bne poll_dvfs_set_1
++
++ ldr r1, =24000000
++ cmp r4, r1
++ beq switch_freq_24
++
++ switch_to_50MHz
++ b continue_dll_off_2
++
++switch_freq_24:
++ switch_to_24MHz
++
++continue_dll_off_2:
++
++ /* set SBS - block ddr accesses */
++ ldr r0, [r5, #MMDC0_MADPCR0]
++ orr r0, r0, #(1 << 8)
++ str r0, [r5, #MMDC0_MADPCR0]
++
++ /* clear DVFS - exit from self refresh mode */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ bic r0, r0, #(1 << 21)
++ str r0, [r5, #MMDC0_MAPSR]
++
++poll_dvfs_clear_1:
++ ldr r0, [r5, #MMDC0_MAPSR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ beq poll_dvfs_clear_1
++
++ /* if DLL was previously on, continue DLL off routine. */
++ cmp r9, #1
++ beq continue_dll_off_3
++
++ ldr r0, =0x00018031
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x00018039
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x08208030
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x08208038
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x00088032
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x0008803A
++ str r0, [r5, #MMDC0_MDSCR]
++
++ /* delay for a while. */
++ ldr r1, =4
++delay_1:
++ ldr r2, =0
++cont_1:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont_1
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay_1
++
++ ldr r0, [r5, #MMDC0_MDCF0]
++ bic r0, r0, #0xf
++ orr r0, r0, #0x3
++ str r0, [r5, #MMDC0_MDCF0]
++
++ ldr r0, [r5, #MMDC0_MDCF1]
++ bic r0, r0, #0x7
++ orr r0, r0, #0x4
++ str r0, [r5, #MMDC0_MDCF1]
++
++ ldr r0, =0x00011680
++ str r0, [r5, #MMDC0_MDMISC]
++
++ /* enable dqs pull down in the IOMUX. */
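++ /*
++ * r11 points to a table whose first word is an entry count,
++ * followed by (IOMUX pad offset, saved pad value) pairs; the
++ * saved values are restored later in update_iomux1.
++ */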
++ ldr r1, [r11]
++ add r11, r11, #8
++ ldr r2, =0x3028
++update_iomux:
++ ldr r0, [r11, #0x0]
++ ldr r3, [r7, r0]
++ bic r3, r3, r2
++ orr r3, r3, #(0x3 << 12)
++ orr r3, r3, #0x28
++ str r3, [r7, r0]
++ add r11, r11, #8
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt update_iomux
++
++ /* ODT disabled. */
++ ldr r0, =0x0
++ ldr r2, =MMDC0_MPODTCTRL
++ str r0, [r5, r2]
++ ldr r2, =MMDC1_MPODTCTRL
++ str r0, [r5, r2]
++
++ /* DQS gating disabled. */
++ ldr r2, =MMDC0_MPDGCTRL0
++ ldr r0, [r5, r2]
++ orr r0, r0, #(1 << 29)
++ str r0, [r5, r2]
++
++ ldr r2, =MMDC1_MPDGCTRL0
++ ldr r0, [r5, r2]
++ orr r0, r0, #(0x1 << 29)
++ str r0, [r5, r2]
++
++ /* MMDC0_MAPSR adopt power down enable. */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ bic r0, r0, #0x01
++ str r0, [r5, #MMDC0_MAPSR]
++
++ /* frc_msr + mu bypass */
++ ldr r0, =0x00000060
++ str r0, [r5, #MMDC0_MPMUR0]
++ ldr r2, =MMDC1_MPMUR0
++ str r0, [r5, r2]
++ ldr r0, =0x00000460
++ str r0, [r5, #MMDC0_MPMUR0]
++ ldr r2, =MMDC1_MPMUR0
++ str r0, [r5, r2]
++ ldr r0, =0x00000c60
++ str r0, [r5, #MMDC0_MPMUR0]
++ ldr r2, =MMDC1_MPMUR0
++ str r0, [r5, r2]
++
++continue_dll_off_3:
++ /* clear SBS - unblock accesses to DDR. */
++ ldr r0, [r5, #MMDC0_MADPCR0]
++ bic r0, r0, #(0x1 << 8)
++ str r0, [r5, #MMDC0_MADPCR0]
++
++ mov r0, #0x0
++ str r0, [r5, #MMDC0_MDSCR]
++poll_conreq_clear_1:
++ ldr r0, [r5, #MMDC0_MDSCR]
++ and r0, r0, #(0x4 << 12)
++ cmp r0, #(0x4 << 12)
++ beq poll_conreq_clear_1
++
++ b done
++
++dll_on_mode:
++ /* assert DVFS - enter self refresh mode. */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ orr r0, r0, #(1 << 21)
++ str r0, [r5, #MMDC0_MAPSR]
++
++ /* de-assert CON_REQ. */
++ mov r0, #0x0
++ str r0, [r5, #MMDC0_MDSCR]
++
++ /* poll DVFS ack. */
++poll_dvfs_set_2:
++ ldr r0, [r5, #MMDC0_MAPSR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ bne poll_dvfs_set_2
++
++ ldr r1, =528000000
++ cmp r4, r1
++ beq switch_freq_528
++
++ switch_to_400MHz
++
++ b continue_dll_on
++
++switch_freq_528:
++ switch_to_528MHz
++
++continue_dll_on:
++
++ /* set SBS step-by-step mode. */
++ ldr r0, [r5, #MMDC0_MADPCR0]
++ orr r0, r0, #(1 << 8)
++ str r0, [r5, #MMDC0_MADPCR0]
++
++ /* clear DVFS - exit self refresh mode. */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ bic r0, r0, #(1 << 21)
++ str r0, [r5, #MMDC0_MAPSR]
++
++poll_dvfs_clear_2:
++ ldr r0, [r5, #MMDC0_MAPSR]
++ and r0, r0, #(1 << 25)
++ cmp r0, #(1 << 25)
++ beq poll_dvfs_clear_2
++
++ /* if DLL is currently off, turn it back on. */
++ cmp r9, #0
++ beq update_calibration_only
++
++ ldr r0, =0xa5390003
++ str r0, [r5, #MMDC0_MPZQHWCTRL]
++ ldr r2, =MMDC1_MPZQHWCTRL
++ str r0, [r5, r2]
++
++ /* enable DQS gating. */
++ ldr r2, =MMDC0_MPDGCTRL0
++ ldr r0, [r5, r2]
++ bic r0, r0, #(1 << 29)
++ str r0, [r5, r2]
++
++ ldr r2, =MMDC1_MPDGCTRL0
++ ldr r0, [r5, r2]
++ bic r0, r0, #(1 << 29)
++ str r0, [r5, r2]
++
++ /* force measure. */
++ ldr r0, =0x00000800
++ str r0, [r5, #MMDC0_MPMUR0]
++ ldr r2, =MMDC1_MPMUR0
++ str r0, [r5, r2]
++
++ /* delay for a while. */
++ ldr r1, =4
++delay5:
++ ldr r2, =0
++cont5:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont5
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay5
++
++ /* disable dqs pull down in the IOMUX. */
++ ldr r1, [r11]
++ add r11, r11, #8
++update_iomux1:
++ ldr r0, [r11, #0x0]
++ ldr r3, [r11, #0x4]
++ str r3, [r7, r0]
++ add r11, r11, #8
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt update_iomux1
++
++ /* config MMDC timings to 528MHz. */
++ ldr r9, [r8]
++ add r8, r8, #8
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ /* update MISC register: WALAT, RALAT */
++ ldr r0, =0x00001740
++ str r0, [r5, #MMDC0_MDMISC]
++
++ /* configure ddr devices to dll on, odt. */
++ ldr r0, =0x00048031
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x00048039
++ str r0, [r5, #MMDC0_MDSCR]
++
++ /* delay for a while. */
++ ldr r1, =4
++delay7:
++ ldr r2, =0
++cont7:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont7
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay7
++
++ /* reset dll. */
++ ldr r0, =0x09408030
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x09408038
++ str r0, [r5, #MMDC0_MDSCR]
++
++ /* delay for a while. */
++ ldr r1, =100
++delay8:
++ ldr r2, =0
++cont8:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont8
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay8
++
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ ldr r0, =0x00428031
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x00428039
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ /* issue a zq command. */
++ ldr r0, =0x04008040
++ str r0, [r5, #MMDC0_MDSCR]
++
++ ldr r0, =0x04008048
++ str r0, [r5, #MMDC0_MDSCR]
++
++ /* MMDC ODT enable. */
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++
++ ldr r2, =0x4818
++ str r3, [r5, r2]
++
++ /* delay for a while. */
++ ldr r1, =40
++delay15:
++ ldr r2, =0
++cont15:
++ ldr r0, [r5, r2]
++ add r2, r2, #4
++ cmp r2, #16
++ bne cont15
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt delay15
++
++ /* MMDC0_MAPSR adopt power down enable. */
++ ldr r0, [r5, #MMDC0_MAPSR]
++ bic r0, r0, #0x01
++ str r0, [r5, #MMDC0_MAPSR]
++
++ /* enable MMDC power down timer. */
++ ldr r0, [r5, #MMDC0_MDPDC]
++ orr r0, r0, #(0x55 << 8)
++ str r0, [r5, #MMDC0_MDPDC]
++
++ b update_calibration
++
++update_calibration_only:
++ ldr r1, [r8]
++ sub r1, r1, #7
++ add r8, r8, #64
++ b update_calib
++
++update_calibration:
++ /* write the new calibration values. */
++ mov r1, r9
++ sub r1, r1, #7
++
++update_calib:
++ ldr r0, [r8, #0x0]
++ ldr r3, [r8, #0x4]
++ str r3, [r5, r0]
++ add r8, r8, #8
++ sub r1, r1, #1
++ cmp r1, #0
++ bgt update_calib
++
++ /* perform a force measurement. */
++ ldr r0, =0x800
++ str r0, [r5, #MMDC0_MPMUR0]
++ ldr r2, =MMDC1_MPMUR0
++ str r0, [r5, r2]
++
++ /* clear SBS - unblock DDR accesses. */
++ ldr r0, [r5, #MMDC0_MADPCR0]
++ bic r0, r0, #(1 << 8)
++ str r0, [r5, #MMDC0_MADPCR0]
++
++ mov r0, #0x0
++ str r0, [r5, #MMDC0_MDSCR]
++poll_conreq_clear_2:
++ ldr r0, [r5, #MMDC0_MDSCR]
++ and r0, r0, #(0x4 << 12)
++ cmp r0, #(0x4 << 12)
++ beq poll_conreq_clear_2
++
++done:
++ /* restore registers */
++
++ ldmfd sp!, {r4-r12}
++ mov pc, lr
++
++ .type mx6_do_ddr_freq_change, #object
++ENTRY(mx6_do_ddr_freq_change)
++ .word mx6_ddr3_freq_change
++ .size mx6_ddr3_freq_change, . - mx6_ddr3_freq_change
+diff -Nur linux-4.1.3/arch/arm/mach-imx/Kconfig linux-xbian-imx6/arch/arm/mach-imx/Kconfig
+--- linux-4.1.3/arch/arm/mach-imx/Kconfig 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/mach-imx/Kconfig 2015-07-27 23:13:01.053224513 +0200
+@@ -1,5 +1,6 @@
+ menuconfig ARCH_MXC
+ bool "Freescale i.MX family" if ARCH_MULTI_V4_V5 || ARCH_MULTI_V6_V7
++ select ARCH_HAS_RESET_CONTROLLER
+ select ARCH_REQUIRE_GPIOLIB
+ select ARM_CPU_SUSPEND if PM
+ select CLKSRC_MMIO
+@@ -8,6 +9,7 @@
+ select PM_OPP if PM
+ select SOC_BUS
+ select SRAM
++ select ZONE_DMA
+ help
+ Support for Freescale MXC/iMX-based family of processors
+
+@@ -58,7 +60,6 @@
+
+ config HAVE_IMX_SRC
+ def_bool y if SMP
+- select ARCH_HAS_RESET_CONTROLLER
+
+ config IMX_HAVE_IOMUX_V1
+ bool
+diff -Nur linux-4.1.3/arch/arm/mach-imx/lpddr2_freq_imx6.S linux-xbian-imx6/arch/arm/mach-imx/lpddr2_freq_imx6.S
+--- linux-4.1.3/arch/arm/mach-imx/lpddr2_freq_imx6.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/arch/arm/mach-imx/lpddr2_freq_imx6.S 2015-07-27 23:13:01.093082305 +0200
+@@ -0,0 +1,484 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/linkage.h>
++
++ .macro mx6sl_switch_to_24MHz
++
++ /*
++ * Set MMDC clock to be sourced from PLL3.
++ * Ensure first periph2_clk2 is sourced from PLL3.
++ * Set the PERIPH2_CLK2_PODF to divide by 2.
++ */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x7
++ orr r6, r6, #0x1
++ str r6, [r2, #0x14]
++
++ /* Select PLL3 to source MMDC. */
++ ldr r6, [r2, #0x18]
++ bic r6, r6, #0x100000
++ str r6, [r2, #0x18]
++
++ /* Switch periph2_clk_sel to run from PLL3. */
++ ldr r6, [r2, #0x14]
++ orr r6, r6, #0x4000000
++ str r6, [r2, #0x14]
++
++periph2_clk_switch1:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne periph2_clk_switch1
++
++ /*
++ * Need to clock gate the 528 PFDs before
++ * powering down PLL2.
++ * Only the PLL2_PFD2_400M should be ON
++ * at this time, so only clock gate that one.
++ */
++ ldr r6, [r3, #0x100]
++ orr r6, r6, #0x800000
++ str r6, [r3, #0x100]
++
++ /*
++ * Set PLL2 to bypass state. We should be here
++ * only if MMDC is not sourced from PLL2.
++ */
++ ldr r6, [r3, #0x30]
++ orr r6, r6, #0x10000
++ str r6, [r3, #0x30]
++
++ ldr r6, [r3, #0x30]
++ orr r6, r6, #0x1000
++ str r6, [r3, #0x30]
++
++ /* Ensure pre_periph2_clk_mux is set to pll2 */
++ ldr r6, [r2, #0x18]
++ bic r6, r6, #0x600000
++ str r6, [r2, #0x18]
++
++ /* Set MMDC clock to be sourced from the bypassed PLL2. */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x4000000
++ str r6, [r2, #0x14]
++
++periph2_clk_switch2:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne periph2_clk_switch2
++
++ /*
++ * Now move MMDC back to periph2_clk2 source.
++ * after selecting PLL2 as the option.
++ * Select PLL2 as the source.
++ */
++ ldr r6, [r2, #0x18]
++ orr r6, r6, #0x100000
++ str r6, [r2, #0x18]
++
++ /* set periph2_clk2_podf to divide by 1. */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x7
++ str r6, [r2, #0x14]
++
++ /* Now move periph2_clk to periph2_clk2 source */
++ ldr r6, [r2, #0x14]
++ orr r6, r6, #0x4000000
++ str r6, [r2, #0x14]
++
++periph2_clk_switch3:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne periph2_clk_switch3
++
++ /* Now set the MMDC PODF back to 1.*/
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x38
++ str r6, [r2, #0x14]
++
++mmdc_podf0:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne mmdc_podf0
++
++ .endm
++
++ .macro ddr_switch_400MHz
++
++ /* Set MMDC divider first, in case PLL3 is at 480MHz. */
++ ldr r6, [r3, #0x10]
++ and r6, r6, #0x10000
++ cmp r6, #0x10000
++ beq pll3_in_bypass
++
++ /* Set MMDC divider to divide by 2. */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x38
++ orr r6, r6, #0x8
++ str r6, [r2, #0x14]
++
++mmdc_podf:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne mmdc_podf
++
++pll3_in_bypass:
++ /*
++ * Check if we are switching between
++ * 400MHz <-> 100MHz. If so, we should
++ * try to source MMDC from PLL2_200M.
++ */
++ cmp r1, #0
++ beq not_low_bus_freq
++
++ /* Ensure that MMDC is sourced from PLL2 mux first. */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x4000000
++ str r6, [r2, #0x14]
++
++periph2_clk_switch4:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne periph2_clk_switch4
++
++not_low_bus_freq:
++ /* Now ensure periph2_clk2_sel mux is set to PLL3 */
++ ldr r6, [r2, #0x18]
++ bic r6, r6, #0x100000
++ str r6, [r2, #0x18]
++
++ /* Now switch MMDC to PLL3. */
++ ldr r6, [r2, #0x14]
++ orr r6, r6, #0x4000000
++ str r6, [r2, #0x14]
++
++periph2_clk_switch5:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne periph2_clk_switch5
++
++ /*
++ * Check if PLL2 is already powered up and locked
++ * (low_bus_freq_mode == 0). If so, do nothing with PLL2.
++ */
++ cmp r1, #0
++ beq pll2_already_on
++
++ /* Now power up PLL2 and unbypass it. */
++ ldr r6, [r3, #0x30]
++ bic r6, r6, #0x1000
++ str r6, [r3, #0x30]
++
++ /* Make sure PLL2 has locked.*/
++wait_for_pll_lock:
++ ldr r6, [r3, #0x30]
++ and r6, r6, #0x80000000
++ cmp r6, #0x80000000
++ bne wait_for_pll_lock
++
++ ldr r6, [r3, #0x30]
++ bic r6, r6, #0x10000
++ str r6, [r3, #0x30]
++
++ /*
++ * Need to enable the 528 PFDs after
++ * powering up PLL2.
++ * Only the PLL2_PFD2_400M should be ON
++ * as it feeds the MMDC. The rest should have
++ * been managed by the clock code.
++ */
++ ldr r6, [r3, #0x100]
++ bic r6, r6, #0x800000
++ str r6, [r3, #0x100]
++
++pll2_already_on:
++ /*
++ * Now switch MMDC clk back to pll2_mux option.
++ * Ensure pre_periph2_clk2 is set to pll2_pfd_400M.
++ * If switching to audio DDR freq, set the
++ * pre_periph2_clk2 to PLL2_PFD_200M
++ */
++ ldr r6, =400000000
++ cmp r6, r0
++ bne use_pll2_pfd_200M
++
++ ldr r6, [r2, #0x18]
++ bic r6, r6, #0x600000
++ orr r6, r6, #0x200000
++ str r6, [r2, #0x18]
++ ldr r6, =400000000
++ b cont2
++
++use_pll2_pfd_200M:
++ ldr r6, [r2, #0x18]
++ orr r6, r6, #0x600000
++ str r6, [r2, #0x18]
++ ldr r6, =200000000
++
++cont2:
++ ldr r4, [r2, #0x14]
++ bic r4, r4, #0x4000000
++ str r4, [r2, #0x14]
++
++periph2_clk_switch6:
++ ldr r4, [r2, #0x48]
++ cmp r4, #0
++ bne periph2_clk_switch6
++
++change_divider_only:
++ /*
++ * Calculate the MMDC divider
++ * based on the requested freq.
++ */
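++ /*
++ * (Done by repeated subtraction: for the exact divisors
++ * used here, r4 ends up as divider - 1, i.e. the PODF
++ * value, which is then shifted into bits [5:3] of CCM_CBCDR.)
++ */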
++ ldr r4, =0
++Loop2:
++ sub r6, r6, r0
++ cmp r6, r0
++ blt Div_Found
++ add r4, r4, #1
++ bgt Loop2
++
++ /* Shift divider into correct offset. */
++ lsl r4, r4, #3
++Div_Found:
++ /* Set the MMDC PODF. */
++ ldr r6, [r2, #0x14]
++ bic r6, r6, #0x38
++ orr r6, r6, r4
++ str r6, [r2, #0x14]
++
++mmdc_podf1:
++ ldr r6, [r2, #0x48]
++ cmp r6, #0
++ bne mmdc_podf1
++
++ .endm
++
++ .macro mmdc_clk_lower_100MHz
++
++ /*
++ * Prior to reducing the DDR frequency (at 528/400 MHz),
++ * read the Measure unit count bits (MU_UNIT_DEL_NUM)
++ */
++ ldr r5, =0x8B8
++ ldr r6, [r8, r5]
++ /* Original MU unit count */
++ mov r6, r6, LSR #16
++ ldr r4, =0x3FF
++ and r6, r6, r4
++ /* Original MU unit count * 2 */
++ mov r7, r6, LSL #1
++ /*
++ * Bypass the automatic measure unit when below 100 MHz
++ * by setting the Measure unit bypass enable bit (MU_BYP_EN)
++ */
++ ldr r6, [r8, r5]
++ orr r6, r6, #0x400
++ str r6, [r8, r5]
++ /*
++ * Double the measure count value read in step 1 and program it in the
++ * measurement bypass bits (MU_BYP_VAL) of the MMDC PHY Measure Unit
++ * Register for the reduced frequency operation below 100 MHz
++ */
++ ldr r6, [r8, r5]
++ ldr r4, =0x3FF
++ bic r6, r6, r4
++ orr r6, r6, r7
++ str r6, [r8, r5]
++ /* Now perform a Force Measurement. */
++ ldr r6, [r8, r5]
++ orr r6, r6, #0x800
++ str r6, [r8, r5]
++ /* Wait for FRC_MSR to clear. */
++force_measure:
++ ldr r6, [r8, r5]
++ and r6, r6, #0x800
++ cmp r6, #0x0
++ bne force_measure
++
++ .endm
++
++ .macro mmdc_clk_above_100MHz
++
++ /* Make sure that the PHY measurement unit is NOT in bypass mode */
++ ldr r5, =0x8B8
++ ldr r6, [r8, r5]
++ bic r6, r6, #0x400
++ str r6, [r8, r5]
++ /* Now perform a Force Measurement. */
++ ldr r6, [r8, r5]
++ orr r6, r6, #0x800
++ str r6, [r8, r5]
++ /* Wait for FRC_MSR to clear. */
++force_measure1:
++ ldr r6, [r8, r5]
++ and r6, r6, #0x800
++ cmp r6, #0x0
++ bne force_measure1
++ .endm
++
++/*
++ * mx6_lpddr2_freq_change
++ *
++ * Make sure DDR is in self-refresh.
++ * IRQs are already disabled.
++ * r0 : DDR freq.
++ * r1: low_bus_freq_mode flag
++ * r2: Pointer to array containing addresses of registers.
++ */
++ .align 3
++ENTRY(mx6_lpddr2_freq_change)
++
++ push {r4-r10}
++
++ mov r4, r2
++ ldr r3, [r4] @ANATOP_BASE_ADDR
++ ldr r2, [r4, #0x4] @CCM_BASE_ADDR
++ ldr r8, [r4, #0x8] @MMDC_P0_BASE_ADDR
++ ldr r7, [r4, #0xC] @L2_BASE_ADDR
++
++lpddr2_freq_change:
++ adr r9, lpddr2_freq_change
++
++ /* Prime all TLB entries. */
++ ldr r6, [r9]
++ ldr r6, [r8]
++ ldr r6, [r3]
++ ldr r6, [r2]
++
++ /* Drain all the L1 buffers. */
++ dsb
++
++#ifdef CONFIG_CACHE_L2X0
++ /*
++ * Need to make sure the buffers in L2 are drained.
++ * Performing a sync operation does this.
++ */
++ mov r6, #0x0
++ str r6, [r7, #0x730]
++#endif
++
++ /*
++ * The second dsb might be needed to keep cache sync (device write)
++ * ordering with the memory accesses before it.
++ */
++ dsb
++ isb
++
++ /* Disable Automatic power savings. */
++ ldr r6, [r8, #0x404]
++ orr r6, r6, #0x01
++ str r6, [r8, #0x404]
++
++ /* MMDC0_MDPDC disable power down timer */
++ ldr r6, [r8, #0x4]
++ bic r6, r6, #0xff00
++ str r6, [r8, #0x4]
++
++ /* Delay for a while */
++ ldr r10, =10
++delay1:
++ ldr r7, =0
++cont1:
++ ldr r6, [r8, r7]
++ add r7, r7, #4
++ cmp r7, #16
++ bne cont1
++ sub r10, r10, #1
++ cmp r10, #0
++ bgt delay1
++
++ /* Make the DDR explicitly enter self-refresh. */
++ ldr r6, [r8, #0x404]
++ orr r6, r6, #0x200000
++ str r6, [r8, #0x404]
++
++poll_dvfs_set_1:
++ ldr r6, [r8, #0x404]
++ and r6, r6, #0x2000000
++ cmp r6, #0x2000000
++ bne poll_dvfs_set_1
++
++ /* set SBS step-by-step mode */
++ ldr r6, [r8, #0x410]
++ orr r6, r6, #0x100
++ str r6, [r8, #0x410]
++
++ ldr r10, =100000000
++ cmp r0, r10
++ bgt set_ddr_mu_above_100
++ mmdc_clk_lower_100MHz
++
++set_ddr_mu_above_100:
++ ldr r10, =24000000
++ cmp r0, r10
++ beq set_to_24MHz
++
++ ddr_switch_400MHz
++
++ ldr r10,=100000000
++ cmp r0, r10
++ blt done
++ mmdc_clk_above_100MHz
++
++ b done
++
++set_to_24MHz:
++ mx6sl_switch_to_24MHz
++
++done:
++ /* clear DVFS - exit from self refresh mode */
++ ldr r6, [r8, #0x404]
++ bic r6, r6, #0x200000
++ str r6, [r8, #0x404]
++
++poll_dvfs_clear_1:
++ ldr r6, [r8, #0x404]
++ and r6, r6, #0x2000000
++ cmp r6, #0x2000000
++ beq poll_dvfs_clear_1
++
++ /* Enable Automatic power savings. */
++ ldr r6, [r8, #0x404]
++ bic r6, r6, #0x01
++ str r6, [r8, #0x404]
++
++ ldr r10, =24000000
++ cmp r0, r10
++ beq skip_power_down
++
++ /* Enable MMDC power down timer. */
++ ldr r6, [r8, #0x4]
++ orr r6, r6, #0x5500
++ str r6, [r8, #0x4]
++
++skip_power_down:
++ /* clear SBS - unblock DDR accesses */
++ ldr r6, [r8, #0x410]
++ bic r6, r6, #0x100
++ str r6, [r8, #0x410]
++
++ pop {r4-r10}
++
++ /* Restore registers */
++ mov pc, lr
++
++ .type mx6_lpddr2_do_iram, #object
++ENTRY(mx6_lpddr2_do_iram)
++ .word mx6_lpddr2_freq_change
++ .size mx6_lpddr2_freq_change, . - mx6_lpddr2_freq_change
+diff -Nur linux-4.1.3/arch/arm/mach-imx/mach-imx6q.c linux-xbian-imx6/arch/arm/mach-imx/mach-imx6q.c
+--- linux-4.1.3/arch/arm/mach-imx/mach-imx6q.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/mach-imx/mach-imx6q.c 2015-07-27 23:13:01.097068084 +0200
+@@ -265,10 +265,12 @@
+ static void __init imx6q_init_machine(void)
+ {
+ struct device *parent;
++ void __iomem *p;
+
+ imx_print_silicon_rev(cpu_is_imx6dl() ? "i.MX6DL" : "i.MX6Q",
+ imx_get_soc_revision());
+
++ mxc_arch_reset_init_dt();
+ parent = imx_soc_device_init();
+ if (parent == NULL)
+ pr_warn("failed to initialize soc device\n");
+@@ -281,6 +283,12 @@
+ cpu_is_imx6q() ? imx6q_pm_init() : imx6dl_pm_init();
+ imx6q_1588_init();
+ imx6q_axi_init();
++
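++ /*
++ * 0x021b0000 is the i.MX6 MMDC port 0 base; offset 0x40 is
++ * presumably the MDASP (address space partition) register.
++ * The original patch gives no rationale for the 0x7f value.
++ */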
++ p = ioremap(0x21b0000, SZ_4K);
++ if (p) {
++ writel(0x7f, p + 0x40);
++ iounmap(p);
++ }
+ }
+
+ #define OCOTP_CFG3 0x440
+@@ -330,6 +338,12 @@
+ if (dev_pm_opp_disable(cpu_dev, 852000000))
+ pr_warn("failed to disable 852 MHz OPP\n");
+ }
++ if (vpu352) {
++ if (dev_pm_opp_disable(cpu_dev, 396000000))
++ pr_warn("VPU352: failed to disable 396MHz OPP\n");
++ pr_info("VPU352: remove 396MHz OPP for VPU running at 352MHz!\n");
++ }
++
+ iounmap(base);
+ put_node:
+ of_node_put(np);
+@@ -408,4 +422,5 @@
+ .init_machine = imx6q_init_machine,
+ .init_late = imx6q_init_late,
+ .dt_compat = imx6q_dt_compat,
++ .restart = mxc_restart,
+ MACHINE_END
+diff -Nur linux-4.1.3/arch/arm/mach-imx/Makefile linux-xbian-imx6/arch/arm/mach-imx/Makefile
+--- linux-4.1.3/arch/arm/mach-imx/Makefile 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/mach-imx/Makefile 2015-07-27 23:13:01.053224513 +0200
+@@ -28,6 +28,12 @@
+ obj-$(CONFIG_MXC_USE_EPIT) += epit.o
+ obj-$(CONFIG_MXC_DEBUG_BOARD) += 3ds_debugboard.o
+
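++# The busfreq core is always built; the low-level DDR frequency-change
++# helpers below are needed only when i.MX6 cpufreq support is enabled.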
++obj-y += busfreq-imx6.o
++ifdef CONFIG_ARM_IMX6Q_CPUFREQ
++obj-$(CONFIG_SOC_IMX6Q) += ddr3_freq_imx6.o busfreq_ddr3.o
++obj-$(CONFIG_SOC_IMX6SL) += lpddr2_freq_imx6.o busfreq_lpddr2.o
++endif
++
+ ifeq ($(CONFIG_CPU_IDLE),y)
+ obj-$(CONFIG_SOC_IMX5) += cpuidle-imx5.o
+ obj-$(CONFIG_SOC_IMX6Q) += cpuidle-imx6q.o
+diff -Nur linux-4.1.3/arch/arm/mach-imx/src.c linux-xbian-imx6/arch/arm/mach-imx/src.c
+--- linux-4.1.3/arch/arm/mach-imx/src.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/mach-imx/src.c 2015-07-27 23:13:01.097068084 +0200
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2011 Freescale Semiconductor, Inc.
++ * Copyright 2011-2014 Freescale Semiconductor, Inc.
+ * Copyright 2011 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+@@ -18,6 +18,7 @@
+ #include <linux/smp.h>
+ #include <asm/smp_plat.h>
+ #include "common.h"
++#include "hardware.h"
+
+ #define SRC_SCR 0x000
+ #define SRC_GPR1 0x020
+@@ -32,6 +33,7 @@
+
+ static void __iomem *src_base;
+ static DEFINE_SPINLOCK(scr_lock);
++static bool m4_is_enabled;
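++/* latched from SRC_SCR below; only meaningful on i.MX6SX */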
+
+ static const int sw_reset_bits[5] = {
+ BP_SRC_SCR_SW_GPU_RST,
+@@ -41,6 +43,11 @@
+ BP_SRC_SCR_SW_IPU2_RST
+ };
+
++bool imx_src_is_m4_enabled(void)
++{
++ return m4_is_enabled;
++}
++
+ static int imx_src_reset_module(struct reset_controller_dev *rcdev,
+ unsigned long sw_reset_idx)
+ {
+@@ -136,6 +143,14 @@
+ */
+ spin_lock(&scr_lock);
+ val = readl_relaxed(src_base + SRC_SCR);
++
++ /* bit 4 is m4c_non_sclr_rst on i.MX6SX */
++ if (cpu_is_imx6sx() && ((val &
++ (1 << BP_SRC_SCR_SW_OPEN_VG_RST)) == 0))
++ m4_is_enabled = true;
++ else
++ m4_is_enabled = false;
++
+ val &= ~(1 << BP_SRC_SCR_WARM_RESET_ENABLE);
+ writel_relaxed(val, src_base + SRC_SCR);
+ spin_unlock(&scr_lock);
+diff -Nur linux-4.1.3/arch/arm/mach-imx/system.c linux-xbian-imx6/arch/arm/mach-imx/system.c
+--- linux-4.1.3/arch/arm/mach-imx/system.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/mach-imx/system.c 2015-07-27 23:13:01.101053863 +0200
+@@ -34,6 +34,7 @@
+
+ static void __iomem *wdog_base;
+ static struct clk *wdog_clk;
++static u32 wdog_source = 1; /* use WDOG1 default */
+
+ /*
+ * Reset the system. It is called by machine_restart().
+@@ -50,6 +51,17 @@
+
+ if (cpu_is_mx1())
+ wcr_enable = (1 << 0);
++ /*
++ * Some i.MX6 boards use WDOG2 to reset external pmic in bypass mode,
++ * so do WDOG2 reset here. Do not set SRS, since we will
++ * trigger external POR later. Use WDOG1 to reset in ldo-enable
++ * mode. You can set it by "fsl,wdog-reset" in dts.
++ * For i.MX6SX we have to trigger wdog-reset to reset QSPI-NOR flash to
++ * work around a qspi-nor reboot issue, whether in ldo-bypass mode or not.
++ */
++ else if ((wdog_source == 2 && (cpu_is_imx6q() || cpu_is_imx6dl() ||
++ cpu_is_imx6sl())) || cpu_is_imx6sx())
++ wcr_enable = 0x14;
+ else
+ wcr_enable = (1 << 2);
+
+@@ -89,6 +101,41 @@
+ clk_prepare(wdog_clk);
+ }
+
++void __init mxc_arch_reset_init_dt(void)
++{
++ struct device_node *np = NULL;
++
++ if (cpu_is_imx6q() || cpu_is_imx6dl())
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpc");
++ else if (cpu_is_imx6sl())
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-gpc");
++
++ if (np)
++ of_property_read_u32(np, "fsl,wdog-reset", &wdog_source);
++ pr_info("Use WDOG%d as reset source\n", wdog_source);
++
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx21-wdt");
++ wdog_base = of_iomap(np, 0);
++ WARN_ON(!wdog_base);
++
++ /* Some i.MX6 boards use WDOG2 to reset board in ldo-bypass mode */
++ if (wdog_source == 2 && (cpu_is_imx6q() || cpu_is_imx6dl() ||
++ cpu_is_imx6sl())) {
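++ /*
++ * Passing the WDOG1 node found above as the 'from' argument
++ * makes this lookup return the next match, i.e. the WDOG2 node.
++ */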
++ np = of_find_compatible_node(np, NULL, "fsl,imx21-wdt");
++ wdog_base = of_iomap(np, 0);
++ WARN_ON(!wdog_base);
++ }
++
++ wdog_clk = of_clk_get(np, 0);
++ if (IS_ERR(wdog_clk)) {
++ pr_warn("%s: failed to get wdog clock\n", __func__);
++ wdog_clk = NULL;
++ return;
++ }
++
++ clk_prepare(wdog_clk);
++}
++
+ #ifdef CONFIG_CACHE_L2X0
+ void __init imx_init_l2cache(void)
+ {
+diff -Nur linux-4.1.3/arch/arm/mm/cache-v7.S linux-xbian-imx6/arch/arm/mm/cache-v7.S
+--- linux-4.1.3/arch/arm/mm/cache-v7.S 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/arch/arm/mm/cache-v7.S 2015-07-27 23:13:01.503617563 +0200
+@@ -446,3 +446,5 @@
+
+ @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+ define_cache_functions v7
++
++ .long v7_dma_flush_range
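++
++ @ The .long above emits the address of v7_dma_flush_range as a
++ @ literal word, presumably so out-of-tree users (e.g. the busfreq
++ @ IRAM helpers) can locate it; the patch gives no explanation.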
+diff -Nur linux-4.1.3/block/bfq-cgroup.c linux-xbian-imx6/block/bfq-cgroup.c
+--- linux-4.1.3/block/bfq-cgroup.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/block/bfq-cgroup.c 2015-07-27 23:13:03.600137415 +0200
+@@ -0,0 +1,936 @@
++/*
++ * BFQ: CGROUPS support.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
++ * file.
++ */
++
++#ifdef CONFIG_CGROUP_BFQIO
++
++static DEFINE_MUTEX(bfqio_mutex);
++
++static bool bfqio_is_removed(struct bfqio_cgroup *bgrp)
++{
++ return bgrp ? !bgrp->online : false;
++}
++
++static struct bfqio_cgroup bfqio_root_cgroup = {
++ .weight = BFQ_DEFAULT_GRP_WEIGHT,
++ .ioprio = BFQ_DEFAULT_GRP_IOPRIO,
++ .ioprio_class = BFQ_DEFAULT_GRP_CLASS,
++};
++
++static inline void bfq_init_entity(struct bfq_entity *entity,
++ struct bfq_group *bfqg)
++{
++ entity->weight = entity->new_weight;
++ entity->orig_weight = entity->new_weight;
++ entity->ioprio = entity->new_ioprio;
++ entity->ioprio_class = entity->new_ioprio_class;
++ entity->parent = bfqg->my_entity;
++ entity->sched_data = &bfqg->sched_data;
++}
++
++static struct bfqio_cgroup *css_to_bfqio(struct cgroup_subsys_state *css)
++{
++ return css ? container_of(css, struct bfqio_cgroup, css) : NULL;
++}
++
++/*
++ * Search the hash table (for now just a list) of bgrp for the
++ * bfq_group of bfqd. Must be called under rcu_read_lock().
++ */
++static struct bfq_group *bfqio_lookup_group(struct bfqio_cgroup *bgrp,
++ struct bfq_data *bfqd)
++{
++ struct bfq_group *bfqg;
++ void *key;
++
++ hlist_for_each_entry_rcu(bfqg, &bgrp->group_data, group_node) {
++ key = rcu_dereference(bfqg->bfqd);
++ if (key == bfqd)
++ return bfqg;
++ }
++
++ return NULL;
++}
++
++static inline void bfq_group_init_entity(struct bfqio_cgroup *bgrp,
++ struct bfq_group *bfqg)
++{
++ struct bfq_entity *entity = &bfqg->entity;
++
++ /*
++ * If the weight of the entity has never been set via the sysfs
++ * interface, then bgrp->weight == 0. In this case we initialize
++ * the weight from the current ioprio value. Otherwise, the group
++ * weight, if set, has priority over the ioprio value.
++ */
++ if (bgrp->weight == 0) {
++ entity->new_weight = bfq_ioprio_to_weight(bgrp->ioprio);
++ entity->new_ioprio = bgrp->ioprio;
++ } else {
++ if (bgrp->weight < BFQ_MIN_WEIGHT ||
++ bgrp->weight > BFQ_MAX_WEIGHT) {
++ printk(KERN_CRIT "bfq_group_init_entity: "
++ "bgrp->weight %d\n", bgrp->weight);
++ BUG();
++ }
++ entity->new_weight = bgrp->weight;
++ entity->new_ioprio = bfq_weight_to_ioprio(bgrp->weight);
++ }
++ entity->orig_weight = entity->weight = entity->new_weight;
++ entity->ioprio = entity->new_ioprio;
++ entity->ioprio_class = entity->new_ioprio_class = bgrp->ioprio_class;
++ entity->my_sched_data = &bfqg->sched_data;
++ bfqg->active_entities = 0;
++}
++
++static inline void bfq_group_set_parent(struct bfq_group *bfqg,
++ struct bfq_group *parent)
++{
++ struct bfq_entity *entity;
++
++ BUG_ON(parent == NULL);
++ BUG_ON(bfqg == NULL);
++
++ entity = &bfqg->entity;
++ entity->parent = parent->my_entity;
++ entity->sched_data = &parent->sched_data;
++}
++
++/**
++ * bfq_group_chain_alloc - allocate a chain of groups.
++ * @bfqd: queue descriptor.
++ * @css: the leaf cgroup_subsys_state this chain starts from.
++ *
++ * Allocate a chain of groups starting from the one belonging to
++ * @css up to the root cgroup. Stop if a cgroup on the chain
++ * to the root already has an allocated group on @bfqd.
++ */
++static struct bfq_group *bfq_group_chain_alloc(struct bfq_data *bfqd,
++ struct cgroup_subsys_state *css)
++{
++ struct bfqio_cgroup *bgrp;
++ struct bfq_group *bfqg, *prev = NULL, *leaf = NULL;
++
++ for (; css != NULL; css = css->parent) {
++ bgrp = css_to_bfqio(css);
++
++ bfqg = bfqio_lookup_group(bgrp, bfqd);
++ if (bfqg != NULL) {
++ /*
++ * All the cgroups in the path from there to the
++ * root must have a bfq_group for bfqd, so we don't
++ * need any more allocations.
++ */
++ break;
++ }
++
++ bfqg = kzalloc(sizeof(*bfqg), GFP_ATOMIC);
++ if (bfqg == NULL)
++ goto cleanup;
++
++ bfq_group_init_entity(bgrp, bfqg);
++ bfqg->my_entity = &bfqg->entity;
++
++ if (leaf == NULL) {
++ leaf = bfqg;
++ prev = leaf;
++ } else {
++ bfq_group_set_parent(prev, bfqg);
++ /*
++ * Build a list of allocated nodes using the bfqd
++ * field, which is still unused and will be
++ * initialized only after the node is
++ * connected.
++ */
++ prev->bfqd = bfqg;
++ prev = bfqg;
++ }
++ }
++
++ return leaf;
++
++cleanup:
++ while (leaf != NULL) {
++ prev = leaf;
++ leaf = leaf->bfqd;
++ kfree(prev);
++ }
++
++ return NULL;
++}
++
++/**
++ * bfq_group_chain_link - link an allocated group chain to a cgroup
++ * hierarchy.
++ * @bfqd: the queue descriptor.
++ * @css: the leaf cgroup_subsys_state to start from.
++ * @leaf: the leaf group (to be associated to @css).
++ *
++ * Try to link a chain of groups to a cgroup hierarchy, connecting the
++ * nodes bottom-up, so we can be sure that when we find a cgroup in the
++ * hierarchy that already has a group associated to @bfqd, all the nodes
++ * in the path to the root cgroup have one too.
++ *
++ * On locking: the queue lock protects the hierarchy (there is a hierarchy
++ * per device) while the bfqio_cgroup lock protects the list of groups
++ * belonging to the same cgroup.
++ */
++static void bfq_group_chain_link(struct bfq_data *bfqd,
++ struct cgroup_subsys_state *css,
++ struct bfq_group *leaf)
++{
++ struct bfqio_cgroup *bgrp;
++ struct bfq_group *bfqg, *next, *prev = NULL;
++ unsigned long flags;
++
++ assert_spin_locked(bfqd->queue->queue_lock);
++
++ for (; css != NULL && leaf != NULL; css = css->parent) {
++ bgrp = css_to_bfqio(css);
++ next = leaf->bfqd;
++
++ bfqg = bfqio_lookup_group(bgrp, bfqd);
++ BUG_ON(bfqg != NULL);
++
++ spin_lock_irqsave(&bgrp->lock, flags);
++
++ rcu_assign_pointer(leaf->bfqd, bfqd);
++ hlist_add_head_rcu(&leaf->group_node, &bgrp->group_data);
++ hlist_add_head(&leaf->bfqd_node, &bfqd->group_list);
++
++ spin_unlock_irqrestore(&bgrp->lock, flags);
++
++ prev = leaf;
++ leaf = next;
++ }
++
++ BUG_ON(css == NULL && leaf != NULL);
++ if (css != NULL && prev != NULL) {
++ bgrp = css_to_bfqio(css);
++ bfqg = bfqio_lookup_group(bgrp, bfqd);
++ bfq_group_set_parent(prev, bfqg);
++ }
++}
++
++/**
++ * bfq_find_alloc_group - return the group associated to @bfqd in @cgroup.
++ * @bfqd: queue descriptor.
++ * @css: the cgroup_subsys_state being searched for.
++ *
++ * Return a group associated to @bfqd in @cgroup, allocating one if
++ * necessary. When a group is returned all the cgroups in the path
++ * to the root have a group associated to @bfqd.
++ *
++ * If the allocation fails, return the root group: this breaks guarantees
++ * but is a safe fallback. If this loss becomes a problem it can be
++ * mitigated using the equivalent weight (given by the product of the
++ * weights of the groups in the path from @group to the root) in the
++ * root scheduler.
++ *
++ * We allocate all the missing nodes in the path from the leaf cgroup
++ * to the root and we connect the nodes only after all the allocations
++ * have been successful.
++ */
++static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
++ struct cgroup_subsys_state *css)
++{
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++ struct bfq_group *bfqg;
++
++ bfqg = bfqio_lookup_group(bgrp, bfqd);
++ if (bfqg != NULL)
++ return bfqg;
++
++ bfqg = bfq_group_chain_alloc(bfqd, css);
++ if (bfqg != NULL)
++ bfq_group_chain_link(bfqd, css, bfqg);
++ else
++ bfqg = bfqd->root_group;
++
++ return bfqg;
++}
++
++/**
++ * bfq_bfqq_move - migrate @bfqq to @bfqg.
++ * @bfqd: queue descriptor.
++ * @bfqq: the queue to move.
++ * @entity: @bfqq's entity.
++ * @bfqg: the group to move to.
++ *
++ * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
++ * it on the new one. Avoid putting the entity on the old group idle tree.
++ *
++ * Must be called under the queue lock; the cgroup owning @bfqg must
++ * not disappear (by now this just means that we are called under
++ * rcu_read_lock()).
++ */
++static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct bfq_entity *entity, struct bfq_group *bfqg)
++{
++ int busy, resume;
++
++ busy = bfq_bfqq_busy(bfqq);
++ resume = !RB_EMPTY_ROOT(&bfqq->sort_list);
++
++ BUG_ON(resume && !entity->on_st);
++ BUG_ON(busy && !resume && entity->on_st &&
++ bfqq != bfqd->in_service_queue);
++
++ if (busy) {
++ BUG_ON(atomic_read(&bfqq->ref) < 2);
++
++ if (!resume)
++ bfq_del_bfqq_busy(bfqd, bfqq, 0);
++ else
++ bfq_deactivate_bfqq(bfqd, bfqq, 0);
++ } else if (entity->on_st)
++ bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
++
++ /*
++ * Here we use a reference to bfqg. We don't need a refcounter
++ * as the cgroup reference will not be dropped, so that its
++ * destroy() callback will not be invoked.
++ */
++ entity->parent = bfqg->my_entity;
++ entity->sched_data = &bfqg->sched_data;
++
++ if (busy && resume)
++ bfq_activate_bfqq(bfqd, bfqq);
++
++ if (bfqd->in_service_queue == NULL && !bfqd->rq_in_driver)
++ bfq_schedule_dispatch(bfqd);
++}
++
++/**
++ * __bfq_bic_change_cgroup - move @bic to @cgroup.
++ * @bfqd: the queue descriptor.
++ * @bic: the bic to move.
++ * @css: the cgroup_subsys_state to move to.
++ *
++ * Move bic to cgroup, assuming that bfqd->queue is locked; the caller
++ * has to make sure that the reference to cgroup is valid across the call.
++ *
++ * NOTE: an alternative approach might have been to store the current
++ * cgroup in bfqq and getting a reference to it, reducing the lookup
++ * time here, at the price of slightly more complex code.
++ */
++static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
++ struct bfq_io_cq *bic,
++ struct cgroup_subsys_state *css)
++{
++ struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
++ struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
++ struct bfq_entity *entity;
++ struct bfq_group *bfqg;
++ struct bfqio_cgroup *bgrp;
++
++ bgrp = css_to_bfqio(css);
++
++ bfqg = bfq_find_alloc_group(bfqd, css);
++ if (async_bfqq != NULL) {
++ entity = &async_bfqq->entity;
++
++ if (entity->sched_data != &bfqg->sched_data) {
++ bic_set_bfqq(bic, NULL, 0);
++ bfq_log_bfqq(bfqd, async_bfqq,
++ "bic_change_group: %p %d",
++ async_bfqq, atomic_read(&async_bfqq->ref));
++ bfq_put_queue(async_bfqq);
++ }
++ }
++
++ if (sync_bfqq != NULL) {
++ entity = &sync_bfqq->entity;
++ if (entity->sched_data != &bfqg->sched_data)
++ bfq_bfqq_move(bfqd, sync_bfqq, entity, bfqg);
++ }
++
++ return bfqg;
++}
++
++/**
++ * bfq_bic_change_cgroup - move @bic to @cgroup.
++ * @bic: the bic being migrated.
++ * @cgroup: the destination cgroup.
++ *
++ * When the task owning @bic is moved to @cgroup, @bic is immediately
++ * moved into its new parent group.
++ */
++static void bfq_bic_change_cgroup(struct bfq_io_cq *bic,
++ struct cgroup_subsys_state *css)
++{
++ struct bfq_data *bfqd;
++ unsigned long uninitialized_var(flags);
++
++ bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data),
++ &flags);
++ if (bfqd != NULL) {
++ __bfq_bic_change_cgroup(bfqd, bic, css);
++ bfq_put_bfqd_unlock(bfqd, &flags);
++ }
++}
++
++/**
++ * bfq_bic_update_cgroup - update the cgroup of @bic.
++ * @bic: the @bic to update.
++ *
++ * Make sure that @bic is enqueued in the cgroup of the current task.
++ * We need this in addition to moving bics during the cgroup attach
++ * phase because the task owning @bic could be at its first disk
++ * access, or we may have ended up in the root cgroup as the result
++ * of a memory allocation failure; here we try to move to the
++ * right group.
++ *
++ * Must be called under the queue lock. It is safe to use the returned
++ * value even after the rcu_read_unlock() as the migration/destruction
++ * paths act under the queue lock too. IOW it is impossible to race with
++ * group migration/destruction and end up with an invalid group as:
++ * a) here cgroup has not yet been destroyed, nor its destroy callback
++ * has started execution, as current holds a reference to it,
++ * b) if it is destroyed after rcu_read_unlock() [after current is
++ * migrated to a different cgroup] its attach() callback will have
++ * taken care of removing all the references to the old cgroup data.
++ */
++static struct bfq_group *bfq_bic_update_cgroup(struct bfq_io_cq *bic)
++{
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++ struct bfq_group *bfqg;
++ struct cgroup_subsys_state *css;
++
++ BUG_ON(bfqd == NULL);
++
++ rcu_read_lock();
++ css = task_css(current, bfqio_cgrp_id);
++ bfqg = __bfq_bic_change_cgroup(bfqd, bic, css);
++ rcu_read_unlock();
++
++ return bfqg;
++}
++
++/**
++ * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
++ * @st: the service tree being flushed.
++ */
++static inline void bfq_flush_idle_tree(struct bfq_service_tree *st)
++{
++ struct bfq_entity *entity = st->first_idle;
++
++ for (; entity != NULL; entity = st->first_idle)
++ __bfq_deactivate_entity(entity, 0);
++}
++
++/**
++ * bfq_reparent_leaf_entity - move leaf entity to the root_group.
++ * @bfqd: the device data structure with the root group.
++ * @entity: the entity to move.
++ */
++static inline void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ BUG_ON(bfqq == NULL);
++ bfq_bfqq_move(bfqd, bfqq, entity, bfqd->root_group);
++ return;
++}
++
++/**
++ * bfq_reparent_active_entities - move to the root group all active
++ * entities.
++ * @bfqd: the device data structure with the root group.
++ * @bfqg: the group to move from.
++ * @st: the service tree with the entities.
++ *
++ * Needs queue_lock to be taken and reference to be valid over the call.
++ */
++static inline void bfq_reparent_active_entities(struct bfq_data *bfqd,
++ struct bfq_group *bfqg,
++ struct bfq_service_tree *st)
++{
++ struct rb_root *active = &st->active;
++ struct bfq_entity *entity = NULL;
++
++ if (!RB_EMPTY_ROOT(&st->active))
++ entity = bfq_entity_of(rb_first(active));
++
++ for (; entity != NULL; entity = bfq_entity_of(rb_first(active)))
++ bfq_reparent_leaf_entity(bfqd, entity);
++
++ if (bfqg->sched_data.in_service_entity != NULL)
++ bfq_reparent_leaf_entity(bfqd,
++ bfqg->sched_data.in_service_entity);
++
++ return;
++}
++
++/**
++ * bfq_destroy_group - destroy @bfqg.
++ * @bgrp: the bfqio_cgroup containing @bfqg.
++ * @bfqg: the group being destroyed.
++ *
++ * Destroy @bfqg, making sure that it is not referenced from its parent.
++ */
++static void bfq_destroy_group(struct bfqio_cgroup *bgrp, struct bfq_group *bfqg)
++{
++ struct bfq_data *bfqd;
++ struct bfq_service_tree *st;
++ struct bfq_entity *entity = bfqg->my_entity;
++ unsigned long uninitialized_var(flags);
++ int i;
++
++ hlist_del(&bfqg->group_node);
++
++ /*
++ * Empty all service_trees belonging to this group before
++ * deactivating the group itself.
++ */
++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
++ st = bfqg->sched_data.service_tree + i;
++
++ /*
++ * The idle tree may still contain bfq_queues belonging
++ * to exited tasks because they never migrated to a different
++ * cgroup from the one being destroyed now. No one else
++ * can access them so it's safe to act without any lock.
++ */
++ bfq_flush_idle_tree(st);
++
++ /*
++ * It may happen that some queues are still active
++ * (busy) upon group destruction (if the corresponding
++ * processes have been forced to terminate). We move
++ * all the leaf entities corresponding to these queues
++ * to the root_group.
++ * Also, it may happen that the group has an entity
++ * in service, which is disconnected from the active
++ * tree: it must be moved, too.
++ * There is no need to put the sync queues, as the
++ * scheduler has taken no reference.
++ */
++ bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
++ if (bfqd != NULL) {
++ bfq_reparent_active_entities(bfqd, bfqg, st);
++ bfq_put_bfqd_unlock(bfqd, &flags);
++ }
++ BUG_ON(!RB_EMPTY_ROOT(&st->active));
++ BUG_ON(!RB_EMPTY_ROOT(&st->idle));
++ }
++ BUG_ON(bfqg->sched_data.next_in_service != NULL);
++ BUG_ON(bfqg->sched_data.in_service_entity != NULL);
++
++ /*
++ * We may race with device destruction, take extra care when
++ * dereferencing bfqg->bfqd.
++ */
++ bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
++ if (bfqd != NULL) {
++ hlist_del(&bfqg->bfqd_node);
++ __bfq_deactivate_entity(entity, 0);
++ bfq_put_async_queues(bfqd, bfqg);
++ bfq_put_bfqd_unlock(bfqd, &flags);
++ }
++ BUG_ON(entity->tree != NULL);
++
++ /*
++ * No need to defer the kfree() to the end of the RCU grace
++ * period: we are called from the destroy() callback of our
++ * cgroup, so we can be sure that no one is a) still using
++ * this cgroup or b) doing lookups in it.
++ */
++ kfree(bfqg);
++}
++
++static void bfq_end_wr_async(struct bfq_data *bfqd)
++{
++ struct hlist_node *tmp;
++ struct bfq_group *bfqg;
++
++ hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node)
++ bfq_end_wr_async_queues(bfqd, bfqg);
++ bfq_end_wr_async_queues(bfqd, bfqd->root_group);
++}
++
++/**
++ * bfq_disconnect_groups - disconnect @bfqd from all its groups.
++ * @bfqd: the device descriptor being exited.
++ *
++ * When the device exits we just make sure that no lookup can return
++ * the now unused group structures. They will be deallocated on cgroup
++ * destruction.
++ */
++static void bfq_disconnect_groups(struct bfq_data *bfqd)
++{
++ struct hlist_node *tmp;
++ struct bfq_group *bfqg;
++
++ bfq_log(bfqd, "disconnect_groups beginning");
++ hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node) {
++ hlist_del(&bfqg->bfqd_node);
++
++ __bfq_deactivate_entity(bfqg->my_entity, 0);
++
++ /*
++ * Don't remove from the group hash, just set an
++ * invalid key. No lookups can race with the
++ * assignment as bfqd is being destroyed; this
++ * implies also that new elements cannot be added
++ * to the list.
++ */
++ rcu_assign_pointer(bfqg->bfqd, NULL);
++
++ bfq_log(bfqd, "disconnect_groups: put async for group %p",
++ bfqg);
++ bfq_put_async_queues(bfqd, bfqg);
++ }
++}
++
++static inline void bfq_free_root_group(struct bfq_data *bfqd)
++{
++ struct bfqio_cgroup *bgrp = &bfqio_root_cgroup;
++ struct bfq_group *bfqg = bfqd->root_group;
++
++ bfq_put_async_queues(bfqd, bfqg);
++
++ spin_lock_irq(&bgrp->lock);
++ hlist_del_rcu(&bfqg->group_node);
++ spin_unlock_irq(&bgrp->lock);
++
++ /*
++ * No need to synchronize_rcu() here: since the device is gone
++ * there cannot be any read-side access to its root_group.
++ */
++ kfree(bfqg);
++}
++
++static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
++{
++ struct bfq_group *bfqg;
++ struct bfqio_cgroup *bgrp;
++ int i;
++
++ bfqg = kzalloc_node(sizeof(*bfqg), GFP_KERNEL, node);
++ if (bfqg == NULL)
++ return NULL;
++
++ bfqg->entity.parent = NULL;
++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
++ bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
++
++ bgrp = &bfqio_root_cgroup;
++ spin_lock_irq(&bgrp->lock);
++ rcu_assign_pointer(bfqg->bfqd, bfqd);
++ hlist_add_head_rcu(&bfqg->group_node, &bgrp->group_data);
++ spin_unlock_irq(&bgrp->lock);
++
++ return bfqg;
++}
++
++#define SHOW_FUNCTION(__VAR) \
++static u64 bfqio_cgroup_##__VAR##_read(struct cgroup_subsys_state *css, \
++ struct cftype *cftype) \
++{ \
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css); \
++ u64 ret = -ENODEV; \
++ \
++ mutex_lock(&bfqio_mutex); \
++ if (bfqio_is_removed(bgrp)) \
++ goto out_unlock; \
++ \
++ spin_lock_irq(&bgrp->lock); \
++ ret = bgrp->__VAR; \
++ spin_unlock_irq(&bgrp->lock); \
++ \
++out_unlock: \
++ mutex_unlock(&bfqio_mutex); \
++ return ret; \
++}
++
++SHOW_FUNCTION(weight);
++SHOW_FUNCTION(ioprio);
++SHOW_FUNCTION(ioprio_class);
++#undef SHOW_FUNCTION
++
++#define STORE_FUNCTION(__VAR, __MIN, __MAX) \
++static int bfqio_cgroup_##__VAR##_write(struct cgroup_subsys_state *css,\
++ struct cftype *cftype, \
++ u64 val) \
++{ \
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css); \
++ struct bfq_group *bfqg; \
++ int ret = -EINVAL; \
++ \
++ if (val < (__MIN) || val > (__MAX)) \
++ return ret; \
++ \
++ ret = -ENODEV; \
++ mutex_lock(&bfqio_mutex); \
++ if (bfqio_is_removed(bgrp)) \
++ goto out_unlock; \
++ ret = 0; \
++ \
++ spin_lock_irq(&bgrp->lock); \
++ bgrp->__VAR = (unsigned short)val; \
++ hlist_for_each_entry(bfqg, &bgrp->group_data, group_node) { \
++ /* \
++ * Setting the ioprio_changed flag of the entity \
++ * to 1 with new_##__VAR == ##__VAR would re-set \
++ * the value of the weight to its ioprio mapping. \
++ * Set the flag only if necessary. \
++ */ \
++ if ((unsigned short)val != bfqg->entity.new_##__VAR) { \
++ bfqg->entity.new_##__VAR = (unsigned short)val; \
++ /* \
++ * Make sure that the above new value has been \
++ * stored in bfqg->entity.new_##__VAR before \
++ * setting the ioprio_changed flag. In fact, \
++ * this flag may be read asynchronously (in \
++ * critical sections protected by a different \
++ * lock than that held here), and finding this \
++ * flag set may cause the execution of the code \
++ * for updating parameters whose value may \
++ * depend also on bfqg->entity.new_##__VAR (in \
++ * __bfq_entity_update_weight_prio). \
++ * This barrier makes sure that the new value \
++ * of bfqg->entity.new_##__VAR is correctly \
++ * seen in that code. \
++ */ \
++ smp_wmb(); \
++ bfqg->entity.ioprio_changed = 1; \
++ } \
++ } \
++ spin_unlock_irq(&bgrp->lock); \
++ \
++out_unlock: \
++ mutex_unlock(&bfqio_mutex); \
++ return ret; \
++}
++
++STORE_FUNCTION(weight, BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT);
++STORE_FUNCTION(ioprio, 0, IOPRIO_BE_NR - 1);
++STORE_FUNCTION(ioprio_class, IOPRIO_CLASS_RT, IOPRIO_CLASS_IDLE);
++#undef STORE_FUNCTION
++
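++/*
++ * The two macros above expand to bfqio_cgroup_weight_read()/_write(),
++ * bfqio_cgroup_ioprio_read()/_write() and
++ * bfqio_cgroup_ioprio_class_read()/_write(), which are wired up in
++ * bfqio_files[] below.
++ */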
++static struct cftype bfqio_files[] = {
++ {
++ .name = "weight",
++ .read_u64 = bfqio_cgroup_weight_read,
++ .write_u64 = bfqio_cgroup_weight_write,
++ },
++ {
++ .name = "ioprio",
++ .read_u64 = bfqio_cgroup_ioprio_read,
++ .write_u64 = bfqio_cgroup_ioprio_write,
++ },
++ {
++ .name = "ioprio_class",
++ .read_u64 = bfqio_cgroup_ioprio_class_read,
++ .write_u64 = bfqio_cgroup_ioprio_class_write,
++ },
++ { }, /* terminate */
++};
++
++static struct cgroup_subsys_state *bfqio_create(struct cgroup_subsys_state
++ *parent_css)
++{
++ struct bfqio_cgroup *bgrp;
++
++ if (parent_css != NULL) {
++ bgrp = kzalloc(sizeof(*bgrp), GFP_KERNEL);
++ if (bgrp == NULL)
++ return ERR_PTR(-ENOMEM);
++ } else
++ bgrp = &bfqio_root_cgroup;
++
++ spin_lock_init(&bgrp->lock);
++ INIT_HLIST_HEAD(&bgrp->group_data);
++ bgrp->ioprio = BFQ_DEFAULT_GRP_IOPRIO;
++ bgrp->ioprio_class = BFQ_DEFAULT_GRP_CLASS;
++
++ return &bgrp->css;
++}
++
++/*
++ * We cannot support shared io contexts, as we have no means to support
++ * two tasks with the same ioc in two different groups without major rework
++ * of the main bic/bfqq data structures. For now we allow a task to change
++ * its cgroup only if it's the only owner of its ioc; the drawback of this
++ * behavior is that a group containing a task that forked using CLONE_IO
++ * will not be destroyed until the tasks sharing the ioc die.
++ */
++static int bfqio_can_attach(struct cgroup_subsys_state *css,
++ struct cgroup_taskset *tset)
++{
++ struct task_struct *task;
++ struct io_context *ioc;
++ int ret = 0;
++
++ cgroup_taskset_for_each(task, tset) {
++ /*
++ * task_lock() is needed to avoid races with
++ * exit_io_context()
++ */
++ task_lock(task);
++ ioc = task->io_context;
++ if (ioc != NULL && atomic_read(&ioc->nr_tasks) > 1)
++ /*
++ * ioc == NULL means that the task is either too
++			 * young or exiting: if it still has no ioc, the
++			 * ioc cannot be shared; if the task is exiting, the
++ * attach will fail anyway, no matter what we
++ * return here.
++ */
++ ret = -EINVAL;
++ task_unlock(task);
++ if (ret)
++ break;
++ }
++
++ return ret;
++}
++
++static void bfqio_attach(struct cgroup_subsys_state *css,
++ struct cgroup_taskset *tset)
++{
++ struct task_struct *task;
++ struct io_context *ioc;
++ struct io_cq *icq;
++
++ /*
++ * IMPORTANT NOTE: The move of more than one process at a time to a
++ * new group has not yet been tested.
++ */
++ cgroup_taskset_for_each(task, tset) {
++ ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
++ if (ioc) {
++ /*
++ * Handle cgroup change here.
++ */
++ rcu_read_lock();
++ hlist_for_each_entry_rcu(icq, &ioc->icq_list, ioc_node)
++ if (!strncmp(
++ icq->q->elevator->type->elevator_name,
++ "bfq", ELV_NAME_MAX))
++ bfq_bic_change_cgroup(icq_to_bic(icq),
++ css);
++ rcu_read_unlock();
++ put_io_context(ioc);
++ }
++ }
++}
++
++static void bfqio_destroy(struct cgroup_subsys_state *css)
++{
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++ struct hlist_node *tmp;
++ struct bfq_group *bfqg;
++
++ /*
++ * Since we are destroying the cgroup, there are no more tasks
++	 * referencing it, and all the RCU read-side critical sections that
++	 * may have referenced it have ended (as the destruction of the parent
++ * cgroup is RCU-safe); bgrp->group_data will not be accessed by
++ * anything else and we don't need any synchronization.
++ */
++ hlist_for_each_entry_safe(bfqg, tmp, &bgrp->group_data, group_node)
++ bfq_destroy_group(bgrp, bfqg);
++
++ BUG_ON(!hlist_empty(&bgrp->group_data));
++
++ kfree(bgrp);
++}
++
++static int bfqio_css_online(struct cgroup_subsys_state *css)
++{
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++
++ mutex_lock(&bfqio_mutex);
++ bgrp->online = true;
++ mutex_unlock(&bfqio_mutex);
++
++ return 0;
++}
++
++static void bfqio_css_offline(struct cgroup_subsys_state *css)
++{
++ struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++
++ mutex_lock(&bfqio_mutex);
++ bgrp->online = false;
++ mutex_unlock(&bfqio_mutex);
++}
++
++struct cgroup_subsys bfqio_cgrp_subsys = {
++ .css_alloc = bfqio_create,
++ .css_online = bfqio_css_online,
++ .css_offline = bfqio_css_offline,
++ .can_attach = bfqio_can_attach,
++ .attach = bfqio_attach,
++ .css_free = bfqio_destroy,
++ .legacy_cftypes = bfqio_files,
++};
++#else
++static inline void bfq_init_entity(struct bfq_entity *entity,
++ struct bfq_group *bfqg)
++{
++ entity->weight = entity->new_weight;
++ entity->orig_weight = entity->new_weight;
++ entity->ioprio = entity->new_ioprio;
++ entity->ioprio_class = entity->new_ioprio_class;
++ entity->sched_data = &bfqg->sched_data;
++}
++
++static inline struct bfq_group *
++bfq_bic_update_cgroup(struct bfq_io_cq *bic)
++{
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++ return bfqd->root_group;
++}
++
++static inline void bfq_bfqq_move(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct bfq_entity *entity,
++ struct bfq_group *bfqg)
++{
++}
++
++static void bfq_end_wr_async(struct bfq_data *bfqd)
++{
++ bfq_end_wr_async_queues(bfqd, bfqd->root_group);
++}
++
++static inline void bfq_disconnect_groups(struct bfq_data *bfqd)
++{
++ bfq_put_async_queues(bfqd, bfqd->root_group);
++}
++
++static inline void bfq_free_root_group(struct bfq_data *bfqd)
++{
++ kfree(bfqd->root_group);
++}
++
++static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
++{
++ struct bfq_group *bfqg;
++ int i;
++
++ bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
++ if (bfqg == NULL)
++ return NULL;
++
++ for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
++ bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
++
++ return bfqg;
++}
++#endif
+diff -Nur linux-4.1.3/block/bfq.h linux-xbian-imx6/block/bfq.h
+--- linux-4.1.3/block/bfq.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/block/bfq.h 2015-07-27 23:13:03.604123194 +0200
+@@ -0,0 +1,811 @@
++/*
++ * BFQ-v7r7 for 4.0.0: data structures and common functions prototypes.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ */
++
++#ifndef _BFQ_H
++#define _BFQ_H
++
++#include <linux/blktrace_api.h>
++#include <linux/hrtimer.h>
++#include <linux/ioprio.h>
++#include <linux/rbtree.h>
++
++#define BFQ_IOPRIO_CLASSES 3
++#define BFQ_CL_IDLE_TIMEOUT (HZ/5)
++
++#define BFQ_MIN_WEIGHT 1
++#define BFQ_MAX_WEIGHT 1000
++
++#define BFQ_DEFAULT_QUEUE_IOPRIO 4
++
++#define BFQ_DEFAULT_GRP_WEIGHT 10
++#define BFQ_DEFAULT_GRP_IOPRIO 0
++#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE
++
++struct bfq_entity;
++
++/**
++ * struct bfq_service_tree - per ioprio_class service tree.
++ * @active: tree for active entities (i.e., those backlogged).
++ * @idle: tree for idle entities (i.e., those not backlogged, with V <= F_i).
++ * @first_idle: idle entity with minimum F_i.
++ * @last_idle: idle entity with maximum F_i.
++ * @vtime: scheduler virtual time.
++ * @wsum: scheduler weight sum; active and idle entities contribute to it.
++ *
++ * Each service tree represents a B-WF2Q+ scheduler on its own. Each
++ * ioprio_class has its own independent scheduler, and so its own
++ * bfq_service_tree. All the fields are protected by the queue lock
++ * of the containing bfqd.
++ */
++struct bfq_service_tree {
++ struct rb_root active;
++ struct rb_root idle;
++
++ struct bfq_entity *first_idle;
++ struct bfq_entity *last_idle;
++
++ u64 vtime;
++ unsigned long wsum;
++};
++
++/**
++ * struct bfq_sched_data - multi-class scheduler.
++ * @in_service_entity: entity in service.
++ * @next_in_service: head-of-the-line entity in the scheduler.
++ * @service_tree: array of service trees, one per ioprio_class.
++ *
++ * bfq_sched_data is the basic scheduler queue. It supports three
++ * ioprio_classes, and can be used either as a toplevel queue or as
++ * an intermediate queue on a hierarchical setup.
++ * @next_in_service points to the active entity of the sched_data
++ * service trees that will be scheduled next.
++ *
++ * The supported ioprio_classes are the same as in CFQ, in descending
++ * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
++ * Requests from higher priority queues are served before all the
++ * requests from lower priority queues; among requests of the same
++ * queue requests are served according to B-WF2Q+.
++ * All the fields are protected by the queue lock of the containing bfqd.
++ */
++struct bfq_sched_data {
++ struct bfq_entity *in_service_entity;
++ struct bfq_entity *next_in_service;
++ struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
++};
++
++/**
++ * struct bfq_weight_counter - counter of the number of all active entities
++ * with a given weight.
++ * @weight: weight of the entities that this counter refers to.
++ * @num_active: number of active entities with this weight.
++ * @weights_node: weights tree member (see bfq_data's @queue_weights_tree
++ * and @group_weights_tree).
++ */
++struct bfq_weight_counter {
++ short int weight;
++ unsigned int num_active;
++ struct rb_node weights_node;
++};
++
++/**
++ * struct bfq_entity - schedulable entity.
++ * @rb_node: service_tree member.
++ * @weight_counter: pointer to the weight counter associated with this entity.
++ * @on_st: flag, true if the entity is on a tree (either the active or
++ * the idle one of its service_tree).
++ * @finish: B-WF2Q+ finish timestamp (aka F_i).
++ * @start: B-WF2Q+ start timestamp (aka S_i).
++ * @tree: tree the entity is enqueued into; %NULL if not on a tree.
++ * @min_start: minimum start time of the (active) subtree rooted at
++ * this entity; used for O(log N) lookups into active trees.
++ * @service: service received during the last round of service.
++ * @budget: budget used to calculate F_i; F_i = S_i + @budget / @weight.
++ * @weight: weight of the queue
++ * @parent: parent entity, for hierarchical scheduling.
++ * @my_sched_data: for non-leaf nodes in the cgroup hierarchy, the
++ * associated scheduler queue, %NULL on leaf nodes.
++ * @sched_data: the scheduler queue this entity belongs to.
++ * @ioprio: the ioprio in use.
++ * @new_weight: when a weight change is requested, the new weight value.
++ * @orig_weight: original weight, used to implement weight boosting
++ * @new_ioprio: when an ioprio change is requested, the new ioprio value.
++ * @ioprio_class: the ioprio_class in use.
++ * @new_ioprio_class: when an ioprio_class change is requested, the new
++ * ioprio_class value.
++ * @ioprio_changed: flag, true when the user requested a weight, ioprio or
++ * ioprio_class change.
++ *
++ * A bfq_entity is used to represent either a bfq_queue (leaf node in the
++ * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each
++ * entity belongs to the sched_data of the parent group in the cgroup
++ * hierarchy. Non-leaf entities have also their own sched_data, stored
++ * in @my_sched_data.
++ *
++ * Each entity stores independently its priority values; this would
++ * allow different weights on different devices, but this
++ * functionality is not exported to userspace for now. Priorities and
++ * weights are updated lazily, first storing the new values into the
++ * new_* fields, then setting the @ioprio_changed flag. As soon as
++ * there is a transition in the entity state that allows the priority
++ * update to take place the effective and the requested priority
++ * values are synchronized.
++ *
++ * Unless cgroups are used, the weight value is calculated from the
++ * ioprio to export the same interface as CFQ. When dealing with
++ * ``well-behaved'' queues (i.e., queues that do not spend too much
++ * time consuming their budget and have truly sequential behavior, and
++ * when there are no external factors breaking anticipation) the
++ * relative weights at each level of the cgroups hierarchy should be
++ * guaranteed. All the fields are protected by the queue lock of the
++ * containing bfqd.
++ */
++struct bfq_entity {
++ struct rb_node rb_node;
++ struct bfq_weight_counter *weight_counter;
++
++ int on_st;
++
++ u64 finish;
++ u64 start;
++
++ struct rb_root *tree;
++
++ u64 min_start;
++
++ unsigned long service, budget;
++ unsigned short weight, new_weight;
++ unsigned short orig_weight;
++
++ struct bfq_entity *parent;
++
++ struct bfq_sched_data *my_sched_data;
++ struct bfq_sched_data *sched_data;
++
++ unsigned short ioprio, new_ioprio;
++ unsigned short ioprio_class, new_ioprio_class;
++
++ int ioprio_changed;
++};
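++
++/*
++ * Numerical sketch of the timestamping documented above (values are
++ * illustrative only): an entity with start timestamp S_i = 1000,
++ * @budget = 8192 sectors and @weight = 100 is assigned the finish
++ * timestamp F_i = S_i + budget/weight = 1000 + 81 (integer division).
++ * Doubling the weight halves the increment, so the entity re-enters
++ * service roughly twice as often; this is how weights translate into
++ * service shares.
++ */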
++
++struct bfq_group;
++
++/**
++ * struct bfq_queue - leaf schedulable entity.
++ * @ref: reference counter.
++ * @bfqd: parent bfq_data.
++ * @new_bfqq: shared bfq_queue if queue is cooperating with
++ * one or more other queues.
++ * @pos_node: request-position tree member (see bfq_data's @rq_pos_tree).
++ * @pos_root: request-position tree root (see bfq_data's @rq_pos_tree).
++ * @sort_list: sorted list of pending requests.
++ * @next_rq: if fifo isn't expired, next request to serve.
++ * @queued: nr of requests queued in @sort_list.
++ * @allocated: currently allocated requests.
++ * @meta_pending: pending metadata requests.
++ * @fifo: fifo list of requests in sort_list.
++ * @entity: entity representing this queue in the scheduler.
++ * @max_budget: maximum budget allowed from the feedback mechanism.
++ * @budget_timeout: budget expiration (in jiffies).
++ * @dispatched: number of requests on the dispatch list or inside driver.
++ * @flags: status flags.
++ * @bfqq_list: node for active/idle bfqq list inside our bfqd.
++ * @burst_list_node: node for the device's burst list.
++ * @seek_samples: number of seeks sampled
++ * @seek_total: sum of the distances of the seeks sampled
++ * @seek_mean: mean seek distance
++ * @last_request_pos: position of the last request enqueued
++ * @requests_within_timer: number of consecutive pairs of request completion
++ * and arrival, such that the queue becomes idle
++ * after the completion, but the next request arrives
++ * within an idle time slice; used only if the queue's
++ * IO_bound has been cleared.
++ * @pid: pid of the process owning the queue, used for logging purposes.
++ * @last_wr_start_finish: start time of the current weight-raising period if
++ * the @bfq-queue is being weight-raised, otherwise
++ * finish time of the last weight-raising period
++ * @wr_cur_max_time: current max raising time for this queue
++ * @soft_rt_next_start: minimum time instant such that, only if a new
++ * request is enqueued after this time instant in an
++ * idle @bfq_queue with no outstanding requests, then
++ *                     the task associated with the queue is deemed as
++ * soft real-time (see the comments to the function
++ * bfq_bfqq_softrt_next_start())
++ * @last_idle_bklogged: time of the last transition of the @bfq_queue from
++ * idle to backlogged
++ * @service_from_backlogged: cumulative service received from the @bfq_queue
++ * since the last transition from idle to
++ * backlogged
++ * @bic: pointer to the bfq_io_cq owning the bfq_queue, set to %NULL if the
++ * queue is shared
++ *
++ * A bfq_queue is a leaf request queue; it can be associated with one
++ * or more io_contexts, if it is async or shared between cooperating
++ * processes. @cgroup holds a reference to the cgroup, to be sure that it
++ * does not disappear while a bfqq still references it (mostly to avoid
++ * races between request issuing and task migration followed by cgroup
++ * destruction).
++ * All the fields are protected by the queue lock of the containing bfqd.
++ */
++struct bfq_queue {
++ atomic_t ref;
++ struct bfq_data *bfqd;
++
++ /* fields for cooperating queues handling */
++ struct bfq_queue *new_bfqq;
++ struct rb_node pos_node;
++ struct rb_root *pos_root;
++
++ struct rb_root sort_list;
++ struct request *next_rq;
++ int queued[2];
++ int allocated[2];
++ int meta_pending;
++ struct list_head fifo;
++
++ struct bfq_entity entity;
++
++ unsigned long max_budget;
++ unsigned long budget_timeout;
++
++ int dispatched;
++
++ unsigned int flags;
++
++ struct list_head bfqq_list;
++
++ struct hlist_node burst_list_node;
++
++ unsigned int seek_samples;
++ u64 seek_total;
++ sector_t seek_mean;
++ sector_t last_request_pos;
++
++ unsigned int requests_within_timer;
++
++ pid_t pid;
++ struct bfq_io_cq *bic;
++
++ /* weight-raising fields */
++ unsigned long wr_cur_max_time;
++ unsigned long soft_rt_next_start;
++ unsigned long last_wr_start_finish;
++ unsigned int wr_coeff;
++ unsigned long last_idle_bklogged;
++ unsigned long service_from_backlogged;
++};
++
++/**
++ * struct bfq_ttime - per process thinktime stats.
++ * @ttime_total: total process thinktime
++ * @ttime_samples: number of thinktime samples
++ * @ttime_mean: average process thinktime
++ */
++struct bfq_ttime {
++ unsigned long last_end_request;
++
++ unsigned long ttime_total;
++ unsigned long ttime_samples;
++ unsigned long ttime_mean;
++};
++
++/**
++ * struct bfq_io_cq - per (request_queue, io_context) structure.
++ * @icq: associated io_cq structure
++ * @bfqq: array of two process queues, the sync and the async
++ * @ttime: associated @bfq_ttime struct
++ * @wr_time_left: snapshot of the time left before weight raising ends
++ *                for the sync queue associated with this process; this
++ *                snapshot is taken to remember this value while the weight
++ *                raising is suspended because the queue is merged with a
++ *                shared queue, and is used to set @wr_cur_max_time
++ * when the queue is split from the shared queue and its
++ * weight is raised again
++ * @saved_idle_window: same purpose as the previous field for the idle
++ * window
++ * @saved_IO_bound: same purpose as the previous two fields for the I/O
++ * bound classification of a queue
++ * @saved_in_large_burst: same purpose as the previous fields for the
++ * value of the field keeping the queue's belonging
++ * to a large burst
++ * @was_in_burst_list: true if the queue belonged to a burst list
++ * before its merge with another cooperating queue
++ * @cooperations: counter of consecutive successful queue merges underwent
++ * by any of the process' @bfq_queues
++ * @failed_cooperations: counter of consecutive failed queue merges of any
++ * of the process' @bfq_queues
++ */
++struct bfq_io_cq {
++ struct io_cq icq; /* must be the first member */
++ struct bfq_queue *bfqq[2];
++ struct bfq_ttime ttime;
++ int ioprio;
++
++ unsigned int wr_time_left;
++ bool saved_idle_window;
++ bool saved_IO_bound;
++
++ bool saved_in_large_burst;
++ bool was_in_burst_list;
++
++ unsigned int cooperations;
++ unsigned int failed_cooperations;
++};
++
++enum bfq_device_speed {
++ BFQ_BFQD_FAST,
++ BFQ_BFQD_SLOW,
++};
++
++/**
++ * struct bfq_data - per device data structure.
++ * @queue: request queue for the managed device.
++ * @root_group: root bfq_group for the device.
++ * @rq_pos_tree: rbtree sorted by next_request position, used when
++ * determining if two or more queues have interleaving
++ * requests (see bfq_close_cooperator()).
++ * @active_numerous_groups: number of bfq_groups containing more than one
++ * active @bfq_entity.
++ * @queue_weights_tree: rbtree of weight counters of @bfq_queues, sorted by
++ * weight. Used to keep track of whether all @bfq_queues
++ * have the same weight. The tree contains one counter
++ * for each distinct weight associated to some active
++ * and not weight-raised @bfq_queue (see the comments to
++ * the functions bfq_weights_tree_[add|remove] for
++ * further details).
++ * @group_weights_tree: rbtree of non-queue @bfq_entity weight counters, sorted
++ * by weight. Used to keep track of whether all
++ * @bfq_groups have the same weight. The tree contains
++ * one counter for each distinct weight associated to
++ * some active @bfq_group (see the comments to the
++ * functions bfq_weights_tree_[add|remove] for further
++ * details).
++ * @busy_queues: number of bfq_queues containing requests (including the
++ * queue in service, even if it is idling).
++ * @busy_in_flight_queues: number of @bfq_queues containing pending or
++ * in-flight requests, plus the @bfq_queue in
++ * service, even if idle but waiting for the
++ * possible arrival of its next sync request. This
++ * field is updated only if the device is rotational,
++ * but used only if the device is also NCQ-capable.
++ * The reason why the field is updated also for non-
++ * NCQ-capable rotational devices is related to the
++ * fact that the value of @hw_tag may be set also
++ * later than when busy_in_flight_queues may need to
++ * be incremented for the first time(s). Taking also
++ * this possibility into account, to avoid unbalanced
++ * increments/decrements, would imply more overhead
++ * than just updating busy_in_flight_queues
++ * regardless of the value of @hw_tag.
++ * @const_seeky_busy_in_flight_queues: number of constantly-seeky @bfq_queues
++ * (that is, seeky queues that expired
++ * for budget timeout at least once)
++ * containing pending or in-flight
++ * requests, including the in-service
++ * @bfq_queue if constantly seeky. This
++ * field is updated only if the device
++ * is rotational, but used only if the
++ * device is also NCQ-capable (see the
++ * comments to @busy_in_flight_queues).
++ * @wr_busy_queues: number of weight-raised busy @bfq_queues.
++ * @queued: number of queued requests.
++ * @rq_in_driver: number of requests dispatched and waiting for completion.
++ * @sync_flight: number of sync requests in the driver.
++ * @max_rq_in_driver: max number of reqs in driver in the last
++ * @hw_tag_samples completed requests.
++ * @hw_tag_samples: nr of samples used to calculate hw_tag.
++ * @hw_tag: flag set to one if the driver is showing a queueing behavior.
++ * @budgets_assigned: number of budgets assigned.
++ * @idle_slice_timer: timer set when idling for the next sequential request
++ * from the queue in service.
++ * @unplug_work: delayed work to restart dispatching on the request queue.
++ * @in_service_queue: bfq_queue in service.
++ * @in_service_bic: bfq_io_cq (bic) associated with the @in_service_queue.
++ * @last_position: on-disk position of the last served request.
++ * @last_budget_start: beginning of the last budget.
++ * @last_idling_start: beginning of the last idle slice.
++ * @peak_rate: peak transfer rate observed for a budget.
++ * @peak_rate_samples: number of samples used to calculate @peak_rate.
++ * @bfq_max_budget: maximum budget allotted to a bfq_queue before
++ * rescheduling.
++ * @group_list: list of all the bfq_groups active on the device.
++ * @active_list: list of all the bfq_queues active on the device.
++ * @idle_list: list of all the bfq_queues idle on the device.
++ * @bfq_quantum: max number of requests dispatched per dispatch round.
++ * @bfq_fifo_expire: timeout for async/sync requests; when it expires
++ * requests are served in fifo order.
++ * @bfq_back_penalty: weight of backward seeks wrt forward ones.
++ * @bfq_back_max: maximum allowed backward seek.
++ * @bfq_slice_idle: maximum idling time.
++ * @bfq_user_max_budget: user-configured max budget value
++ * (0 for auto-tuning).
++ * @bfq_max_budget_async_rq: maximum budget (in nr of requests) allotted to
++ * async queues.
++ * @bfq_timeout: timeout for bfq_queues to consume their budget; used
++ *               to prevent seeky queues from imposing long latencies on
++ *               well-behaved ones (this also implies that seeky queues cannot
++ *               receive guarantees in the service domain; after a timeout
++ *               they are charged for the whole allocated budget, to try
++ *               to preserve reasonably fair behavior among them, but
++ * without service-domain guarantees).
++ * @bfq_coop_thresh: number of queue merges after which a @bfq_queue is
++ * no more granted any weight-raising.
++ * @bfq_failed_cooperations: number of consecutive failed cooperation
++ * chances after which weight-raising is restored
++ * to a queue subject to more than bfq_coop_thresh
++ * queue merges.
++ * @bfq_requests_within_timer: number of consecutive requests that must be
++ * issued within the idle time slice to set
++ * again idling to a queue which was marked as
++ * non-I/O-bound (see the definition of the
++ * IO_bound flag for further details).
++ * @last_ins_in_burst: last time at which a queue entered the current
++ * burst of queues being activated shortly after
++ * each other; for more details about this and the
++ * following parameters related to a burst of
++ * activations, see the comments to the function
++ * @bfq_handle_burst.
++ * @bfq_burst_interval: reference time interval used to decide whether a
++ * queue has been activated shortly after
++ * @last_ins_in_burst.
++ * @burst_size: number of queues in the current burst of queue activations.
++ * @bfq_large_burst_thresh: maximum burst size above which the current
++ * queue-activation burst is deemed as 'large'.
++ * @large_burst: true if a large queue-activation burst is in progress.
++ * @burst_list: head of the burst list (as for the above fields, more details
++ * in the comments to the function bfq_handle_burst).
++ * @low_latency: if set to true, low-latency heuristics are enabled.
++ * @bfq_wr_coeff: maximum factor by which the weight of a weight-raised
++ * queue is multiplied.
++ * @bfq_wr_max_time: maximum duration of a weight-raising period (jiffies).
++ * @bfq_wr_rt_max_time: maximum duration for soft real-time processes.
++ * @bfq_wr_min_idle_time: minimum idle period after which weight-raising
++ * may be reactivated for a queue (in jiffies).
++ * @bfq_wr_min_inter_arr_async: minimum period between request arrivals
++ * after which weight-raising may be
++ * reactivated for an already busy queue
++ * (in jiffies).
++ * @bfq_wr_max_softrt_rate: max service-rate for a soft real-time queue,
++ * sectors per seconds.
++ * @RT_prod: cached value of the product R*T used for computing the maximum
++ * duration of the weight raising automatically.
++ * @device_speed: device-speed class for the low-latency heuristic.
++ * @oom_bfqq: fallback dummy bfqq for extreme OOM conditions.
++ *
++ * All the fields are protected by the @queue lock.
++ */
++struct bfq_data {
++ struct request_queue *queue;
++
++ struct bfq_group *root_group;
++ struct rb_root rq_pos_tree;
++
++#ifdef CONFIG_CGROUP_BFQIO
++ int active_numerous_groups;
++#endif
++
++ struct rb_root queue_weights_tree;
++ struct rb_root group_weights_tree;
++
++ int busy_queues;
++ int busy_in_flight_queues;
++ int const_seeky_busy_in_flight_queues;
++ int wr_busy_queues;
++ int queued;
++ int rq_in_driver;
++ int sync_flight;
++
++ int max_rq_in_driver;
++ int hw_tag_samples;
++ int hw_tag;
++
++ int budgets_assigned;
++
++ struct timer_list idle_slice_timer;
++ struct work_struct unplug_work;
++
++ struct bfq_queue *in_service_queue;
++ struct bfq_io_cq *in_service_bic;
++
++ sector_t last_position;
++
++ ktime_t last_budget_start;
++ ktime_t last_idling_start;
++ int peak_rate_samples;
++ u64 peak_rate;
++ unsigned long bfq_max_budget;
++
++ struct hlist_head group_list;
++ struct list_head active_list;
++ struct list_head idle_list;
++
++ unsigned int bfq_quantum;
++ unsigned int bfq_fifo_expire[2];
++ unsigned int bfq_back_penalty;
++ unsigned int bfq_back_max;
++ unsigned int bfq_slice_idle;
++ u64 bfq_class_idle_last_service;
++
++ unsigned int bfq_user_max_budget;
++ unsigned int bfq_max_budget_async_rq;
++ unsigned int bfq_timeout[2];
++
++ unsigned int bfq_coop_thresh;
++ unsigned int bfq_failed_cooperations;
++ unsigned int bfq_requests_within_timer;
++
++ unsigned long last_ins_in_burst;
++ unsigned long bfq_burst_interval;
++ int burst_size;
++ unsigned long bfq_large_burst_thresh;
++ bool large_burst;
++ struct hlist_head burst_list;
++
++ bool low_latency;
++
++ /* parameters of the low_latency heuristics */
++ unsigned int bfq_wr_coeff;
++ unsigned int bfq_wr_max_time;
++ unsigned int bfq_wr_rt_max_time;
++ unsigned int bfq_wr_min_idle_time;
++ unsigned long bfq_wr_min_inter_arr_async;
++ unsigned int bfq_wr_max_softrt_rate;
++ u64 RT_prod;
++ enum bfq_device_speed device_speed;
++
++ struct bfq_queue oom_bfqq;
++};
++
++enum bfqq_state_flags {
++ BFQ_BFQQ_FLAG_busy = 0, /* has requests or is in service */
++ BFQ_BFQQ_FLAG_wait_request, /* waiting for a request */
++ BFQ_BFQQ_FLAG_must_alloc, /* must be allowed rq alloc */
++ BFQ_BFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
++ BFQ_BFQQ_FLAG_idle_window, /* slice idling enabled */
++ BFQ_BFQQ_FLAG_prio_changed, /* task priority has changed */
++ BFQ_BFQQ_FLAG_sync, /* synchronous queue */
++ BFQ_BFQQ_FLAG_budget_new, /* no completion with this budget */
++ BFQ_BFQQ_FLAG_IO_bound, /*
++ * bfqq has timed-out at least once
++ * having consumed at most 2/10 of
++ * its budget
++ */
++ BFQ_BFQQ_FLAG_in_large_burst, /*
++ * bfqq activated in a large burst,
++ * see comments to bfq_handle_burst.
++ */
++ BFQ_BFQQ_FLAG_constantly_seeky, /*
++ * bfqq has proved to be slow and
++ * seeky until budget timeout
++ */
++ BFQ_BFQQ_FLAG_softrt_update, /*
++ * may need softrt-next-start
++ * update
++ */
++ BFQ_BFQQ_FLAG_coop, /* bfqq is shared */
++ BFQ_BFQQ_FLAG_split_coop, /* shared bfqq will be split */
++ BFQ_BFQQ_FLAG_just_split, /* queue has just been split */
++};
++
++#define BFQ_BFQQ_FNS(name) \
++static inline void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \
++{ \
++ (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name); \
++} \
++static inline void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \
++{ \
++ (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name); \
++} \
++static inline int bfq_bfqq_##name(const struct bfq_queue *bfqq) \
++{ \
++ return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0; \
++}
++
++BFQ_BFQQ_FNS(busy);
++BFQ_BFQQ_FNS(wait_request);
++BFQ_BFQQ_FNS(must_alloc);
++BFQ_BFQQ_FNS(fifo_expire);
++BFQ_BFQQ_FNS(idle_window);
++BFQ_BFQQ_FNS(prio_changed);
++BFQ_BFQQ_FNS(sync);
++BFQ_BFQQ_FNS(budget_new);
++BFQ_BFQQ_FNS(IO_bound);
++BFQ_BFQQ_FNS(in_large_burst);
++BFQ_BFQQ_FNS(constantly_seeky);
++BFQ_BFQQ_FNS(coop);
++BFQ_BFQQ_FNS(split_coop);
++BFQ_BFQQ_FNS(just_split);
++BFQ_BFQQ_FNS(softrt_update);
++#undef BFQ_BFQQ_FNS
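++
++/*
++ * As an example of the expansion above, BFQ_BFQQ_FNS(busy) generates
++ * (sketch):
++ *
++ *   static inline void bfq_mark_bfqq_busy(struct bfq_queue *bfqq);
++ *   static inline void bfq_clear_bfqq_busy(struct bfq_queue *bfqq);
++ *   static inline int bfq_bfqq_busy(const struct bfq_queue *bfqq);
++ *
++ * which respectively set, clear and test bit BFQ_BFQQ_FLAG_busy in
++ * bfqq->flags.
++ */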
++
++/* Logging facilities. */
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
++ blk_add_trace_msg((bfqd)->queue, "bfq%d " fmt, (bfqq)->pid, ##args)
++
++#define bfq_log(bfqd, fmt, args...) \
++ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
++
++/* Expiration reasons. */
++enum bfqq_expiration {
++ BFQ_BFQQ_TOO_IDLE = 0, /*
++ * queue has been idling for
++ * too long
++ */
++ BFQ_BFQQ_BUDGET_TIMEOUT, /* budget took too long to be used */
++ BFQ_BFQQ_BUDGET_EXHAUSTED, /* budget consumed */
++ BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */
++};
++
++#ifdef CONFIG_CGROUP_BFQIO
++/**
++ * struct bfq_group - per (device, cgroup) data structure.
++ * @entity: schedulable entity to insert into the parent group sched_data.
++ * @sched_data: own sched_data, to contain child entities (they may be
++ * both bfq_queues and bfq_groups).
++ * @group_node: node to be inserted into the bfqio_cgroup->group_data
++ * list of the containing cgroup's bfqio_cgroup.
++ * @bfqd_node: node to be inserted into the @bfqd->group_list list
++ * of the groups active on the same device; used for cleanup.
++ * @bfqd: the bfq_data for the device this group acts upon.
++ * @async_bfqq: array of async queues for all the tasks belonging to
++ * the group, one queue per ioprio value per ioprio_class,
++ * except for the idle class that has only one queue.
++ * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
++ * @my_entity: pointer to @entity, %NULL for the toplevel group; used
++ * to avoid too many special cases during group creation/
++ * migration.
++ * @active_entities: number of active entities belonging to the group;
++ * unused for the root group. Used to know whether there
++ * are groups with more than one active @bfq_entity
++ * (see the comments to the function
++ * bfq_bfqq_must_not_expire()).
++ *
++ * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
++ * there is a set of bfq_groups, each one collecting the lower-level
++ * entities belonging to the group that are acting on the same device.
++ *
++ * Locking works as follows:
++ * o @group_node is protected by the bfqio_cgroup lock, and is accessed
++ * via RCU from its readers.
++ * o @bfqd is protected by the queue lock, RCU is used to access it
++ * from the readers.
++ * o All the other fields are protected by the @bfqd queue lock.
++ */
++struct bfq_group {
++ struct bfq_entity entity;
++ struct bfq_sched_data sched_data;
++
++ struct hlist_node group_node;
++ struct hlist_node bfqd_node;
++
++ void *bfqd;
++
++ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
++ struct bfq_queue *async_idle_bfqq;
++
++ struct bfq_entity *my_entity;
++
++ int active_entities;
++};
++
++/**
++ * struct bfqio_cgroup - bfq cgroup data structure.
++ * @css: subsystem state for bfq in the containing cgroup.
++ * @online: flag marked when the subsystem is inserted.
++ * @weight: cgroup weight.
++ * @ioprio: cgroup ioprio.
++ * @ioprio_class: cgroup ioprio_class.
++ * @lock: spinlock that protects @ioprio, @ioprio_class and @group_data.
++ * @group_data: list containing the bfq_group belonging to this cgroup.
++ *
++ * @group_data is accessed using RCU, with @lock protecting the updates,
++ * @ioprio and @ioprio_class are protected by @lock.
++ */
++struct bfqio_cgroup {
++ struct cgroup_subsys_state css;
++ bool online;
++
++ unsigned short weight, ioprio, ioprio_class;
++
++ spinlock_t lock;
++ struct hlist_head group_data;
++};
++#else
++struct bfq_group {
++ struct bfq_sched_data sched_data;
++
++ struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
++ struct bfq_queue *async_idle_bfqq;
++};
++#endif
++
++static inline struct bfq_service_tree *
++bfq_entity_service_tree(struct bfq_entity *entity)
++{
++ struct bfq_sched_data *sched_data = entity->sched_data;
++ unsigned int idx = entity->ioprio_class - 1;
++
++ BUG_ON(idx >= BFQ_IOPRIO_CLASSES);
++ BUG_ON(sched_data == NULL);
++
++ return sched_data->service_tree + idx;
++}
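++
++/*
++ * For example, IOPRIO_CLASS_RT (1) maps to service_tree[0],
++ * IOPRIO_CLASS_BE (2) to service_tree[1], and IOPRIO_CLASS_IDLE (3)
++ * to service_tree[2], covering the BFQ_IOPRIO_CLASSES trees of the
++ * scheduler data.
++ */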
++
++static inline struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic,
++ bool is_sync)
++{
++ return bic->bfqq[is_sync];
++}
++
++static inline void bic_set_bfqq(struct bfq_io_cq *bic,
++ struct bfq_queue *bfqq, bool is_sync)
++{
++ bic->bfqq[is_sync] = bfqq;
++}
++
++static inline struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
++{
++ return bic->icq.q->elevator->elevator_data;
++}
++
++/**
++ * bfq_get_bfqd_locked - get a lock to a bfqd using an RCU-protected pointer.
++ * @ptr: a pointer to a bfqd.
++ * @flags: storage for the flags to be saved.
++ *
++ * This function allows bfqg->bfqd to be protected by the
++ * queue lock of the bfqd it references; the pointer is dereferenced
++ * under RCU, so the storage for bfqd is assured to be safe as long
++ * as the RCU read side critical section does not end. After the
++ * bfqd->queue->queue_lock is taken the pointer is rechecked, to be
++ * sure that no other writer accessed it. If we raced with a writer,
++ * the function returns NULL, with the queue unlocked, otherwise it
++ * returns the dereferenced pointer, with the queue locked.
++ */
++static inline struct bfq_data *bfq_get_bfqd_locked(void **ptr,
++ unsigned long *flags)
++{
++ struct bfq_data *bfqd;
++
++ rcu_read_lock();
++ bfqd = rcu_dereference(*(struct bfq_data **)ptr);
++
++ if (bfqd != NULL) {
++ spin_lock_irqsave(bfqd->queue->queue_lock, *flags);
++ if (*ptr == bfqd)
++ goto out;
++ spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags);
++ }
++
++ bfqd = NULL;
++out:
++ rcu_read_unlock();
++ return bfqd;
++}
++
++static inline void bfq_put_bfqd_unlock(struct bfq_data *bfqd,
++ unsigned long *flags)
++{
++ spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags);
++}
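++
++/*
++ * Typical usage sketch of the two helpers above (error paths elided;
++ * the bfqg variable is illustrative, see the call sites in bfq-cgroup.c
++ * for the real usage):
++ *
++ *   unsigned long flags;
++ *   struct bfq_data *bfqd = bfq_get_bfqd_locked(&(bfqg->bfqd), &flags);
++ *
++ *   if (bfqd != NULL) {
++ *           ... work on bfqd under bfqd->queue->queue_lock ...
++ *           bfq_put_bfqd_unlock(bfqd, &flags);
++ *   }
++ */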
++
++static void bfq_changed_ioprio(struct bfq_io_cq *bic);
++static void bfq_put_queue(struct bfq_queue *bfqq);
++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq);
++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
++ struct bfq_group *bfqg, int is_sync,
++ struct bfq_io_cq *bic, gfp_t gfp_mask);
++static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
++ struct bfq_group *bfqg);
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
++
++#endif /* _BFQ_H */
+diff -Nur linux-4.1.3/block/bfq-ioc.c linux-xbian-imx6/block/bfq-ioc.c
+--- linux-4.1.3/block/bfq-ioc.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/block/bfq-ioc.c 2015-07-27 23:13:03.600137415 +0200
+@@ -0,0 +1,36 @@
++/*
++ * BFQ: I/O context handling.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ */
++
++/**
++ * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
++ * @icq: the iocontext queue.
++ */
++static inline struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
++{
++ /* bic->icq is the first member, %NULL will convert to %NULL */
++ return container_of(icq, struct bfq_io_cq, icq);
++}
++
++/**
++ * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
++ * @bfqd: the lookup key.
++ * @ioc: the io_context of the process doing I/O.
++ *
++ * Queue lock must be held.
++ */
++static inline struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
++ struct io_context *ioc)
++{
++ if (ioc)
++ return icq_to_bic(ioc_lookup_icq(ioc, bfqd->queue));
++ return NULL;
++}
+diff -Nur linux-4.1.3/block/bfq-iosched.c linux-xbian-imx6/block/bfq-iosched.c
+--- linux-4.1.3/block/bfq-iosched.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/block/bfq-iosched.c 2015-07-27 23:13:03.600137415 +0200
+@@ -0,0 +1,4223 @@
++/*
++ * Budget Fair Queueing (BFQ) disk scheduler.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
++ * file.
++ *
++ * BFQ is a proportional-share storage-I/O scheduling algorithm based on
++ * the slice-by-slice service scheme of CFQ. But BFQ assigns budgets,
++ * measured in number of sectors, to processes instead of time slices. The
++ * device is not granted to the in-service process for a given time slice,
++ * but until it has exhausted its assigned budget. This change from the time
++ * to the service domain allows BFQ to distribute the device throughput
++ * among processes as desired, without any distortion due to ZBR, workload
++ * fluctuations or other factors. BFQ uses an ad hoc internal scheduler,
++ * called B-WF2Q+, to schedule processes according to their budgets. More
++ * precisely, BFQ schedules queues associated to processes. Thanks to the
++ * accurate policy of B-WF2Q+, BFQ can afford to assign high budgets to
++ * I/O-bound processes issuing sequential requests (to boost the
++ * throughput), and yet guarantee a low latency to interactive and soft
++ * real-time applications.
++ *
++ * BFQ is described in [1], where also a reference to the initial, more
++ * theoretical paper on BFQ can be found. The interested reader can find
++ * in the latter paper full details on the main algorithm, as well as
++ * formulas of the guarantees and formal proofs of all the properties.
++ * With respect to the version of BFQ presented in these papers, this
++ * implementation adds a few more heuristics, such as the one that
++ * guarantees a low latency to soft real-time applications, and a
++ * hierarchical extension based on H-WF2Q+.
++ *
++ * B-WF2Q+ is based on WF2Q+, that is described in [2], together with
++ * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N)
++ * complexity derives from the one introduced with EEVDF in [3].
++ *
++ * [1] P. Valente and M. Andreolini, ``Improving Application Responsiveness
++ * with the BFQ Disk I/O Scheduler'',
++ * Proceedings of the 5th Annual International Systems and Storage
++ * Conference (SYSTOR '12), June 2012.
++ *
++ * http://algogroup.unimo.it/people/paolo/disk_sched/bf1-v1-suite-results.pdf
++ *
++ * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing
++ * Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689,
++ * Oct 1997.
++ *
++ * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
++ *
++ * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline
++ * First: A Flexible and Accurate Mechanism for Proportional Share
++ * Resource Allocation,'' technical report.
++ *
++ * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
++ */
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/cgroup.h>
++#include <linux/elevator.h>
++#include <linux/jiffies.h>
++#include <linux/rbtree.h>
++#include <linux/ioprio.h>
++#include "bfq.h"
++#include "blk.h"
++
++/* Max number of dispatches in one round of service. */
++static const int bfq_quantum = 4;
++
++/* Expiration time of sync (0) and async (1) requests, in jiffies. */
++static const int bfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
++
++/* Maximum backwards seek, in KiB. */
++static const int bfq_back_max = 16 * 1024;
++
++/* Penalty of a backwards seek: multiplier applied to the backward distance. */
++static const int bfq_back_penalty = 2;
++
++/* Idling period duration, in jiffies. */
++static int bfq_slice_idle = HZ / 125;
++
++/* Default maximum budget values, in sectors and number of requests. */
++static const int bfq_default_max_budget = 16 * 1024;
++static const int bfq_max_budget_async_rq = 4;
++
++/*
++ * Async to sync throughput distribution is controlled as follows:
++ * when an async request is served, the entity is charged the number
++ * of sectors of the request, multiplied by the factor below
++ */
++static const int bfq_async_charge_factor = 10;
++
++/* Default timeout values, in jiffies, approximating CFQ defaults. */
++static const int bfq_timeout_sync = HZ / 8;
++static int bfq_timeout_async = HZ / 25;
++
++struct kmem_cache *bfq_pool;
++
++/* Below this threshold (in ms), we consider thinktime immediate. */
++#define BFQ_MIN_TT 2
++
++/* hw_tag detection: parallel requests threshold and min samples needed. */
++#define BFQ_HW_QUEUE_THRESHOLD 4
++#define BFQ_HW_QUEUE_SAMPLES 32
++
++#define BFQQ_SEEK_THR (sector_t)(8 * 1024)
++#define BFQQ_SEEKY(bfqq) ((bfqq)->seek_mean > BFQQ_SEEK_THR)
++
++/* Min samples used for peak rate estimation (for autotuning). */
++#define BFQ_PEAK_RATE_SAMPLES 32
++
++/* Shift used for peak rate fixed precision calculations. */
++#define BFQ_RATE_SHIFT 16
++
++/*
++ * By default, BFQ computes the duration of the weight raising for
++ * interactive applications automatically, using the following formula:
++ * duration = (R / r) * T, where r is the peak rate of the device, and
++ * R and T are two reference parameters.
++ * In particular, R is the peak rate of the reference device (see below),
++ * and T is a reference time: given the systems that are likely to be
++ * installed on the reference device according to its speed class, T is
++ * about the maximum time needed, under BFQ and while reading two files in
++ * parallel, to load typical large applications on these systems.
++ * In practice, the slower/faster the device at hand is, the more/less it
++ * takes to load applications with respect to the reference device.
++ * Accordingly, the longer/shorter BFQ grants weight raising to interactive
++ * applications.
++ *
++ * BFQ uses four different reference pairs (R, T), depending on:
++ * . whether the device is rotational or non-rotational;
++ * . whether the device is slow, such as old or portable HDDs, as well as
++ * SD cards, or fast, such as newer HDDs and SSDs.
++ *
++ * The device's speed class is dynamically (re)detected in
++ * bfq_update_peak_rate() every time the estimated peak rate is updated.
++ *
++ * In the following definitions, R_slow[0]/R_fast[0] and T_slow[0]/T_fast[0]
++ * are the reference values for a slow/fast rotational device, whereas
++ * R_slow[1]/R_fast[1] and T_slow[1]/T_fast[1] are the reference values for
++ * a slow/fast non-rotational device. Finally, device_speed_thresh are the
++ * thresholds used to switch between speed classes.
++ * Both the reference peak rates and the thresholds are measured in
++ * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
++ */
++static int R_slow[2] = {1536, 10752};
++static int R_fast[2] = {17415, 34791};
++/*
++ * To improve readability, a conversion function is used to initialize the
++ * following arrays, which entails that they can be initialized only in a
++ * function.
++ */
++static int T_slow[2];
++static int T_fast[2];
++static int device_speed_thresh[2];
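++
++/*
++ * Worked example of the duration formula above (numbers illustrative):
++ * for a slow rotational device, R = R_slow[0] = 1536; if the estimated
++ * peak rate r of the device at hand is twice that value, then
++ * duration = (R/r) * T = T_slow[0] / 2, i.e., a device twice as fast as
++ * the reference one grants interactive weight raising for half the
++ * reference time.
++ */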
++
++#define BFQ_SERVICE_TREE_INIT ((struct bfq_service_tree) \
++ { RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
++
++#define RQ_BIC(rq) ((struct bfq_io_cq *) (rq)->elv.priv[0])
++#define RQ_BFQQ(rq) ((rq)->elv.priv[1])
++
++static inline void bfq_schedule_dispatch(struct bfq_data *bfqd);
++
++#include "bfq-ioc.c"
++#include "bfq-sched.c"
++#include "bfq-cgroup.c"
++
++#define bfq_class_idle(bfqq) ((bfqq)->entity.ioprio_class ==\
++ IOPRIO_CLASS_IDLE)
++#define bfq_class_rt(bfqq) ((bfqq)->entity.ioprio_class ==\
++ IOPRIO_CLASS_RT)
++
++#define bfq_sample_valid(samples) ((samples) > 80)
++
++/*
++ * We regard a request as SYNC, if either it's a read or has the SYNC bit
++ * set (in which case it could also be a direct WRITE).
++ */
++static inline int bfq_bio_sync(struct bio *bio)
++{
++ if (bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC))
++ return 1;
++
++ return 0;
++}
++
++/*
++ * Scheduler run of queue, if there are requests pending and no one in the
++ * driver that will restart queueing.
++ */
++static inline void bfq_schedule_dispatch(struct bfq_data *bfqd)
++{
++ if (bfqd->queued != 0) {
++ bfq_log(bfqd, "schedule dispatch");
++ kblockd_schedule_work(&bfqd->unplug_work);
++ }
++}
++
++/*
++ * Lifted from AS - choose which of rq1 and rq2 is best served now.
++ * We choose the request that is closest to the head right now. Distance
++ * behind the head is penalized and only allowed to a certain extent.
++ */
++static struct request *bfq_choose_req(struct bfq_data *bfqd,
++ struct request *rq1,
++ struct request *rq2,
++ sector_t last)
++{
++ sector_t s1, s2, d1 = 0, d2 = 0;
++ unsigned long back_max;
++#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
++#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
++ unsigned wrap = 0; /* bit mask: requests behind the disk head? */
++
++ if (rq1 == NULL || rq1 == rq2)
++ return rq2;
++ if (rq2 == NULL)
++ return rq1;
++
++ if (rq_is_sync(rq1) && !rq_is_sync(rq2))
++ return rq1;
++ else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
++ return rq2;
++ if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
++ return rq1;
++ else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
++ return rq2;
++
++ s1 = blk_rq_pos(rq1);
++ s2 = blk_rq_pos(rq2);
++
++ /*
++ * By definition, 1KiB is 2 sectors.
++ */
++ back_max = bfqd->bfq_back_max * 2;
++
++ /*
++ * Strict one way elevator _except_ in the case where we allow
++ * short backward seeks which are biased as twice the cost of a
++ * similar forward seek.
++ */
++ if (s1 >= last)
++ d1 = s1 - last;
++ else if (s1 + back_max >= last)
++ d1 = (last - s1) * bfqd->bfq_back_penalty;
++ else
++ wrap |= BFQ_RQ1_WRAP;
++
++ if (s2 >= last)
++ d2 = s2 - last;
++ else if (s2 + back_max >= last)
++ d2 = (last - s2) * bfqd->bfq_back_penalty;
++ else
++ wrap |= BFQ_RQ2_WRAP;
++
++ /* Found required data */
++
++ /*
++ * By doing switch() on the bit mask "wrap" we avoid having to
++ * check two variables for all permutations: --> faster!
++ */
++ switch (wrap) {
++ case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
++ if (d1 < d2)
++ return rq1;
++ else if (d2 < d1)
++ return rq2;
++ else {
++ if (s1 >= s2)
++ return rq1;
++ else
++ return rq2;
++ }
++
++ case BFQ_RQ2_WRAP:
++ return rq1;
++ case BFQ_RQ1_WRAP:
++ return rq2;
++ case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */
++ default:
++ /*
++ * Since both rqs are wrapped,
++		 * start with the one that's further behind the head
++ * (--> only *one* back seek required),
++ * since back seek takes more time than forward.
++ */
++ if (s1 <= s2)
++ return rq1;
++ else
++ return rq2;
++ }
++}
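++
++/*
++ * Worked example for the distance logic above (illustrative numbers,
++ * default bfq_back_penalty = 2): with the head at last = 1000, a
++ * forward request at s1 = 1100 gives d1 = 100, while a backward one at
++ * s2 = 950 gives d2 = (1000 - 950) * 2 = 100; the distances tie, so
++ * the switch falls into case 0 and the larger sector wins (rq1).
++ */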
++
++static struct bfq_queue *
++bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
++ sector_t sector, struct rb_node **ret_parent,
++ struct rb_node ***rb_link)
++{
++ struct rb_node **p, *parent;
++ struct bfq_queue *bfqq = NULL;
++
++ parent = NULL;
++ p = &root->rb_node;
++ while (*p) {
++ struct rb_node **n;
++
++ parent = *p;
++ bfqq = rb_entry(parent, struct bfq_queue, pos_node);
++
++ /*
++ * Sort strictly based on sector. Smallest to the left,
++ * largest to the right.
++ */
++ if (sector > blk_rq_pos(bfqq->next_rq))
++ n = &(*p)->rb_right;
++ else if (sector < blk_rq_pos(bfqq->next_rq))
++ n = &(*p)->rb_left;
++ else
++ break;
++ p = n;
++ bfqq = NULL;
++ }
++
++ *ret_parent = parent;
++ if (rb_link)
++ *rb_link = p;
++
++ bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
++ (long long unsigned)sector,
++ bfqq != NULL ? bfqq->pid : 0);
++
++ return bfqq;
++}
++
++static void bfq_rq_pos_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct rb_node **p, *parent;
++ struct bfq_queue *__bfqq;
++
++ if (bfqq->pos_root != NULL) {
++ rb_erase(&bfqq->pos_node, bfqq->pos_root);
++ bfqq->pos_root = NULL;
++ }
++
++ if (bfq_class_idle(bfqq))
++ return;
++ if (!bfqq->next_rq)
++ return;
++
++ bfqq->pos_root = &bfqd->rq_pos_tree;
++ __bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
++ blk_rq_pos(bfqq->next_rq), &parent, &p);
++ if (__bfqq == NULL) {
++ rb_link_node(&bfqq->pos_node, parent, p);
++ rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
++ } else
++ bfqq->pos_root = NULL;
++}
++
++/*
++ * Tell whether there are active queues or groups with differentiated weights.
++ */
++static inline bool bfq_differentiated_weights(struct bfq_data *bfqd)
++{
++ BUG_ON(!bfqd->hw_tag);
++ /*
++ * For weights to differ, at least one of the trees must contain
++ * at least two nodes.
++ */
++ return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
++ (bfqd->queue_weights_tree.rb_node->rb_left ||
++ bfqd->queue_weights_tree.rb_node->rb_right)
++#ifdef CONFIG_CGROUP_BFQIO
++ ) ||
++ (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
++ (bfqd->group_weights_tree.rb_node->rb_left ||
++ bfqd->group_weights_tree.rb_node->rb_right)
++#endif
++ );
++}
++
++/*
++ * If the weight-counter tree passed as input contains no counter for
++ * the weight of the input entity, then add that counter; otherwise just
++ * increment the existing counter.
++ *
++ * Note that weight-counter trees contain few nodes in mostly symmetric
++ * scenarios. For example, if all queues have the same weight, then the
++ * weight-counter tree for the queues may contain at most one node.
++ * This holds even if low_latency is on, because weight-raised queues
++ * are not inserted in the tree.
++ * In most scenarios, the rate at which nodes are created/destroyed
++ * should be low too.
++ */
++static void bfq_weights_tree_add(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root)
++{
++ struct rb_node **new = &(root->rb_node), *parent = NULL;
++
++ /*
++ * Do not insert if:
++ * - the device does not support queueing;
++ * - the entity is already associated with a counter, which happens if:
++ * 1) the entity is associated with a queue, 2) a request arrival
++ * has caused the queue to become both non-weight-raised, and hence
++ * change its weight, and backlogged; in this respect, each
++ * of the two events causes an invocation of this function,
++ * 3) this is the invocation of this function caused by the second
++ * event. This second invocation is actually useless, and we handle
++ * this fact by exiting immediately. More efficient or clearer
++	 * solutions might be adopted.
++ */
++ if (!bfqd->hw_tag || entity->weight_counter)
++ return;
++
++ while (*new) {
++ struct bfq_weight_counter *__counter = container_of(*new,
++ struct bfq_weight_counter,
++ weights_node);
++ parent = *new;
++
++ if (entity->weight == __counter->weight) {
++ entity->weight_counter = __counter;
++ goto inc_counter;
++ }
++ if (entity->weight < __counter->weight)
++ new = &((*new)->rb_left);
++ else
++ new = &((*new)->rb_right);
++ }
++
++	entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
++					 GFP_ATOMIC);
++	/*
++	 * If the allocation fails, just leave the entity uncounted:
++	 * bfq_weights_tree_remove() tolerates a NULL weight counter.
++	 */
++	if (!entity->weight_counter)
++		return;
++	entity->weight_counter->weight = entity->weight;
++ rb_link_node(&entity->weight_counter->weights_node, parent, new);
++ rb_insert_color(&entity->weight_counter->weights_node, root);
++
++inc_counter:
++ entity->weight_counter->num_active++;
++}
++
++/*
++ * Decrement the weight counter associated with the entity, and, if the
++ * counter reaches 0, remove the counter from the tree.
++ * See the comments to the function bfq_weights_tree_add() for considerations
++ * about overhead.
++ */
++static void bfq_weights_tree_remove(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root)
++{
++ /*
++ * Check whether the entity is actually associated with a counter.
++ * In fact, the device may not be considered NCQ-capable for a while,
++ * which implies that no insertion in the weight trees is performed,
++ * after which the device may start to be deemed NCQ-capable, and hence
++ * this function may start to be invoked. This may cause the function
++ * to be invoked for entities that are not associated with any counter.
++ */
++ if (!entity->weight_counter)
++ return;
++
++ BUG_ON(RB_EMPTY_ROOT(root));
++ BUG_ON(entity->weight_counter->weight != entity->weight);
++
++ BUG_ON(!entity->weight_counter->num_active);
++ entity->weight_counter->num_active--;
++ if (entity->weight_counter->num_active > 0)
++ goto reset_entity_pointer;
++
++ rb_erase(&entity->weight_counter->weights_node, root);
++ kfree(entity->weight_counter);
++
++reset_entity_pointer:
++ entity->weight_counter = NULL;
++}
++
++static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct request *last)
++{
++ struct rb_node *rbnext = rb_next(&last->rb_node);
++ struct rb_node *rbprev = rb_prev(&last->rb_node);
++ struct request *next = NULL, *prev = NULL;
++
++ BUG_ON(RB_EMPTY_NODE(&last->rb_node));
++
++ if (rbprev != NULL)
++ prev = rb_entry_rq(rbprev);
++
++ if (rbnext != NULL)
++ next = rb_entry_rq(rbnext);
++ else {
++ rbnext = rb_first(&bfqq->sort_list);
++ if (rbnext && rbnext != &last->rb_node)
++ next = rb_entry_rq(rbnext);
++ }
++
++ return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
++}
++
++/* see the definition of bfq_async_charge_factor for details */
++static inline unsigned long bfq_serv_to_charge(struct request *rq,
++ struct bfq_queue *bfqq)
++{
++ return blk_rq_sectors(rq) *
++ (1 + ((!bfq_bfqq_sync(bfqq)) * (bfqq->wr_coeff == 1) *
++ bfq_async_charge_factor));
++}
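++
++/*
++ * Example of the charge rule above (illustrative): a 512-sector async
++ * request from a non-weight-raised queue (wr_coeff == 1) is charged
++ * 512 * (1 + bfq_async_charge_factor) = 512 * 11 sectors, whereas a
++ * sync request, or any request of a weight-raised queue, is charged
++ * just its own size.
++ */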
++
++/**
++ * bfq_updated_next_req - update the queue after a new next_rq selection.
++ * @bfqd: the device data the queue belongs to.
++ * @bfqq: the queue to update.
++ *
++ * If the first request of a queue changes we make sure that the queue
++ * has enough budget to serve at least its first request (if the
++ * request has grown). We do this because if the queue has not enough
++ * budget for its first request, it has to go through two dispatch
++ * rounds to actually get it dispatched.
++ */
++static void bfq_updated_next_req(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++ struct request *next_rq = bfqq->next_rq;
++ unsigned long new_budget;
++
++ if (next_rq == NULL)
++ return;
++
++ if (bfqq == bfqd->in_service_queue)
++ /*
++ * In order not to break guarantees, budgets cannot be
++ * changed after an entity has been selected.
++ */
++ return;
++
++ BUG_ON(entity->tree != &st->active);
++ BUG_ON(entity == entity->sched_data->in_service_entity);
++
++ new_budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(next_rq, bfqq));
++ if (entity->budget != new_budget) {
++ entity->budget = new_budget;
++ bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
++ new_budget);
++ bfq_activate_bfqq(bfqd, bfqq);
++ }
++}
++
++static inline unsigned int bfq_wr_duration(struct bfq_data *bfqd)
++{
++ u64 dur;
++
++ if (bfqd->bfq_wr_max_time > 0)
++ return bfqd->bfq_wr_max_time;
++
++ dur = bfqd->RT_prod;
++ do_div(dur, bfqd->peak_rate);
++
++ return dur;
++}
++
++static inline unsigned
++bfq_bfqq_cooperations(struct bfq_queue *bfqq)
++{
++ return bfqq->bic ? bfqq->bic->cooperations : 0;
++}
++
++static inline void
++bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
++{
++ if (bic->saved_idle_window)
++ bfq_mark_bfqq_idle_window(bfqq);
++ else
++ bfq_clear_bfqq_idle_window(bfqq);
++ if (bic->saved_IO_bound)
++ bfq_mark_bfqq_IO_bound(bfqq);
++ else
++ bfq_clear_bfqq_IO_bound(bfqq);
++ /* Assuming that the flag in_large_burst is already correctly set */
++ if (bic->wr_time_left && bfqq->bfqd->low_latency &&
++ !bfq_bfqq_in_large_burst(bfqq) &&
++ bic->cooperations < bfqq->bfqd->bfq_coop_thresh) {
++ /*
++ * Start a weight raising period with the duration given by
++ * the raising_time_left snapshot.
++ */
++ if (bfq_bfqq_busy(bfqq))
++ bfqq->bfqd->wr_busy_queues++;
++ bfqq->wr_coeff = bfqq->bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bic->wr_time_left;
++ bfqq->last_wr_start_finish = jiffies;
++ bfqq->entity.ioprio_changed = 1;
++ }
++ /*
++ * Clear wr_time_left to prevent bfq_bfqq_save_state() from
++ * getting confused about the queue's need of a weight-raising
++ * period.
++ */
++ bic->wr_time_left = 0;
++}
++
++/* Must be called with the queue_lock held. */
++static int bfqq_process_refs(struct bfq_queue *bfqq)
++{
++ int process_refs, io_refs;
++
++ io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
++ process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st;
++ BUG_ON(process_refs < 0);
++ return process_refs;
++}
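++
++/*
++ * Example of the arithmetic above (illustrative): if bfqq->ref is
++ * 5, with 3 references held by allocated requests (allocated[READ]
++ * + allocated[WRITE]) and 1 held by the scheduler because the
++ * entity is on a service tree (on_st), then 5 - 3 - 1 = 1 reference
++ * comes from a process context, and it is these process references
++ * that decide whether merging two queues makes sense.
++ */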
++
++/* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */
++static inline void bfq_reset_burst_list(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ struct bfq_queue *item;
++ struct hlist_node *n;
++
++ hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
++ hlist_del_init(&item->burst_list_node);
++ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
++ bfqd->burst_size = 1;
++}
++
++/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
++static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ /* Increment burst size to take into account also bfqq */
++ bfqd->burst_size++;
++
++ if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
++ struct bfq_queue *pos, *bfqq_item;
++ struct hlist_node *n;
++
++ /*
++ * Enough queues have been activated shortly after each
++ * other to consider this burst as large.
++ */
++ bfqd->large_burst = true;
++
++ /*
++ * We can now mark all queues in the burst list as
++ * belonging to a large burst.
++ */
++ hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
++ burst_list_node)
++ bfq_mark_bfqq_in_large_burst(bfqq_item);
++ bfq_mark_bfqq_in_large_burst(bfqq);
++
++ /*
++ * From now on, and until the current burst finishes, any
++ * new queue being activated shortly after the last queue
++ * was inserted in the burst can be immediately marked as
++ * belonging to a large burst. So the burst list is not
++ * needed any more. Remove it.
++ */
++ hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
++ burst_list_node)
++ hlist_del_init(&pos->burst_list_node);
++ } else /* burst not yet large: add bfqq to the burst list */
++ hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
++}
++
++/*
++ * If many queues happen to become active shortly after each other, then,
++ * to help the processes associated to these queues get their job done as
++ * soon as possible, it is usually better to not grant either weight-raising
++ * or device idling to these queues. In this comment we describe, firstly,
++ * the reasons why this fact holds, and, secondly, the next function, which
++ * implements the main steps needed to properly mark these queues so that
++ * they can then be treated in a different way.
++ *
++ * As for the terminology, we say that a queue becomes active, i.e.,
++ * switches from idle to backlogged, either when it is created (as a
++ * consequence of the arrival of an I/O request), or, if already existing,
++ * when a new request for the queue arrives while the queue is idle.
++ * Bursts of activations, i.e., activations of different queues occurring
++ * shortly after each other, are typically caused by services or applications
++ * that spawn or reactivate many parallel threads/processes. Examples are
++ * systemd during boot or git grep.
++ *
++ * These services or applications benefit mostly from a high throughput:
++ * the quicker the requests of the activated queues are cumulatively served,
++ * the sooner the target job of these queues gets completed. As a consequence,
++ * weight-raising any of these queues, which also implies idling the device
++ * for it, is almost always counterproductive: in most cases it just lowers
++ * throughput.
++ *
++ * On the other hand, a burst of activations may also be caused by the start
++ * of an application that does not consist of a lot of parallel I/O-bound
++ * threads. In fact, with a complex application, the burst may be just a
++ * consequence of the fact that several processes need to be executed to
++ * start-up the application. To start an application as quickly as possible,
++ * the best thing to do is to privilege the I/O related to the application
++ * with respect to all other I/O. Therefore, the best strategy to start an
++ * application that causes a burst of activations as quickly as possible is
++ * to weight-raise all the queues activated during the burst. This is the
++ * exact opposite of the best strategy for the other type of bursts.
++ *
++ * In the end, to take the best action for each of the two cases, the two
++ * types of bursts need to be distinguished. Fortunately, this seems
++ * relatively easy to do, by looking at the sizes of the bursts. In
++ * particular, we found a threshold such that bursts with a larger size
++ * than that threshold are apparently caused only by services or commands
++ * such as systemd or git grep. For brevity, hereafter we simply call these
++ * bursts 'large'. BFQ *does not* weight-raise queues whose activations occur
++ * in a large burst. In addition, for each of these queues BFQ performs or
++ * does not perform idling depending on which choice boosts the throughput
++ * most. The exact choice depends on the device and request pattern at
++ * hand.
++ *
++ * Turning back to the next function, it implements all the steps needed
++ * to detect the occurrence of a large burst and to properly mark all the
++ * queues belonging to it (so that they can then be treated in a different
++ * way). This goal is achieved by maintaining a special "burst list" that
++ * holds, temporarily, the queues that belong to the burst in progress. The
++ * list is then used to mark these queues as belonging to a large burst if
++ * the burst does become large. The main steps are the following.
++ *
++ * . when the very first queue is activated, the queue is inserted into the
++ * list (as it could be the first queue in a possible burst)
++ *
++ * . if the current burst has not yet become large, and a queue Q that does
++ * not yet belong to the burst is activated shortly after the last time
++ * at which a new queue entered the burst list, then the function appends
++ * Q to the burst list
++ *
++ * . if, as a consequence of the previous step, the burst size reaches
++ * the large-burst threshold, then
++ *
++ * . all the queues in the burst list are marked as belonging to a
++ * large burst
++ *
++ * . the burst list is deleted; in fact, the burst list already served
++ * its purpose (keeping temporarily track of the queues in a burst,
++ * so as to be able to mark them as belonging to a large burst in the
++ * previous sub-step), and now is not needed any more
++ *
++ * . the device enters a large-burst mode
++ *
++ * . if a queue Q that does not belong to the burst is activated while
++ * the device is in large-burst mode and shortly after the last time
++ * at which a queue either entered the burst list or was marked as
++ * belonging to the current large burst, then Q is immediately marked
++ * as belonging to a large burst.
++ *
++ * . if a queue Q that does not belong to the burst is activated a while
++ *   later, i.e., not shortly after the last time at which a queue either
++ *   entered the burst list or was marked as belonging to the current
++ *   large burst, then the current burst is deemed finished and:
++ *
++ * . the large-burst mode is reset if set
++ *
++ * . the burst list is emptied
++ *
++ * . Q is inserted in the burst list, as Q may be the first queue
++ * in a possible new burst (then the burst list contains just Q
++ * after this step).
++ */
++static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ bool idle_for_long_time)
++{
++ /*
++ * If bfqq happened to be activated in a burst, but has been idle
++ * for at least as long as an interactive queue, then we assume
++ * that, in the overall I/O initiated in the burst, the I/O
++ * associated to bfqq is finished. So bfqq does not need to be
++ * treated as a queue belonging to a burst anymore. Accordingly,
++ * we reset bfqq's in_large_burst flag if set, and remove bfqq
++ * from the burst list if it's there. We do not, however, decrement
++ * burst_size, because the fact that bfqq no longer needs to belong
++ * to the burst list does not invalidate the fact that bfqq may
++ * have been activated during the current burst.
++ */
++ if (idle_for_long_time) {
++ hlist_del_init(&bfqq->burst_list_node);
++ bfq_clear_bfqq_in_large_burst(bfqq);
++ }
++
++ /*
++ * If bfqq is already in the burst list or is part of a large
++ * burst, then there is nothing else to do.
++ */
++ if (!hlist_unhashed(&bfqq->burst_list_node) ||
++ bfq_bfqq_in_large_burst(bfqq))
++ return;
++
++ /*
++ * If bfqq's activation happens late enough, then the current
++ * burst is finished, and related data structures must be reset.
++ *
++ * In this respect, consider the special case where bfqq is the very
++ * first queue being activated. In this case, last_ins_in_burst is
++ * not yet significant when we get here. But it is easy to verify
++ * that, whether or not the following condition is true, bfqq will
++ * end up being inserted into the burst list. In particular the
++ * list will happen to contain only bfqq. And this is exactly what
++ * has to happen, as bfqq may be the first queue in a possible
++ * burst.
++ */
++ if (time_is_before_jiffies(bfqd->last_ins_in_burst +
++ bfqd->bfq_burst_interval)) {
++ bfqd->large_burst = false;
++ bfq_reset_burst_list(bfqd, bfqq);
++ return;
++ }
++
++ /*
++ * If we get here, then bfqq is being activated shortly after the
++ * last queue. So, if the current burst is also large, we can mark
++ * bfqq as belonging to this large burst immediately.
++ */
++ if (bfqd->large_burst) {
++ bfq_mark_bfqq_in_large_burst(bfqq);
++ return;
++ }
++
++ /*
++ * If we get here, then a large-burst state has not yet been
++ * reached, but bfqq is being activated shortly after the last
++ * queue. Then we add bfqq to the burst.
++ */
++ bfq_add_to_burst(bfqd, bfqq);
++}
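++
++/*
++ * Illustrative timeline for the function above (interval lengths
++ * arbitrary): queues A, B and C are activated within
++ * bfq_burst_interval of one another, so A starts the burst list
++ * and B and C are appended to it. If bfq_large_burst_thresh is 3,
++ * the activation of C switches the device to large-burst mode and
++ * A, B and C are all marked in_large_burst, while the list is
++ * emptied. A queue D activated shortly afterwards is marked
++ * immediately; a queue E activated much later instead resets the
++ * state and becomes the only element of a new burst list.
++ */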
++
++static void bfq_add_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_entity *entity = &bfqq->entity;
++ struct bfq_data *bfqd = bfqq->bfqd;
++ struct request *next_rq, *prev;
++ unsigned long old_wr_coeff = bfqq->wr_coeff;
++ bool interactive = false;
++
++ bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
++ bfqq->queued[rq_is_sync(rq)]++;
++ bfqd->queued++;
++
++ elv_rb_add(&bfqq->sort_list, rq);
++
++ /*
++ * Check if this request is a better next-serve candidate.
++ */
++ prev = bfqq->next_rq;
++ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
++ BUG_ON(next_rq == NULL);
++ bfqq->next_rq = next_rq;
++
++ /*
++ * Adjust priority tree position, if next_rq changes.
++ */
++ if (prev != bfqq->next_rq)
++ bfq_rq_pos_tree_add(bfqd, bfqq);
++
++ if (!bfq_bfqq_busy(bfqq)) {
++ bool soft_rt, coop_or_in_burst,
++ idle_for_long_time = time_is_before_jiffies(
++ bfqq->budget_timeout +
++ bfqd->bfq_wr_min_idle_time);
++
++ if (bfq_bfqq_sync(bfqq)) {
++ bool already_in_burst =
++ !hlist_unhashed(&bfqq->burst_list_node) ||
++ bfq_bfqq_in_large_burst(bfqq);
++ bfq_handle_burst(bfqd, bfqq, idle_for_long_time);
++ /*
++ * If bfqq was not already in the current burst,
++ * then, at this point, bfqq either has been
++ * added to the current burst or has caused the
++ * current burst to terminate. In particular, in
++ * the second case, bfqq has become the first
++ * queue in a possible new burst.
++ * In both cases last_ins_in_burst needs to be
++ * moved forward.
++ */
++ if (!already_in_burst)
++ bfqd->last_ins_in_burst = jiffies;
++ }
++
++ coop_or_in_burst = bfq_bfqq_in_large_burst(bfqq) ||
++ bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh;
++ soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
++ !coop_or_in_burst &&
++ time_is_before_jiffies(bfqq->soft_rt_next_start);
++ interactive = !coop_or_in_burst && idle_for_long_time;
++ entity->budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(next_rq, bfqq));
++
++ if (!bfq_bfqq_IO_bound(bfqq)) {
++ if (time_before(jiffies,
++ RQ_BIC(rq)->ttime.last_end_request +
++ bfqd->bfq_slice_idle)) {
++ bfqq->requests_within_timer++;
++ if (bfqq->requests_within_timer >=
++ bfqd->bfq_requests_within_timer)
++ bfq_mark_bfqq_IO_bound(bfqq);
++ } else
++ bfqq->requests_within_timer = 0;
++ }
++
++ if (!bfqd->low_latency)
++ goto add_bfqq_busy;
++
++ if (bfq_bfqq_just_split(bfqq))
++ goto set_ioprio_changed;
++
++ /*
++ * If the queue:
++ * - is not being boosted,
++ * - has been idle for enough time,
++ * - is not a sync queue or is linked to a bfq_io_cq (it is
++ * shared "for its nature" or it is not shared and its
++ * requests have not been redirected to a shared queue)
++ * start a weight-raising period.
++ */
++ if (old_wr_coeff == 1 && (interactive || soft_rt) &&
++ (!bfq_bfqq_sync(bfqq) || bfqq->bic != NULL)) {
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ if (interactive)
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ else
++ bfqq->wr_cur_max_time =
++ bfqd->bfq_wr_rt_max_time;
++ bfq_log_bfqq(bfqd, bfqq,
++ "wrais starting at %lu, rais_max_time %u",
++ jiffies,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ } else if (old_wr_coeff > 1) {
++ if (interactive)
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++ else if (coop_or_in_burst ||
++ (bfqq->wr_cur_max_time ==
++ bfqd->bfq_wr_rt_max_time &&
++ !soft_rt)) {
++ bfqq->wr_coeff = 1;
++ bfq_log_bfqq(bfqd, bfqq,
++ "wrais ending at %lu, rais_max_time %u",
++ jiffies,
++ jiffies_to_msecs(bfqq->
++ wr_cur_max_time));
++ } else if (time_before(
++ bfqq->last_wr_start_finish +
++ bfqq->wr_cur_max_time,
++ jiffies +
++ bfqd->bfq_wr_rt_max_time) &&
++ soft_rt) {
++ /*
++ * The remaining weight-raising time is lower
++ * than bfqd->bfq_wr_rt_max_time, which means
++ * that the application is enjoying weight
++ * raising either because deemed soft-rt in
++ * the near past, or because deemed interactive
++ * long ago.
++ * In both cases, resetting now the current
++ * remaining weight-raising time for the
++ * application to the weight-raising duration
++ * for soft rt applications would not cause any
++ * latency increase for the application (as the
++ * new duration would be higher than the
++ * remaining time).
++ *
++ * In addition, the application is now meeting
++ * the requirements for being deemed soft rt.
++ * In the end we can correctly and safely
++ * (re)charge the weight-raising duration for
++ * the application with the weight-raising
++ * duration for soft rt applications.
++ *
++ * In particular, doing this recharge now, i.e.,
++ * before the weight-raising period for the
++ * application finishes, reduces the probability
++ * of the following negative scenario:
++ * 1) the weight of a soft rt application is
++ * raised at startup (as for any newly
++ * created application),
++ * 2) since the application is not interactive,
++ * at a certain time weight-raising is
++ * stopped for the application,
++ * 3) at that time the application happens to
++ * still have pending requests, and hence
++ * is destined to not have a chance to be
++ * deemed soft rt before these requests are
++ * completed (see the comments to the
++ * function bfq_bfqq_softrt_next_start()
++ * for details on soft rt detection),
++ * 4) these pending requests experience a high
++ * latency because the application is not
++ * weight-raised while they are pending.
++ */
++ bfqq->last_wr_start_finish = jiffies;
++ bfqq->wr_cur_max_time =
++ bfqd->bfq_wr_rt_max_time;
++ }
++ }
++set_ioprio_changed:
++ if (old_wr_coeff != bfqq->wr_coeff)
++ entity->ioprio_changed = 1;
++add_bfqq_busy:
++ bfqq->last_idle_bklogged = jiffies;
++ bfqq->service_from_backlogged = 0;
++ bfq_clear_bfqq_softrt_update(bfqq);
++ bfq_add_bfqq_busy(bfqd, bfqq);
++ } else {
++ if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
++ time_is_before_jiffies(
++ bfqq->last_wr_start_finish +
++ bfqd->bfq_wr_min_inter_arr_async)) {
++ bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++
++ bfqd->wr_busy_queues++;
++ entity->ioprio_changed = 1;
++ bfq_log_bfqq(bfqd, bfqq,
++ "non-idle wrais starting at %lu, rais_max_time %u",
++ jiffies,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++ if (prev != bfqq->next_rq)
++ bfq_updated_next_req(bfqd, bfqq);
++ }
++
++ if (bfqd->low_latency &&
++ (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
++ bfqq->last_wr_start_finish = jiffies;
++}
++
++static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
++ struct bio *bio)
++{
++ struct task_struct *tsk = current;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq;
++
++ bic = bfq_bic_lookup(bfqd, tsk->io_context);
++ if (bic == NULL)
++ return NULL;
++
++ bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
++ if (bfqq != NULL)
++ return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
++
++ return NULL;
++}
++
++static void bfq_activate_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++
++ bfqd->rq_in_driver++;
++ bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
++ bfq_log(bfqd, "activate_request: new bfqd->last_position %llu",
++ (long long unsigned)bfqd->last_position);
++}
++
++static inline void bfq_deactivate_request(struct request_queue *q,
++ struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++
++ BUG_ON(bfqd->rq_in_driver == 0);
++ bfqd->rq_in_driver--;
++}
++
++static void bfq_remove_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ const int sync = rq_is_sync(rq);
++
++ if (bfqq->next_rq == rq) {
++ bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
++ bfq_updated_next_req(bfqd, bfqq);
++ }
++
++ list_del_init(&rq->queuelist);
++ BUG_ON(bfqq->queued[sync] == 0);
++ bfqq->queued[sync]--;
++ bfqd->queued--;
++ elv_rb_del(&bfqq->sort_list, rq);
++
++ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue)
++ bfq_del_bfqq_busy(bfqd, bfqq, 1);
++ /*
++ * Remove queue from request-position tree as it is empty.
++ */
++ if (bfqq->pos_root != NULL) {
++ rb_erase(&bfqq->pos_node, bfqq->pos_root);
++ bfqq->pos_root = NULL;
++ }
++ }
++
++ if (rq->cmd_flags & REQ_META) {
++ BUG_ON(bfqq->meta_pending == 0);
++ bfqq->meta_pending--;
++ }
++}
++
++static int bfq_merge(struct request_queue *q, struct request **req,
++ struct bio *bio)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct request *__rq;
++
++ __rq = bfq_find_rq_fmerge(bfqd, bio);
++ if (__rq != NULL && elv_rq_merge_ok(__rq, bio)) {
++ *req = __rq;
++ return ELEVATOR_FRONT_MERGE;
++ }
++
++ return ELEVATOR_NO_MERGE;
++}
++
++static void bfq_merged_request(struct request_queue *q, struct request *req,
++ int type)
++{
++ if (type == ELEVATOR_FRONT_MERGE &&
++ rb_prev(&req->rb_node) &&
++ blk_rq_pos(req) <
++ blk_rq_pos(container_of(rb_prev(&req->rb_node),
++ struct request, rb_node))) {
++ struct bfq_queue *bfqq = RQ_BFQQ(req);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ struct request *prev, *next_rq;
++
++ /* Reposition request in its sort_list */
++ elv_rb_del(&bfqq->sort_list, req);
++ elv_rb_add(&bfqq->sort_list, req);
++ /* Choose next request to be served for bfqq */
++ prev = bfqq->next_rq;
++ next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
++ bfqd->last_position);
++ BUG_ON(next_rq == NULL);
++ bfqq->next_rq = next_rq;
++ /*
++ * If next_rq changes, update both the queue's budget to
++ * fit the new request and the queue's position in its
++ * rq_pos_tree.
++ */
++ if (prev != bfqq->next_rq) {
++ bfq_updated_next_req(bfqd, bfqq);
++ bfq_rq_pos_tree_add(bfqd, bfqq);
++ }
++ }
++}
++
++static void bfq_merged_requests(struct request_queue *q, struct request *rq,
++ struct request *next)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ /*
++ * Reposition in fifo if next is older than rq.
++ */
++ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
++ time_before(next->fifo_time, rq->fifo_time)) {
++ list_move(&rq->queuelist, &next->queuelist);
++ rq->fifo_time = next->fifo_time;
++ }
++
++ if (bfqq->next_rq == next)
++ bfqq->next_rq = rq;
++
++ bfq_remove_request(next);
++}
++
++/* Must be called with bfqq != NULL */
++static inline void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
++{
++ BUG_ON(bfqq == NULL);
++ if (bfq_bfqq_busy(bfqq))
++ bfqq->bfqd->wr_busy_queues--;
++ bfqq->wr_coeff = 1;
++ bfqq->wr_cur_max_time = 0;
++ /* Trigger a weight change on the next activation of the queue */
++ bfqq->entity.ioprio_changed = 1;
++}
++
++static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
++ struct bfq_group *bfqg)
++{
++ int i, j;
++
++ for (i = 0; i < 2; i++)
++ for (j = 0; j < IOPRIO_BE_NR; j++)
++ if (bfqg->async_bfqq[i][j] != NULL)
++ bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
++ if (bfqg->async_idle_bfqq != NULL)
++ bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
++}
++
++static void bfq_end_wr(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq;
++
++ spin_lock_irq(bfqd->queue->queue_lock);
++
++ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
++ bfq_bfqq_end_wr(bfqq);
++ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
++ bfq_bfqq_end_wr(bfqq);
++ bfq_end_wr_async(bfqd);
++
++ spin_unlock_irq(bfqd->queue->queue_lock);
++}
++
++static inline sector_t bfq_io_struct_pos(void *io_struct, bool request)
++{
++ if (request)
++ return blk_rq_pos(io_struct);
++ else
++ return ((struct bio *)io_struct)->bi_iter.bi_sector;
++}
++
++static inline sector_t bfq_dist_from(sector_t pos1,
++ sector_t pos2)
++{
++ if (pos1 >= pos2)
++ return pos1 - pos2;
++ else
++ return pos2 - pos1;
++}
++
++static inline int bfq_rq_close_to_sector(void *io_struct, bool request,
++ sector_t sector)
++{
++ return bfq_dist_from(bfq_io_struct_pos(io_struct, request), sector) <=
++ BFQQ_SEEK_THR;
++}
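++
++/*
++ * For instance (threshold assumed for illustration): with
++ * BFQQ_SEEK_THR equal to, say, 8 * 1024 sectors, a request for
++ * sector 100000 is considered close to any position in
++ * [100000 - 8192, 100000 + 8192]. This distance test drives both
++ * the rq_pos_tree lookup below and the detection of cooperating
++ * queues.
++ */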
++
++static struct bfq_queue *bfqq_close(struct bfq_data *bfqd, sector_t sector)
++{
++ struct rb_root *root = &bfqd->rq_pos_tree;
++ struct rb_node *parent, *node;
++ struct bfq_queue *__bfqq;
++
++ if (RB_EMPTY_ROOT(root))
++ return NULL;
++
++ /*
++ * First, if we find a request starting at the end of the last
++ * request, choose it.
++ */
++ __bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
++ if (__bfqq != NULL)
++ return __bfqq;
++
++ /*
++ * If the exact sector wasn't found, the parent of the NULL leaf
++ * will contain the closest sector (rq_pos_tree sorted by
++ * next_request position).
++ */
++ __bfqq = rb_entry(parent, struct bfq_queue, pos_node);
++ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
++ return __bfqq;
++
++ if (blk_rq_pos(__bfqq->next_rq) < sector)
++ node = rb_next(&__bfqq->pos_node);
++ else
++ node = rb_prev(&__bfqq->pos_node);
++ if (node == NULL)
++ return NULL;
++
++ __bfqq = rb_entry(node, struct bfq_queue, pos_node);
++ if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
++ return __bfqq;
++
++ return NULL;
++}
++
++/*
++ * bfqd - obvious
++ * cur_bfqq - passed in so that we don't decide that the current queue
++ * is closely cooperating with itself
++ * sector - used as a reference point to search for a close queue
++ */
++static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
++ struct bfq_queue *cur_bfqq,
++ sector_t sector)
++{
++ struct bfq_queue *bfqq;
++
++ if (bfq_class_idle(cur_bfqq))
++ return NULL;
++ if (!bfq_bfqq_sync(cur_bfqq))
++ return NULL;
++ if (BFQQ_SEEKY(cur_bfqq))
++ return NULL;
++
++ /* If device has only one backlogged bfq_queue, don't search. */
++ if (bfqd->busy_queues == 1)
++ return NULL;
++
++ /*
++ * We should notice if some of the queues are cooperating, e.g.
++ * working closely on the same area of the disk. In that case,
++ * we can group them together and don't waste time idling.
++ */
++ bfqq = bfqq_close(bfqd, sector);
++ if (bfqq == NULL || bfqq == cur_bfqq)
++ return NULL;
++
++ /*
++ * Do not merge queues from different bfq_groups.
++ */
++ if (bfqq->entity.parent != cur_bfqq->entity.parent)
++ return NULL;
++
++ /*
++ * It only makes sense to merge sync queues.
++ */
++ if (!bfq_bfqq_sync(bfqq))
++ return NULL;
++ if (BFQQ_SEEKY(bfqq))
++ return NULL;
++
++ /*
++ * Do not merge queues of different priority classes.
++ */
++ if (bfq_class_rt(bfqq) != bfq_class_rt(cur_bfqq))
++ return NULL;
++
++ return bfqq;
++}
++
++static struct bfq_queue *
++bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++ int process_refs, new_process_refs;
++ struct bfq_queue *__bfqq;
++
++ /*
++ * If there are no process references on the new_bfqq, then it is
++ * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
++ * may have dropped their last reference (not just their last process
++ * reference).
++ */
++ if (!bfqq_process_refs(new_bfqq))
++ return NULL;
++
++ /* Avoid a circular list and skip interim queue merges. */
++ while ((__bfqq = new_bfqq->new_bfqq)) {
++ if (__bfqq == bfqq)
++ return NULL;
++ new_bfqq = __bfqq;
++ }
++
++ process_refs = bfqq_process_refs(bfqq);
++ new_process_refs = bfqq_process_refs(new_bfqq);
++ /*
++ * If the process for the bfqq has gone away, there is no
++ * sense in merging the queues.
++ */
++ if (process_refs == 0 || new_process_refs == 0)
++ return NULL;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
++ new_bfqq->pid);
++
++ /*
++ * Merging is just a redirection: the requests of the process
++ * owning one of the two queues are redirected to the other queue.
++ * The latter queue, in its turn, is set as shared if this is the
++ * first time that the requests of some process are redirected to
++ * it.
++ *
++ * We redirect bfqq to new_bfqq and not the opposite, because we
++ * are in the context of the process owning bfqq, hence we have
++ * the io_cq of this process. So we can immediately configure this
++ * io_cq to redirect the requests of the process to new_bfqq.
++ *
++ * NOTE, even if new_bfqq coincides with the in-service queue, the
++ * io_cq of new_bfqq is not available, because, if the in-service
++ * queue is shared, bfqd->in_service_bic may not point to the
++ * io_cq of the in-service queue.
++ * Redirecting the requests of the process owning bfqq to the
++ * currently in-service queue is in any case the best option, as
++ * we feed the in-service queue with new requests close to the
++ * last request served and, by doing so, hopefully increase the
++ * throughput.
++ */
++ bfqq->new_bfqq = new_bfqq;
++ atomic_add(process_refs, &new_bfqq->ref);
++ return new_bfqq;
++}
++
++/*
++ * Attempt to schedule a merge of bfqq with the currently in-service queue
++ * or with a close queue among the scheduled queues.
++ * Return NULL if no merge was scheduled, a pointer to the shared bfq_queue
++ * structure otherwise.
++ *
++ * The OOM queue is not allowed to participate in cooperation: in fact, since
++ * the requests temporarily redirected to the OOM queue could be redirected
++ * again to dedicated queues at any time, the state needed to correctly
++ * handle merging with the OOM queue would be quite complex and expensive
++ * to maintain. Besides, in a condition as critical as being out of memory,
++ * the benefits of queue merging may be of little relevance, or even negligible.
++ */
++static struct bfq_queue *
++bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ void *io_struct, bool request)
++{
++ struct bfq_queue *in_service_bfqq, *new_bfqq;
++
++ if (bfqq->new_bfqq)
++ return bfqq->new_bfqq;
++
++ if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
++ return NULL;
++
++ in_service_bfqq = bfqd->in_service_queue;
++
++ if (in_service_bfqq == NULL || in_service_bfqq == bfqq ||
++ !bfqd->in_service_bic ||
++ unlikely(in_service_bfqq == &bfqd->oom_bfqq))
++ goto check_scheduled;
++
++ if (bfq_class_idle(in_service_bfqq) || bfq_class_idle(bfqq))
++ goto check_scheduled;
++
++ if (bfq_class_rt(in_service_bfqq) != bfq_class_rt(bfqq))
++ goto check_scheduled;
++
++ if (in_service_bfqq->entity.parent != bfqq->entity.parent)
++ goto check_scheduled;
++
++ if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
++ bfq_bfqq_sync(in_service_bfqq) && bfq_bfqq_sync(bfqq)) {
++ new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
++ if (new_bfqq != NULL)
++ return new_bfqq; /* Merge with in-service queue */
++ }
++
++ /*
++ * Check whether there is a cooperator among currently scheduled
++ * queues. The only thing we need is that the bio/request is not
++ * NULL, as we need it to establish whether a cooperator exists.
++ */
++check_scheduled:
++ new_bfqq = bfq_close_cooperator(bfqd, bfqq,
++ bfq_io_struct_pos(io_struct, request));
++ if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq))
++ return bfq_setup_merge(bfqq, new_bfqq);
++
++ return NULL;
++}
++
++static inline void
++bfq_bfqq_save_state(struct bfq_queue *bfqq)
++{
++ /*
++ * If bfqq->bic == NULL, the queue is already shared or its requests
++ * have already been redirected to a shared queue; both idle window
++ * and weight raising state have already been saved. Do nothing.
++ */
++ if (bfqq->bic == NULL)
++ return;
++ if (bfqq->bic->wr_time_left)
++ /*
++ * This is the queue of a just-started process, and would
++ * deserve weight raising: we set wr_time_left to the full
++ * weight-raising duration to trigger weight-raising when
++ * and if the queue is split and the first request of the
++ * queue is enqueued.
++ */
++ bfqq->bic->wr_time_left = bfq_wr_duration(bfqq->bfqd);
++ else if (bfqq->wr_coeff > 1) {
++ unsigned long wr_duration =
++ jiffies - bfqq->last_wr_start_finish;
++ /*
++ * It may happen that a queue's weight raising period lasts
++ * longer than its wr_cur_max_time, as weight raising is
++ * handled only when a request is enqueued or dispatched (it
++ * does not use any timer). If the weight raising period is
++ * about to end, don't save it.
++ */
++ if (bfqq->wr_cur_max_time <= wr_duration)
++ bfqq->bic->wr_time_left = 0;
++ else
++ bfqq->bic->wr_time_left =
++ bfqq->wr_cur_max_time - wr_duration;
++ /*
++ * The bfq_queue is becoming shared or the requests of the
++ * process owning the queue are being redirected to a shared
++ * queue. Stop the weight raising period of the queue, as in
++ * both cases it should not be owned by an interactive or
++ * soft real-time application.
++ */
++ bfq_bfqq_end_wr(bfqq);
++ } else
++ bfqq->bic->wr_time_left = 0;
++ bfqq->bic->saved_idle_window = bfq_bfqq_idle_window(bfqq);
++ bfqq->bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
++ bfqq->bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
++ bfqq->bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
++ bfqq->bic->cooperations++;
++ bfqq->bic->failed_cooperations = 0;
++}
++
++static inline void
++bfq_get_bic_reference(struct bfq_queue *bfqq)
++{
++ /*
++ * If bfqq->bic has a non-NULL value, the bic to which it belongs
++ * is about to begin using a shared bfq_queue.
++ */
++ if (bfqq->bic)
++ atomic_long_inc(&bfqq->bic->icq.ioc->refcount);
++}
++
++static void
++bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
++ struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
++ (long unsigned)new_bfqq->pid);
++ /* Save weight raising and idle window of the merged queues */
++ bfq_bfqq_save_state(bfqq);
++ bfq_bfqq_save_state(new_bfqq);
++ if (bfq_bfqq_IO_bound(bfqq))
++ bfq_mark_bfqq_IO_bound(new_bfqq);
++ bfq_clear_bfqq_IO_bound(bfqq);
++ /*
++ * Grab a reference to the bic, to prevent it from being destroyed
++ * before being possibly touched by a bfq_split_bfqq().
++ */
++ bfq_get_bic_reference(bfqq);
++ bfq_get_bic_reference(new_bfqq);
++ /*
++ * Merge queues (that is, let bic redirect its requests to new_bfqq)
++ */
++ bic_set_bfqq(bic, new_bfqq, 1);
++ bfq_mark_bfqq_coop(new_bfqq);
++ /*
++ * new_bfqq now belongs to at least two bics (it is a shared queue):
++ * set new_bfqq->bic to NULL. bfqq either:
++ * - does not belong to any bic any more, and hence bfqq->bic must
++ * be set to NULL, or
++ * - is a queue whose owning bics have already been redirected to a
++ * different queue, hence the queue is destined to not belong to
++ * any bic soon and bfqq->bic is already NULL (therefore the next
++ * assignment causes no harm).
++ */
++ new_bfqq->bic = NULL;
++ bfqq->bic = NULL;
++ bfq_put_queue(bfqq);
++}
++
++static inline void bfq_bfqq_increase_failed_cooperations(struct bfq_queue *bfqq)
++{
++ struct bfq_io_cq *bic = bfqq->bic;
++ struct bfq_data *bfqd = bfqq->bfqd;
++
++ if (bic && bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh) {
++ bic->failed_cooperations++;
++ if (bic->failed_cooperations >= bfqd->bfq_failed_cooperations)
++ bic->cooperations = 0;
++ }
++}
++
++static int bfq_allow_merge(struct request_queue *q, struct request *rq,
++ struct bio *bio)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq, *new_bfqq;
++
++ /*
++ * Disallow merge of a sync bio into an async request.
++ */
++ if (bfq_bio_sync(bio) && !rq_is_sync(rq))
++ return 0;
++
++ /*
++ * Lookup the bfqq that this bio will be queued with. Allow
++ * merge only if rq is queued there.
++ * Queue lock is held here.
++ */
++ bic = bfq_bic_lookup(bfqd, current->io_context);
++ if (bic == NULL)
++ return 0;
++
++ bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
++ /*
++ * We take advantage of this function to perform an early merge
++ * of the queues of possible cooperating processes.
++ */
++ if (bfqq != NULL) {
++ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
++ if (new_bfqq != NULL) {
++ bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
++ /*
++ * If we get here, the bio will be queued in the
++ * shared queue, i.e., new_bfqq, so use new_bfqq
++ * to decide whether bio and rq can be merged.
++ */
++ bfqq = new_bfqq;
++ } else
++ bfq_bfqq_increase_failed_cooperations(bfqq);
++ }
++
++ return bfqq == RQ_BFQQ(rq);
++}
++
++static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ if (bfqq != NULL) {
++ bfq_mark_bfqq_must_alloc(bfqq);
++ bfq_mark_bfqq_budget_new(bfqq);
++ bfq_clear_bfqq_fifo_expire(bfqq);
++
++ bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "set_in_service_queue, cur-budget = %lu",
++ bfqq->entity.budget);
++ }
++
++ bfqd->in_service_queue = bfqq;
++}
++
++/*
++ * Get and set a new queue for service.
++ */
++static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
++
++ __bfq_set_in_service_queue(bfqd, bfqq);
++ return bfqq;
++}
++
++/*
++ * If enough samples have been computed, return the current max budget
++ * stored in bfqd, which is dynamically updated according to the
++ * estimated disk peak rate; otherwise return the default max budget
++ */
++static inline unsigned long bfq_max_budget(struct bfq_data *bfqd)
++{
++ if (bfqd->budgets_assigned < 194)
++ return bfq_default_max_budget;
++ else
++ return bfqd->bfq_max_budget;
++}
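++
++/*
++ * A note on the magic number 194 used here and below:
++ * budgets_assigned follows the recurrence (7x + 256) / 8 (see
++ * __bfq_set_in_service_queue), which converges to 256 from 0 as
++ * x_n = 256 * (1 - (7/8)^n), and x_n >= 194 from roughly the 11th
++ * budget assignment on. The check thus simply asks whether enough
++ * budgets have been assigned for the device statistics to be
++ * trustworthy.
++ */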
++
++/*
++ * Return min budget, which is a fraction of the current or default
++ * max budget (trying with 1/32)
++ */
++static inline unsigned long bfq_min_budget(struct bfq_data *bfqd)
++{
++ if (bfqd->budgets_assigned < 194)
++ return bfq_default_max_budget / 32;
++ else
++ return bfqd->bfq_max_budget / 32;
++}
++
++static void bfq_arm_slice_timer(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq = bfqd->in_service_queue;
++ struct bfq_io_cq *bic;
++ unsigned long sl;
++
++ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ /* Processes have exited, don't wait. */
++ bic = bfqd->in_service_bic;
++ if (bic == NULL || atomic_read(&bic->icq.ioc->active_ref) == 0)
++ return;
++
++ bfq_mark_bfqq_wait_request(bfqq);
++
++ /*
++ * We don't want to idle for seeks, but we do want to allow
++ * fair distribution of slice time for a process doing back-to-back
++ * seeks. So allow a little bit of time for it to submit a new rq.
++ *
++ * To prevent processes with (partly) seeky workloads from
++ * being too ill-treated, grant them a small fraction of the
++ * assigned budget before reducing the waiting time to
++ * BFQ_MIN_TT. This happened to help reduce latency.
++ */
++ sl = bfqd->bfq_slice_idle;
++ /*
++ * Unless the queue is being weight-raised, grant only minimum idle
++ * time if the queue either has been seeky for long enough or has
++ * already proved to be constantly seeky.
++ */
++ if (bfq_sample_valid(bfqq->seek_samples) &&
++ ((BFQQ_SEEKY(bfqq) && bfqq->entity.service >
++ bfq_max_budget(bfqq->bfqd) / 8) ||
++ bfq_bfqq_constantly_seeky(bfqq)) && bfqq->wr_coeff == 1)
++ sl = min(sl, msecs_to_jiffies(BFQ_MIN_TT));
++ else if (bfqq->wr_coeff > 1)
++ sl = sl * 3;
++ bfqd->last_idling_start = ktime_get();
++ mod_timer(&bfqd->idle_slice_timer, jiffies + sl);
++ bfq_log(bfqd, "arm idle: %u/%u ms",
++ jiffies_to_msecs(sl), jiffies_to_msecs(bfqd->bfq_slice_idle));
++}
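++
++/*
++ * Numeric sketch of the slice arithmetic above (the default
++ * bfq_slice_idle is defined elsewhere; 8 ms is assumed here only
++ * for illustration): a non-seeky, non-raised queue is waited for
++ * up to 8 ms; a queue that has proved seeky after consuming more
++ * than 1/8 of the max budget gets only BFQ_MIN_TT; a weight-raised
++ * queue gets 3 * 8 = 24 ms, since preserving its guarantees is
++ * worth a longer idle wait.
++ */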
++
++/*
++ * Set the maximum time for the in-service queue to consume its
++ * budget. This prevents seeky processes from lowering the disk
++ * throughput (always guaranteed with a time slice scheme as in CFQ).
++ */
++static void bfq_set_budget_timeout(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq = bfqd->in_service_queue;
++ unsigned int timeout_coeff;
++ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
++ timeout_coeff = 1;
++ else
++ timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
++
++ bfqd->last_budget_start = ktime_get();
++
++ bfq_clear_bfqq_budget_new(bfqq);
++ bfqq->budget_timeout = jiffies +
++ bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * timeout_coeff;
++
++ bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
++ jiffies_to_msecs(bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] *
++ timeout_coeff));
++}
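++
++/*
++ * Example of the coefficient above (illustrative values): for a
++ * queue being weight-raised interactively with wr_coeff = 20,
++ * entity->weight is 20 * orig_weight, so timeout_coeff = 20 and
++ * the budget timeout is stretched accordingly; a soft real-time
++ * queue (wr_cur_max_time == bfq_wr_rt_max_time) keeps
++ * timeout_coeff = 1, as its goal is low latency, not extra
++ * service.
++ */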
++
++/*
++ * Move request from internal lists to the request queue dispatch list.
++ */
++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ /*
++ * For consistency, the next instruction should have been executed
++ * after removing the request from the queue and dispatching it.
++ * We execute this instruction before bfq_remove_request() instead
++ * (and hence introduce a temporary inconsistency), for efficiency.
++ * In fact, in a forced_dispatch, this prevents two counters related
++ * to bfqq->dispatched from being uselessly decremented if bfqq
++ * is not in service, and then incremented again after
++ * incrementing bfqq->dispatched.
++ */
++ bfqq->dispatched++;
++ bfq_remove_request(rq);
++ elv_dispatch_sort(q, rq);
++
++ if (bfq_bfqq_sync(bfqq))
++ bfqd->sync_flight++;
++}
++
++/*
++ * Return expired entry, or NULL to just start from scratch in rbtree.
++ */
++static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
++{
++ struct request *rq = NULL;
++
++ if (bfq_bfqq_fifo_expire(bfqq))
++ return NULL;
++
++ bfq_mark_bfqq_fifo_expire(bfqq);
++
++ if (list_empty(&bfqq->fifo))
++ return NULL;
++
++ rq = rq_entry_fifo(bfqq->fifo.next);
++
++ if (time_before(jiffies, rq->fifo_time))
++ return NULL;
++
++ return rq;
++}
++
++static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ return entity->budget - entity->service;
++}
++
++static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ __bfq_bfqd_reset_in_service(bfqd);
++
++ /*
++ * If this bfqq is shared between multiple processes, check
++ * to make sure that those processes are still issuing I/Os
++ * within the mean seek distance. If not, it may be time to
++ * break the queues apart again.
++ */
++ if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
++ bfq_mark_bfqq_split_coop(bfqq);
++
++ if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ /*
++ * Overloading budget_timeout field to store the time
++ * at which the queue was left with no backlog; used by
++ * the weight-raising mechanism.
++ */
++ bfqq->budget_timeout = jiffies;
++ bfq_del_bfqq_busy(bfqd, bfqq, 1);
++ } else {
++ bfq_activate_bfqq(bfqd, bfqq);
++ /*
++ * Resort priority tree of potential close cooperators.
++ */
++ bfq_rq_pos_tree_add(bfqd, bfqq);
++ }
++}
++
++/**
++ * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
++ * @bfqd: device data.
++ * @bfqq: queue to update.
++ * @reason: reason for expiration.
++ *
++ * Handle the feedback on @bfqq budget. See the body for detailed
++ * comments.
++ */
++static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ enum bfqq_expiration reason)
++{
++ struct request *next_rq;
++ unsigned long budget, min_budget;
++
++ budget = bfqq->max_budget;
++ min_budget = bfq_min_budget(bfqd);
++
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %lu, budg left %lu",
++ bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %lu, min budg %lu",
++ budget, bfq_min_budget(bfqd));
++ bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
++ bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
++
++ if (bfq_bfqq_sync(bfqq)) {
++ switch (reason) {
++ /*
++ * Caveat: in all the following cases we trade latency
++ * for throughput.
++ */
++ case BFQ_BFQQ_TOO_IDLE:
++ /*
++ * This is the only case where we may reduce
++ * the budget: if there is no request of the
++ * process still waiting for completion, then
++ * we assume (tentatively) that the timer has
++ * expired because the batch of requests of
++ * the process could have been served with a
++ * smaller budget. Hence, betting that
++ * the process will behave in the same way when it
++ * becomes backlogged again, we reduce its
++ * next budget. As long as we guess right,
++ * this budget cut reduces the latency
++ * experienced by the process.
++ *
++ * However, if there are still outstanding
++ * requests, then the process may have not yet
++ * issued its next request just because it is
++ * still waiting for the completion of some of
++ * the still outstanding ones. So in this
++ * subcase we do not reduce its budget, on the
++ * contrary we increase it to possibly boost
++ * the throughput, as discussed in the
++ * comments to the BUDGET_TIMEOUT case.
++ */
++ if (bfqq->dispatched > 0) /* still outstanding reqs */
++ budget = min(budget * 2, bfqd->bfq_max_budget);
++ else {
++ if (budget > 5 * min_budget)
++ budget -= 4 * min_budget;
++ else
++ budget = min_budget;
++ }
++ break;
++ case BFQ_BFQQ_BUDGET_TIMEOUT:
++ /*
++ * We double the budget here because: 1) it
++ * gives the chance to boost the throughput if
++ * this is not a seeky process (which may have
++ * bumped into this timeout because of, e.g.,
++ * ZBR), 2) together with charge_full_budget
++ * it helps give seeky processes higher
++ * timestamps, and hence to be served less
++ * frequently.
++ */
++ budget = min(budget * 2, bfqd->bfq_max_budget);
++ break;
++ case BFQ_BFQQ_BUDGET_EXHAUSTED:
++ /*
++ * The process still has backlog, and did not
++ * let either the budget timeout or the disk
++ * idling timeout expire. Hence it is not
++ * seeky, has a short thinktime and may be
++ * happy with a higher budget too. So
++ * definitely increase the budget of this good
++ * candidate to boost the disk throughput.
++ */
++ budget = min(budget * 4, bfqd->bfq_max_budget);
++ break;
++ case BFQ_BFQQ_NO_MORE_REQUESTS:
++ /*
++ * Leave the budget unchanged.
++ */
++ default:
++ return;
++ }
++ } else /* async queue */
++ /* async queues always get the maximum possible budget
++ * (their ability to dispatch is limited by
++ * @bfqd->bfq_max_budget_async_rq).
++ */
++ budget = bfqd->bfq_max_budget;
++
++ bfqq->max_budget = budget;
++
++ if (bfqd->budgets_assigned >= 194 && bfqd->bfq_user_max_budget == 0 &&
++ bfqq->max_budget > bfqd->bfq_max_budget)
++ bfqq->max_budget = bfqd->bfq_max_budget;
++
++ /*
++ * Make sure that we have enough budget for the next request.
++ * Since the finish time of the bfqq must be kept in sync with
++ * the budget, be sure to call __bfq_bfqq_expire() after the
++ * update.
++ */
++ next_rq = bfqq->next_rq;
++ if (next_rq != NULL)
++ bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
++ bfq_serv_to_charge(next_rq, bfqq));
++ else
++ bfqq->entity.budget = bfqq->max_budget;
++
++ bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %lu",
++ next_rq != NULL ? blk_rq_sectors(next_rq) : 0,
++ bfqq->entity.budget);
++}
++
++static unsigned long bfq_calc_max_budget(u64 peak_rate, u64 timeout)
++{
++ unsigned long max_budget;
++
++ /*
++ * The max_budget calculated when autotuning is equal to the
++ * number of sectors transferred in timeout_sync at the
++ * estimated peak rate.
++ */
++ max_budget = (unsigned long)(peak_rate * 1000 *
++ timeout >> BFQ_RATE_SHIFT);
++
++ return max_budget;
++}
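++
++/*
++ * Worked example (BFQ_RATE_SHIFT assumed to be 16, as defined
++ * elsewhere in this file): a device sustaining about 100 MB/s
++ * moves ~200000 sectors/sec, i.e. 0.2 sectors/usec, stored as
++ * peak_rate ~= 0.2 * 65536 ~= 13107. With timeout = 125 ms,
++ * max_budget = 13107 * 1000 * 125 >> 16 ~= 25000 sectors, i.e.
++ * roughly 12 MB per budget-timeout period.
++ */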
++
++/*
++ * In addition to updating the peak rate, checks whether the process
++ * is "slow", and returns 1 if so. This slow flag is used, in addition
++ * to the budget timeout, to reduce the amount of service provided to
++ * seeky processes, and hence reduce their chances to lower the
++ * throughput. See the code for more details.
++ */
++static int bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ int compensate, enum bfqq_expiration reason)
++{
++ u64 bw, usecs, expected, timeout;
++ ktime_t delta;
++ int update = 0;
++
++ if (!bfq_bfqq_sync(bfqq) || bfq_bfqq_budget_new(bfqq))
++ return 0;
++
++ if (compensate)
++ delta = bfqd->last_idling_start;
++ else
++ delta = ktime_get();
++ delta = ktime_sub(delta, bfqd->last_budget_start);
++ usecs = ktime_to_us(delta);
++
++ /* Don't trust short/unrealistic values. */
++ if (usecs < 100 || usecs >= LONG_MAX)
++ return 0;
++
++ /*
++ * Calculate the bandwidth for the last slice. We use a 64 bit
++ * value to store the peak rate, in sectors per usec in fixed
++ * point math. We do so to have enough precision in the estimate
++ * and to avoid overflows.
++ */
++ bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT;
++ do_div(bw, (unsigned long)usecs);
++
++ timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
++
++ /*
++ * Use only long (> 20ms) intervals to filter out spikes for
++ * the peak rate estimation.
++ */
++ if (usecs > 20000) {
++ if (bw > bfqd->peak_rate ||
++ (!BFQQ_SEEKY(bfqq) &&
++ reason == BFQ_BFQQ_BUDGET_TIMEOUT)) {
++ bfq_log(bfqd, "measured bw =%llu", bw);
++ /*
++ * To smooth oscillations use a low-pass filter with
++ * alpha=7/8, i.e.,
++ * new_rate = (7/8) * old_rate + (1/8) * bw
++ */
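++ /*
++ * For instance, with old peak_rate = 8000 and a new sample
++ * bw = 16000 (internal fixed-point units), the update below
++ * yields 7000 + 2000 = 9000: a single outlying sample moves
++ * the estimate by only 1/8 of the difference.
++ */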
++ do_div(bw, 8);
++ if (bw == 0)
++ return 0;
++ bfqd->peak_rate *= 7;
++ do_div(bfqd->peak_rate, 8);
++ bfqd->peak_rate += bw;
++ update = 1;
++ bfq_log(bfqd, "new peak_rate=%llu", bfqd->peak_rate);
++ }
++
++ update |= bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES - 1;
++
++ if (bfqd->peak_rate_samples < BFQ_PEAK_RATE_SAMPLES)
++ bfqd->peak_rate_samples++;
++
++ if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES &&
++ update) {
++ int dev_type = blk_queue_nonrot(bfqd->queue);
++ if (bfqd->bfq_user_max_budget == 0) {
++ bfqd->bfq_max_budget =
++ bfq_calc_max_budget(bfqd->peak_rate,
++ timeout);
++ bfq_log(bfqd, "new max_budget=%lu",
++ bfqd->bfq_max_budget);
++ }
++ if (bfqd->device_speed == BFQ_BFQD_FAST &&
++ bfqd->peak_rate < device_speed_thresh[dev_type]) {
++ bfqd->device_speed = BFQ_BFQD_SLOW;
++ bfqd->RT_prod = R_slow[dev_type] *
++ T_slow[dev_type];
++ } else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
++ bfqd->peak_rate > device_speed_thresh[dev_type]) {
++ bfqd->device_speed = BFQ_BFQD_FAST;
++ bfqd->RT_prod = R_fast[dev_type] *
++ T_fast[dev_type];
++ }
++ }
++ }
++
++ /*
++ * If the process has been served for too short a time
++ * interval to let its possible sequential accesses prevail over
++ * the initial seek time needed to move the disk head to the
++ * first sector it requested, then give the process a chance
++ * and for the moment return false.
++ */
++ if (bfqq->entity.budget <= bfq_max_budget(bfqd) / 8)
++ return 0;
++
++ /*
++ * A process is considered ``slow'' (i.e., seeky, so that we
++ * cannot treat it fairly in the service domain, as it would
++ * slow down the other processes too much) if, when a slice
++ * ends for whatever reason, it has received service at a
++ * rate that would not be high enough to complete the budget
++ * before the budget timeout expiration.
++ */
++ expected = bw * 1000 * timeout >> BFQ_RATE_SHIFT;
++
++ /*
++ * Caveat: processes doing IO in the slower disk zones will
++ * tend to be slow(er) even if not seeky. And the estimated
++ * peak rate will actually be an average over the disk
++ * surface. Hence, to not be too harsh with unlucky processes,
++ * we keep a budget/3 margin of safety before declaring a
++ * process slow.
++ */
++ return expected > (4 * bfqq->entity.budget) / 3;
++}
++
++/*
++ * To be deemed as soft real-time, an application must meet two
++ * requirements. First, the application must not require an average
++ * bandwidth higher than the approximate bandwidth required to playback or
++ * record a compressed high-definition video.
++ * The next function is invoked on the completion of the last request of a
++ * batch, to compute the next-start time instant, soft_rt_next_start, such
++ * that, if the next request of the application does not arrive before
++ * soft_rt_next_start, then the above requirement on the bandwidth is met.
++ *
++ * The second requirement is that the request pattern of the application is
++ * isochronous, i.e., that, after issuing a request or a batch of requests,
++ * the application stops issuing new requests until all its pending requests
++ * have been completed. After that, the application may issue a new batch,
++ * and so on.
++ * For this reason the next function is invoked to compute
++ * soft_rt_next_start only for applications that meet this requirement,
++ * whereas soft_rt_next_start is set to infinity for applications that do
++ * not.
++ *
++ * Unfortunately, even a greedy application may happen to behave in an
++ * isochronous way if the CPU load is high. In fact, the application may
++ * stop issuing requests while the CPUs are busy serving other processes,
++ * then restart, then stop again for a while, and so on. In addition, if
++ * the disk achieves a low enough throughput with the request pattern
++ * issued by the application (e.g., because the request pattern is random
++ * and/or the device is slow), then the application may meet the above
++ * bandwidth requirement too. To prevent such a greedy application from
++ * being deemed soft real-time, a further rule is used in the computation of
++ * soft_rt_next_start: soft_rt_next_start must be higher than the current
++ * time plus the maximum time for which the arrival of a request is waited
++ * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
++ * This filters out greedy applications, as the latter issue instead their
++ * next request as soon as possible after the last one has been completed
++ * (in contrast, when a batch of requests is completed, a soft real-time
++ * application spends some time processing data).
++ *
++ * Unfortunately, the last filter may easily generate false positives if
++ * only bfqd->bfq_slice_idle is used as a reference time interval and one
++ * or both the following cases occur:
++ * 1) HZ is so low that the duration of a jiffy is comparable to or higher
++ * than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
++ * HZ=100.
++ * 2) jiffies, instead of increasing at a constant rate, may stop increasing
++ * for a while, then suddenly 'jump' by several units to recover the lost
++ * increments. This seems to happen, e.g., inside virtual machines.
++ * To address this issue, we do not use as a reference time interval just
++ * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
++ * particular we add the minimum number of jiffies for which the filter
++ * seems to be quite precise also in embedded systems and KVM/QEMU virtual
++ * machines.
++ */
++static inline unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ return max(bfqq->last_idle_bklogged +
++ HZ * bfqq->service_from_backlogged /
++ bfqd->bfq_wr_max_softrt_rate,
++ jiffies + bfqq->bfqd->bfq_slice_idle + 4);
++}
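++
++/*
++ * Numeric sketch of the max() above (values for illustration
++ * only): with HZ = 250, bfq_wr_max_softrt_rate = 7000 sectors/sec
++ * and 3500 sectors of service received since the queue was last
++ * idle and backlogged, the first operand evaluates to
++ * last_idle_bklogged + 125 jiffies, i.e. half a second of "earned"
++ * bandwidth: a new request arriving earlier than that would exceed
++ * the soft real-time bandwidth cap. The second operand enforces
++ * the minimum idling gap discussed above.
++ */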
++
++/*
++ * Return the largest-possible time instant such that, for as long as possible,
++ * the current time will be lower than this time instant according to the macro
++ * time_is_before_jiffies().
++ */
++static inline unsigned long bfq_infinity_from_now(unsigned long now)
++{
++ return now + ULONG_MAX / 2;
++}
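++
++/*
++ * This works because the jiffies helpers, time_is_before_jiffies()
++ * included, order two time instants through a signed subtraction,
++ * which is reliable only as long as the two instants are less than
++ * ULONG_MAX / 2 ticks apart. Hence now + ULONG_MAX / 2 is the
++ * farthest instant still guaranteed to compare as being in the
++ * future, i.e., as close to infinity as jiffies arithmetic allows.
++ */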
++
++/**
++ * bfq_bfqq_expire - expire a queue.
++ * @bfqd: device owning the queue.
++ * @bfqq: the queue to expire.
++ * @compensate: if true, compensate for the time spent idling.
++ * @reason: the reason causing the expiration.
++ *
++ *
++ * If the process associated to the queue is slow (i.e., seeky), or in
++ * case of budget timeout, or, finally, if it is async, we
++ * artificially charge it an entire budget (independently of the
++ * actual service it received). As a consequence, the queue will get
++ * higher timestamps than the correct ones upon reactivation, and
++ * hence it will be rescheduled as if it had received more service
++ * than it actually received. In the end, this class of processes
++ * will receive less service in proportion to how slowly they consume
++ * their budgets (and hence how seriously they tend to lower the
++ * throughput).
++ *
++ * In contrast, when a queue expires because it has been idling for
++ * too long or because it exhausted its budget, we do not touch the
++ * amount of service it has received. Hence when the queue will be
++ * reactivated and its timestamps updated, the latter will be in sync
++ * with the actual service received by the queue until expiration.
++ *
++ * Charging a full budget to the first type of queues and the exact
++ * service to the others has the effect of using the WF2Q+ policy to
++ * schedule the former on a timeslice basis, without violating the
++ * service domain guarantees of the latter.
++ */
++static void bfq_bfqq_expire(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ int compensate,
++ enum bfqq_expiration reason)
++{
++ int slow;
++ BUG_ON(bfqq != bfqd->in_service_queue);
++
++ /* Update disk peak rate for autotuning and check whether the
++ * process is slow (see bfq_update_peak_rate).
++ */
++ slow = bfq_update_peak_rate(bfqd, bfqq, compensate, reason);
++
++ /*
++ * As above explained, 'punish' slow (i.e., seeky), timed-out
++ * and async queues, to favor sequential sync workloads.
++ *
++ * Processes doing I/O in the slower disk zones will tend to be
++ * slow(er) even if not seeky. Hence, since the estimated peak
++ * rate is actually an average over the disk surface, these
++ * processes may timeout just for bad luck. To avoid punishing
++ * them we do not charge a full budget to a process that
++ * succeeded in consuming at least 2/3 of its budget.
++ */
++ if (slow || (reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3))
++ bfq_bfqq_charge_full_budget(bfqq);
++
++ bfqq->service_from_backlogged += bfqq->entity.service;
++
++ if (BFQQ_SEEKY(bfqq) && reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
++ !bfq_bfqq_constantly_seeky(bfqq)) {
++ bfq_mark_bfqq_constantly_seeky(bfqq);
++ if (!blk_queue_nonrot(bfqd->queue))
++ bfqd->const_seeky_busy_in_flight_queues++;
++ }
++
++ if (reason == BFQ_BFQQ_TOO_IDLE &&
++ bfqq->entity.service <= 2 * bfqq->entity.budget / 10)
++ bfq_clear_bfqq_IO_bound(bfqq);
++
++ if (bfqd->low_latency && bfqq->wr_coeff == 1)
++ bfqq->last_wr_start_finish = jiffies;
++
++ if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
++ RB_EMPTY_ROOT(&bfqq->sort_list)) {
++ /*
++ * If we get here, and there are no outstanding requests,
++ * then the request pattern is isochronous (see the comments
++ * to the function bfq_bfqq_softrt_next_start()). Hence we
++ * can compute soft_rt_next_start. If, instead, the queue
++ * still has outstanding requests, then we have to wait
++ * for the completion of all the outstanding requests to
++ * discover whether the request pattern is actually
++ * isochronous.
++ */
++ if (bfqq->dispatched == 0)
++ bfqq->soft_rt_next_start =
++ bfq_bfqq_softrt_next_start(bfqd, bfqq);
++ else {
++ /*
++ * The application is still waiting for the
++ * completion of one or more requests:
++ * prevent it from possibly being incorrectly
++ * deemed as soft real-time by setting its
++ * soft_rt_next_start to infinity. In fact,
++ * without this assignment, the application
++ * would be incorrectly deemed as soft
++ * real-time if:
++ * 1) it issued a new request before the
++ * completion of all its in-flight
++ * requests, and
++ * 2) at that time, its soft_rt_next_start
++ * happened to be in the past.
++ */
++ bfqq->soft_rt_next_start =
++ bfq_infinity_from_now(jiffies);
++ /*
++ * Schedule an update of soft_rt_next_start to when
++ * the task may be discovered to be isochronous.
++ */
++ bfq_mark_bfqq_softrt_update(bfqq);
++ }
++ }
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "expire (%d, slow %d, num_disp %d, idle_win %d)", reason,
++ slow, bfqq->dispatched, bfq_bfqq_idle_window(bfqq));
++
++ /*
++ * Increase, decrease or leave budget unchanged according to
++ * reason.
++ */
++ __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
++ __bfq_bfqq_expire(bfqd, bfqq);
++}
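++
++/*
++ * Illustrative numbers (not taken from the code): a seeky queue with
++ * budget 8192 sectors and weight 100 that received only 1024 sectors
++ * of service before expiring gets charged the full 8192. Under WF2Q+
++ * its virtual finish time then advances by 8192/100 instead of
++ * 1024/100, so on reactivation it is treated as if it had consumed
++ * eight times the service it really got, and its throughput share
++ * shrinks accordingly.
++ */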
++
++/*
++ * Budget timeout is not implemented through a dedicated timer, but
++ * just checked on request arrivals and completions, as well as on
++ * idle timer expirations.
++ */
++static int bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
++{
++ if (bfq_bfqq_budget_new(bfqq) ||
++ time_before(jiffies, bfqq->budget_timeout))
++ return 0;
++ return 1;
++}
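++
++/*
++ * time_before() is used rather than a plain comparison so that the
++ * check above remains correct across jiffies wrap-around.
++ */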
++
++/*
++ * If we expire a queue that is waiting for the arrival of a new
++ * request, we may prevent the fictitious timestamp back-shifting that
++ * allows the guarantees of the queue to be preserved (see [1] for
++ * this tricky aspect). Hence we return true only if this condition
++ * does not hold, or if the queue is slow enough to deserve only to be
++ * kicked off for preserving a high throughput.
++ */
++static inline int bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "may_budget_timeout: wait_request %d left %d timeout %d",
++ bfq_bfqq_wait_request(bfqq),
++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
++ bfq_bfqq_budget_timeout(bfqq));
++
++ return (!bfq_bfqq_wait_request(bfqq) ||
++ bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
++ &&
++ bfq_bfqq_budget_timeout(bfqq);
++}
++
++/*
++ * Device idling is allowed only for the queues for which this function
++ * returns true. For this reason, the return value of this function plays a
++ * critical role for both throughput boosting and service guarantees. The
++ * return value is computed through a logical expression. In this rather
++ * long comment, we try to briefly describe all the details and motivations
++ * behind the components of this logical expression.
++ *
++ * First, the expression is false if bfqq is not sync, or if: bfqq happened
++ * to become active during a large burst of queue activations, and the
++ * pattern of requests bfqq contains boosts the throughput if bfqq is
++ * expired. In fact, queues that became active during a large burst benefit
++ * only from throughput, as discussed in the comments to bfq_handle_burst.
++ * In this respect, expiring bfqq certainly boosts the throughput on NCQ-
++ * capable flash-based devices, whereas, on rotational devices, it boosts
++ * the throughput only if bfqq contains random requests.
++ *
++ * On the opposite end, if (a) bfqq is sync, (b) the above burst-related
++ * condition does not hold, and (c) bfqq is being weight-raised, then the
++ * expression always evaluates to true, as device idling is instrumental
++ * for preserving low-latency guarantees (see [1]). If, instead, conditions
++ * (a) and (b) do hold, but (c) does not, then the expression evaluates to
++ * true only if: (1) bfqq is I/O-bound and has a non-null idle window, and
++ * (2) at least one of the following two conditions holds.
++ * The first condition is that the device is not performing NCQ, because
++ * idling the device most certainly boosts the throughput if this condition
++ * holds and bfqq is I/O-bound and has been granted a non-null idle window.
++ * The second compound condition is made of the logical AND of two components.
++ *
++ * The first component is true only if there is no weight-raised busy
++ * queue. This guarantees that the device is not idled for a sync non-
++ * weight-raised queue when there are busy weight-raised queues. The former
++ * is then expired immediately if empty. Combined with the timestamping
++ * rules of BFQ (see [1] for details), this causes sync non-weight-raised
++ * queues to get a lower number of requests served, and hence to ask for a
++ * lower number of requests from the request pool, before the busy weight-
++ * raised queues get served again.
++ *
++ * This is beneficial for the processes associated with weight-raised
++ * queues, when the request pool is saturated (e.g., in the presence of
++ * write hogs). In fact, if the processes associated with the other queues
++ * ask for requests at a lower rate, then weight-raised processes have a
++ * higher probability to get a request from the pool immediately (or at
++ * least soon) when they need one. Hence they have a higher probability to
++ * actually get a fraction of the disk throughput proportional to their
++ * high weight. This is especially true with NCQ-capable drives, which
++ * enqueue several requests in advance and further reorder internally-
++ * queued requests.
++ *
++ * In the end, mistreating non-weight-raised queues when there are busy
++ * weight-raised queues seems to mitigate starvation problems in the
++ * presence of heavy write workloads and NCQ, and hence to guarantee a
++ * higher application and system responsiveness in these hostile scenarios.
++ *
++ * If the first component of the compound condition is instead true, i.e.,
++ * there is no weight-raised busy queue, then the second component of the
++ * compound condition takes into account service-guarantee and throughput
++ * issues related to NCQ (recall that the compound condition is evaluated
++ * only if the device is detected as supporting NCQ).
++ *
++ * As for service guarantees, allowing the drive to enqueue more than one
++ * request at a time, and hence delegating de facto final scheduling
++ * decisions to the drive's internal scheduler, causes loss of control on
++ * the actual request service order. In this respect, when the drive is
++ * allowed to enqueue more than one request at a time, the service
++ * distribution enforced by the drive's internal scheduler is likely to
++ * coincide with the desired device-throughput distribution only in the
++ * following, perfectly symmetric, scenario:
++ * 1) all active queues have the same weight,
++ * 2) all active groups at the same level in the groups tree have the same
++ * weight,
++ * 3) all active groups at the same level in the groups tree have the same
++ * number of children.
++ *
++ * Even in such a scenario, sequential I/O may still receive a preferential
++ * treatment, but this is not likely to be a big issue with flash-based
++ * devices, because of their non-dramatic loss of throughput with random
++ * I/O. Things do differ with HDDs, for which additional care is taken, as
++ * explained after completing the discussion for flash-based devices.
++ *
++ * Unfortunately, keeping the necessary state for evaluating exactly the
++ * above symmetry conditions would be quite complex and time-consuming.
++ * Therefore BFQ evaluates instead the following stronger sub-conditions,
++ * for which it is much easier to maintain the needed state:
++ * 1) all active queues have the same weight,
++ * 2) all active groups have the same weight,
++ * 3) all active groups have at most one active child each.
++ * In particular, the last two conditions are always true if hierarchical
++ * support and the cgroups interface are not enabled, hence no state needs
++ * to be maintained in this case.
++ *
++ * According to the above considerations, the second component of the
++ * compound condition evaluates to true if any of the above symmetry
++ * sub-conditions does not hold, or the device is not flash-based. Therefore,
++ * if also the first component is true, then idling is allowed for a sync
++ * queue. These are the only sub-conditions considered if the device is
++ * flash-based, as, for such a device, it is sensible to force idling only
++ * for service-guarantee issues. In fact, as for throughput, idling
++ * NCQ-capable flash-based devices would not boost the throughput even
++ * with sequential I/O; rather it would lower the throughput in proportion
++ * to how fast the device is. In the end, (only) if all the three
++ * sub-conditions hold and the device is flash-based, the compound
++ * condition evaluates to false and therefore no idling is performed.
++ *
++ * As already said, things change with a rotational device, where idling
++ * boosts the throughput with sequential I/O (even with NCQ). Hence, for
++ * such a device the second component of the compound condition evaluates
++ * to true also if the following additional sub-condition does not hold:
++ * the queue is constantly seeky. Unfortunately, this different behavior
++ * with respect to flash-based devices causes an additional asymmetry: if
++ * some sync queues enjoy idling and some other sync queues do not, then
++ * the latter get a low share of the device throughput, simply because the
++ * former get many requests served after being set as in service, whereas
++ * the latter do not. As a consequence, to guarantee the desired throughput
++ * distribution, on HDDs the compound expression evaluates to true (and
++ * hence device idling is performed) also if the following last symmetry
++ * condition does not hold: no other queue is benefiting from idling. Also
++ * this last condition is actually replaced with a simpler-to-maintain and
++ * stronger condition: there is no busy queue which is not constantly seeky
++ * (and hence may also benefit from idling).
++ *
++ * To sum up, when all the required symmetry and throughput-boosting
++ * sub-conditions hold, the second component of the compound condition
++ * evaluates to false, and hence no idling is performed. This helps to
++ * keep the drives' internal queues full on NCQ-capable devices, and hence
++ * to boost the throughput, without causing 'almost' any loss of service
++ * guarantees. The 'almost' follows from the fact that, if the internal
++ * queue of one such device is filled while all the sub-conditions hold,
++ * but at some point in time some sub-condition ceases to hold, then it may
++ * become impossible to let requests be served in the new desired order
++ * until all the requests already queued in the device have been served.
++ */
++static inline bool bfq_bfqq_must_not_expire(struct bfq_queue *bfqq)
++{
++ struct bfq_data *bfqd = bfqq->bfqd;
++#ifdef CONFIG_CGROUP_BFQIO
++#define symmetric_scenario (!bfqd->active_numerous_groups && \
++ !bfq_differentiated_weights(bfqd))
++#else
++#define symmetric_scenario (!bfq_differentiated_weights(bfqd))
++#endif
++#define cond_for_seeky_on_ncq_hdd (bfq_bfqq_constantly_seeky(bfqq) && \
++ bfqd->busy_in_flight_queues == \
++ bfqd->const_seeky_busy_in_flight_queues)
++
++#define cond_for_expiring_in_burst (bfq_bfqq_in_large_burst(bfqq) && \
++ bfqd->hw_tag && \
++ (blk_queue_nonrot(bfqd->queue) || \
++ bfq_bfqq_constantly_seeky(bfqq)))
++
++/*
++ * Condition for expiring a non-weight-raised queue (and hence not idling
++ * the device).
++ */
++#define cond_for_expiring_non_wr (bfqd->hw_tag && \
++ (bfqd->wr_busy_queues > 0 || \
++ (symmetric_scenario && \
++ (blk_queue_nonrot(bfqd->queue) || \
++ cond_for_seeky_on_ncq_hdd))))
++
++ return bfq_bfqq_sync(bfqq) &&
++ !cond_for_expiring_in_burst &&
++ (bfqq->wr_coeff > 1 ||
++ (bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_idle_window(bfqq) &&
++ !cond_for_expiring_non_wr)
++ );
++}
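++
++/*
++ * Putting the pieces together, the expression above reads, with the
++ * macro names just defined:
++ *
++ *	sync(bfqq) && !cond_for_expiring_in_burst &&
++ *	(bfqq->wr_coeff > 1 ||
++ *	 (IO_bound(bfqq) && idle_window(bfqq) &&
++ *	  !cond_for_expiring_non_wr))
++ *
++ * i.e., apart from the burst case, weight-raised queues always get to
++ * idle, while other sync queues idle only if they are I/O-bound, hold
++ * an idle window, and no expiration condition for non-weight-raised
++ * queues applies.
++ */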
++
++/*
++ * If the in-service queue is empty but sync, and the function
++ * bfq_bfqq_must_not_expire returns true, then:
++ * 1) the queue must remain in service and cannot be expired, and
++ * 2) the disk must be idled to wait for the possible arrival of a new
++ * request for the queue.
++ * See the comments to the function bfq_bfqq_must_not_expire for the reasons
++ * why performing device idling is the best choice to boost the throughput
++ * and preserve service guarantees when bfq_bfqq_must_not_expire itself
++ * returns true.
++ */
++static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
++{
++ struct bfq_data *bfqd = bfqq->bfqd;
++
++ return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 &&
++ bfq_bfqq_must_not_expire(bfqq);
++}
++
++/*
++ * Select a queue for service. If we have a current queue in service,
++ * check whether to continue servicing it, or retrieve and set a new one.
++ */
++static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq;
++ struct request *next_rq;
++ enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++
++ bfqq = bfqd->in_service_queue;
++ if (bfqq == NULL)
++ goto new_queue;
++
++ bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
++
++ if (bfq_may_expire_for_budg_timeout(bfqq) &&
++ !timer_pending(&bfqd->idle_slice_timer) &&
++ !bfq_bfqq_must_idle(bfqq))
++ goto expire;
++
++ next_rq = bfqq->next_rq;
++ /*
++ * If bfqq has requests queued and it has enough budget left to
++ * serve them, keep the queue, otherwise expire it.
++ */
++ if (next_rq != NULL) {
++ if (bfq_serv_to_charge(next_rq, bfqq) >
++ bfq_bfqq_budget_left(bfqq)) {
++ reason = BFQ_BFQQ_BUDGET_EXHAUSTED;
++ goto expire;
++ } else {
++ /*
++ * The idle timer may be pending because we may
++ * not disable disk idling even when a new request
++ * arrives.
++ */
++ if (timer_pending(&bfqd->idle_slice_timer)) {
++ /*
++				 * If we get here: 1) at least one new request
++ * has arrived but we have not disabled the
++ * timer because the request was too small,
++ * 2) then the block layer has unplugged
++ * the device, causing the dispatch to be
++ * invoked.
++ *
++ * Since the device is unplugged, now the
++ * requests are probably large enough to
++ * provide a reasonable throughput.
++ * So we disable idling.
++ */
++ bfq_clear_bfqq_wait_request(bfqq);
++ del_timer(&bfqd->idle_slice_timer);
++ }
++ goto keep_queue;
++ }
++ }
++
++ /*
++ * No requests pending. If the in-service queue still has requests
++ * in flight (possibly waiting for a completion) or is idling for a
++ * new request, then keep it.
++ */
++ if (timer_pending(&bfqd->idle_slice_timer) ||
++ (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq))) {
++ bfqq = NULL;
++ goto keep_queue;
++ }
++
++ reason = BFQ_BFQQ_NO_MORE_REQUESTS;
++expire:
++ bfq_bfqq_expire(bfqd, bfqq, 0, reason);
++new_queue:
++ bfqq = bfq_set_in_service_queue(bfqd);
++ bfq_log(bfqd, "select_queue: new queue %d returned",
++ bfqq != NULL ? bfqq->pid : 0);
++keep_queue:
++ return bfqq;
++}
++
++static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++	if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
++ bfq_log_bfqq(bfqd, bfqq,
++ "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
++ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time),
++ bfqq->wr_coeff,
++ bfqq->entity.weight, bfqq->entity.orig_weight);
++
++ BUG_ON(bfqq != bfqd->in_service_queue && entity->weight !=
++ entity->orig_weight * bfqq->wr_coeff);
++ if (entity->ioprio_changed)
++ bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
++
++ /*
++ * If the queue was activated in a burst, or
++ * too much time has elapsed from the beginning
++ * of this weight-raising period, or the queue has
++ * exceeded the acceptable number of cooperations,
++ * then end weight raising.
++ */
++ if (bfq_bfqq_in_large_burst(bfqq) ||
++ bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh ||
++ time_is_before_jiffies(bfqq->last_wr_start_finish +
++ bfqq->wr_cur_max_time)) {
++ bfqq->last_wr_start_finish = jiffies;
++ bfq_log_bfqq(bfqd, bfqq,
++ "wrais ending at %lu, rais_max_time %u",
++ bfqq->last_wr_start_finish,
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ bfq_bfqq_end_wr(bfqq);
++ }
++ }
++ /* Update weight both if it must be raised and if it must be lowered */
++ if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
++ __bfq_entity_update_weight_prio(
++ bfq_entity_service_tree(entity),
++ entity);
++}
++
++/*
++ * Dispatch one request from bfqq, moving it to the request queue
++ * dispatch list.
++ */
++static int bfq_dispatch_request(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq)
++{
++ int dispatched = 0;
++ struct request *rq;
++ unsigned long service_to_charge;
++
++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ /* Follow expired path, else get first next available. */
++ rq = bfq_check_fifo(bfqq);
++ if (rq == NULL)
++ rq = bfqq->next_rq;
++ service_to_charge = bfq_serv_to_charge(rq, bfqq);
++
++ if (service_to_charge > bfq_bfqq_budget_left(bfqq)) {
++ /*
++ * This may happen if the next rq is chosen in fifo order
++ * instead of sector order. The budget is properly
++ * dimensioned to be always sufficient to serve the next
++ * request only if it is chosen in sector order. The reason
++ * is that it would be quite inefficient and little useful
++ * to always make sure that the budget is large enough to
++ * serve even the possible next rq in fifo order.
++ * In fact, requests are seldom served in fifo order.
++ *
++ * Expire the queue for budget exhaustion, and make sure
++ * that the next act_budget is enough to serve the next
++ * request, even if it comes from the fifo expired path.
++ */
++ bfqq->next_rq = rq;
++ /*
++ * Since this dispatch is failed, make sure that
++ * a new one will be performed
++ */
++ if (!bfqd->rq_in_driver)
++ bfq_schedule_dispatch(bfqd);
++ goto expire;
++ }
++
++ /* Finally, insert request into driver dispatch list. */
++ bfq_bfqq_served(bfqq, service_to_charge);
++ bfq_dispatch_insert(bfqd->queue, rq);
++
++ bfq_update_wr_data(bfqd, bfqq);
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "dispatched %u sec req (%llu), budg left %lu",
++ blk_rq_sectors(rq),
++		     (unsigned long long)blk_rq_pos(rq),
++ bfq_bfqq_budget_left(bfqq));
++
++ dispatched++;
++
++ if (bfqd->in_service_bic == NULL) {
++ atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
++ bfqd->in_service_bic = RQ_BIC(rq);
++ }
++
++ if (bfqd->busy_queues > 1 && ((!bfq_bfqq_sync(bfqq) &&
++ dispatched >= bfqd->bfq_max_budget_async_rq) ||
++ bfq_class_idle(bfqq)))
++ goto expire;
++
++ return dispatched;
++
++expire:
++ bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_EXHAUSTED);
++ return dispatched;
++}
++
++static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq)
++{
++ int dispatched = 0;
++
++ while (bfqq->next_rq != NULL) {
++ bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq);
++ dispatched++;
++ }
++
++ BUG_ON(!list_empty(&bfqq->fifo));
++ return dispatched;
++}
++
++/*
++ * Drain our current requests.
++ * Used for barriers and when switching io schedulers on-the-fly.
++ */
++static int bfq_forced_dispatch(struct bfq_data *bfqd)
++{
++ struct bfq_queue *bfqq, *n;
++ struct bfq_service_tree *st;
++ int dispatched = 0;
++
++ bfqq = bfqd->in_service_queue;
++ if (bfqq != NULL)
++ __bfq_bfqq_expire(bfqd, bfqq);
++
++ /*
++ * Loop through classes, and be careful to leave the scheduler
++ * in a consistent state, as feedback mechanisms and vtime
++ * updates cannot be disabled during the process.
++ */
++ list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) {
++ st = bfq_entity_service_tree(&bfqq->entity);
++
++ dispatched += __bfq_forced_dispatch_bfqq(bfqq);
++ bfqq->max_budget = bfq_max_budget(bfqd);
++
++ bfq_forget_idle(st);
++ }
++
++ BUG_ON(bfqd->busy_queues != 0);
++
++ return dispatched;
++}
++
++static int bfq_dispatch_requests(struct request_queue *q, int force)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq;
++ int max_dispatch;
++
++ bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
++ if (bfqd->busy_queues == 0)
++ return 0;
++
++ if (unlikely(force))
++ return bfq_forced_dispatch(bfqd);
++
++ bfqq = bfq_select_queue(bfqd);
++ if (bfqq == NULL)
++ return 0;
++
++ max_dispatch = bfqd->bfq_quantum;
++ if (bfq_class_idle(bfqq))
++ max_dispatch = 1;
++
++ if (!bfq_bfqq_sync(bfqq))
++ max_dispatch = bfqd->bfq_max_budget_async_rq;
++
++ if (bfqq->dispatched >= max_dispatch) {
++ if (bfqd->busy_queues > 1)
++ return 0;
++ if (bfqq->dispatched >= 4 * max_dispatch)
++ return 0;
++ }
++
++ if (bfqd->sync_flight != 0 && !bfq_bfqq_sync(bfqq))
++ return 0;
++
++ bfq_clear_bfqq_wait_request(bfqq);
++ BUG_ON(timer_pending(&bfqd->idle_slice_timer));
++
++ if (!bfq_dispatch_request(bfqd, bfqq))
++ return 0;
++
++ bfq_log_bfqq(bfqd, bfqq, "dispatched one request of %d (max_disp %d)",
++ bfqq->pid, max_dispatch);
++
++ return 1;
++}
++
++/*
++ * Task holds one reference to the queue, dropped when task exits. Each rq
++ * in-flight on this queue also holds a reference, dropped when rq is freed.
++ *
++ * Queue lock must be held here.
++ */
++static void bfq_put_queue(struct bfq_queue *bfqq)
++{
++ struct bfq_data *bfqd = bfqq->bfqd;
++
++ BUG_ON(atomic_read(&bfqq->ref) <= 0);
++
++ bfq_log_bfqq(bfqd, bfqq, "put_queue: %p %d", bfqq,
++ atomic_read(&bfqq->ref));
++ if (!atomic_dec_and_test(&bfqq->ref))
++ return;
++
++ BUG_ON(rb_first(&bfqq->sort_list) != NULL);
++ BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0);
++ BUG_ON(bfqq->entity.tree != NULL);
++ BUG_ON(bfq_bfqq_busy(bfqq));
++ BUG_ON(bfqd->in_service_queue == bfqq);
++
++ if (bfq_bfqq_sync(bfqq))
++ /*
++ * The fact that this queue is being destroyed does not
++ * invalidate the fact that this queue may have been
++ * activated during the current burst. As a consequence,
++ * although the queue does not exist anymore, and hence
++		 * needs to be removed from the burst list if it is there,
++		 * the burst size must not be decremented.
++ */
++ hlist_del_init(&bfqq->burst_list_node);
++
++ bfq_log_bfqq(bfqd, bfqq, "put_queue: %p freed", bfqq);
++
++ kmem_cache_free(bfq_pool, bfqq);
++}
++
++static void bfq_put_cooperator(struct bfq_queue *bfqq)
++{
++ struct bfq_queue *__bfqq, *next;
++
++ /*
++ * If this queue was scheduled to merge with another queue, be
++ * sure to drop the reference taken on that queue (and others in
++ * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
++ */
++ __bfqq = bfqq->new_bfqq;
++ while (__bfqq) {
++ if (__bfqq == bfqq)
++ break;
++ next = __bfqq->new_bfqq;
++ bfq_put_queue(__bfqq);
++ __bfqq = next;
++ }
++}
++
++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ if (bfqq == bfqd->in_service_queue) {
++ __bfq_bfqq_expire(bfqd, bfqq);
++ bfq_schedule_dispatch(bfqd);
++ }
++
++ bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq,
++ atomic_read(&bfqq->ref));
++
++ bfq_put_cooperator(bfqq);
++
++ bfq_put_queue(bfqq);
++}
++
++static inline void bfq_init_icq(struct io_cq *icq)
++{
++ struct bfq_io_cq *bic = icq_to_bic(icq);
++
++ bic->ttime.last_end_request = jiffies;
++ /*
++ * A newly created bic indicates that the process has just
++ * started doing I/O, and is probably mapping into memory its
++ * executable and libraries: it definitely needs weight raising.
++ * There is however the possibility that the process performs,
++ * for a while, I/O close to some other process. EQM intercepts
++ * this behavior and may merge the queue corresponding to the
++ * process with some other queue, BEFORE the weight of the queue
++ * is raised. Merged queues are not weight-raised (they are assumed
++ * to belong to processes that benefit only from high throughput).
++ * If the merge is basically the consequence of an accident, then
++ * the queue will be split soon and will get back its old weight.
++ * It is then important to write down somewhere that this queue
++ * does need weight raising, even if it did not make it to get its
++ * weight raised before being merged. To this purpose, we overload
++ * the field raising_time_left and assign 1 to it, to mark the queue
++ * as needing weight raising.
++ */
++ bic->wr_time_left = 1;
++}
++
++static void bfq_exit_icq(struct io_cq *icq)
++{
++ struct bfq_io_cq *bic = icq_to_bic(icq);
++ struct bfq_data *bfqd = bic_to_bfqd(bic);
++
++ if (bic->bfqq[BLK_RW_ASYNC]) {
++ bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_ASYNC]);
++ bic->bfqq[BLK_RW_ASYNC] = NULL;
++ }
++
++ if (bic->bfqq[BLK_RW_SYNC]) {
++ /*
++ * If the bic is using a shared queue, put the reference
++ * taken on the io_context when the bic started using a
++ * shared bfq_queue.
++ */
++ if (bfq_bfqq_coop(bic->bfqq[BLK_RW_SYNC]))
++ put_io_context(icq->ioc);
++ bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]);
++ bic->bfqq[BLK_RW_SYNC] = NULL;
++ }
++}
++
++/*
++ * Update the entity prio values; note that the new values will not
++ * be used until the next (re)activation.
++ */
++static void bfq_init_prio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
++{
++ struct task_struct *tsk = current;
++ int ioprio_class;
++
++ if (!bfq_bfqq_prio_changed(bfqq))
++ return;
++
++ ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
++ switch (ioprio_class) {
++ default:
++ dev_err(bfqq->bfqd->queue->backing_dev_info.dev,
++ "bfq: bad prio class %d\n", ioprio_class);
++ case IOPRIO_CLASS_NONE:
++ /*
++ * No prio set, inherit CPU scheduling settings.
++ */
++ bfqq->entity.new_ioprio = task_nice_ioprio(tsk);
++ bfqq->entity.new_ioprio_class = task_nice_ioclass(tsk);
++ break;
++ case IOPRIO_CLASS_RT:
++ bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_RT;
++ break;
++ case IOPRIO_CLASS_BE:
++ bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_BE;
++ break;
++ case IOPRIO_CLASS_IDLE:
++ bfqq->entity.new_ioprio_class = IOPRIO_CLASS_IDLE;
++ bfqq->entity.new_ioprio = 7;
++ bfq_clear_bfqq_idle_window(bfqq);
++ break;
++ }
++
++ if (bfqq->entity.new_ioprio < 0 ||
++ bfqq->entity.new_ioprio >= IOPRIO_BE_NR) {
++ printk(KERN_CRIT "bfq_init_prio_data: new_ioprio %d\n",
++ bfqq->entity.new_ioprio);
++ BUG();
++ }
++
++ bfqq->entity.ioprio_changed = 1;
++
++ bfq_clear_bfqq_prio_changed(bfqq);
++}
++
++static void bfq_changed_ioprio(struct bfq_io_cq *bic)
++{
++ struct bfq_data *bfqd;
++ struct bfq_queue *bfqq, *new_bfqq;
++ struct bfq_group *bfqg;
++ unsigned long uninitialized_var(flags);
++ int ioprio = bic->icq.ioc->ioprio;
++
++ bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data),
++ &flags);
++ /*
++ * This condition may trigger on a newly created bic, be sure to
++ * drop the lock before returning.
++ */
++ if (unlikely(bfqd == NULL) || likely(bic->ioprio == ioprio))
++ goto out;
++
++ bfqq = bic->bfqq[BLK_RW_ASYNC];
++ if (bfqq != NULL) {
++ bfqg = container_of(bfqq->entity.sched_data, struct bfq_group,
++ sched_data);
++ new_bfqq = bfq_get_queue(bfqd, bfqg, BLK_RW_ASYNC, bic,
++ GFP_ATOMIC);
++ if (new_bfqq != NULL) {
++ bic->bfqq[BLK_RW_ASYNC] = new_bfqq;
++ bfq_log_bfqq(bfqd, bfqq,
++ "changed_ioprio: bfqq %p %d",
++ bfqq, atomic_read(&bfqq->ref));
++ bfq_put_queue(bfqq);
++ }
++ }
++
++ bfqq = bic->bfqq[BLK_RW_SYNC];
++ if (bfqq != NULL)
++ bfq_mark_bfqq_prio_changed(bfqq);
++
++ bic->ioprio = ioprio;
++
++out:
++ bfq_put_bfqd_unlock(bfqd, &flags);
++}
++
++static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ pid_t pid, int is_sync)
++{
++ RB_CLEAR_NODE(&bfqq->entity.rb_node);
++ INIT_LIST_HEAD(&bfqq->fifo);
++ INIT_HLIST_NODE(&bfqq->burst_list_node);
++
++ atomic_set(&bfqq->ref, 0);
++ bfqq->bfqd = bfqd;
++
++ bfq_mark_bfqq_prio_changed(bfqq);
++
++ if (is_sync) {
++ if (!bfq_class_idle(bfqq))
++ bfq_mark_bfqq_idle_window(bfqq);
++ bfq_mark_bfqq_sync(bfqq);
++ }
++ bfq_mark_bfqq_IO_bound(bfqq);
++
++ /* Tentative initial value to trade off between thr and lat */
++ bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
++ bfqq->pid = pid;
++
++ bfqq->wr_coeff = 1;
++ bfqq->last_wr_start_finish = 0;
++ /*
++ * Set to the value for which bfqq will not be deemed as
++ * soft rt when it becomes backlogged.
++ */
++ bfqq->soft_rt_next_start = bfq_infinity_from_now(jiffies);
++}
++
++static struct bfq_queue *bfq_find_alloc_queue(struct bfq_data *bfqd,
++ struct bfq_group *bfqg,
++ int is_sync,
++ struct bfq_io_cq *bic,
++ gfp_t gfp_mask)
++{
++ struct bfq_queue *bfqq, *new_bfqq = NULL;
++
++retry:
++ /* bic always exists here */
++ bfqq = bic_to_bfqq(bic, is_sync);
++
++ /*
++ * Always try a new alloc if we fall back to the OOM bfqq
++ * originally, since it should just be a temporary situation.
++ */
++ if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
++ bfqq = NULL;
++ if (new_bfqq != NULL) {
++ bfqq = new_bfqq;
++ new_bfqq = NULL;
++ } else if (gfp_mask & __GFP_WAIT) {
++ spin_unlock_irq(bfqd->queue->queue_lock);
++ new_bfqq = kmem_cache_alloc_node(bfq_pool,
++ gfp_mask | __GFP_ZERO,
++ bfqd->queue->node);
++ spin_lock_irq(bfqd->queue->queue_lock);
++ if (new_bfqq != NULL)
++ goto retry;
++ } else {
++ bfqq = kmem_cache_alloc_node(bfq_pool,
++ gfp_mask | __GFP_ZERO,
++ bfqd->queue->node);
++ }
++
++ if (bfqq != NULL) {
++ bfq_init_bfqq(bfqd, bfqq, current->pid, is_sync);
++ bfq_init_prio_data(bfqq, bic);
++ bfq_init_entity(&bfqq->entity, bfqg);
++ bfq_log_bfqq(bfqd, bfqq, "allocated");
++ } else {
++ bfqq = &bfqd->oom_bfqq;
++ bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
++ }
++ }
++
++ if (new_bfqq != NULL)
++ kmem_cache_free(bfq_pool, new_bfqq);
++
++ return bfqq;
++}
++
++static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
++ struct bfq_group *bfqg,
++ int ioprio_class, int ioprio)
++{
++ switch (ioprio_class) {
++ case IOPRIO_CLASS_RT:
++ return &bfqg->async_bfqq[0][ioprio];
++ case IOPRIO_CLASS_NONE:
++ ioprio = IOPRIO_NORM;
++ /* fall through */
++ case IOPRIO_CLASS_BE:
++ return &bfqg->async_bfqq[1][ioprio];
++ case IOPRIO_CLASS_IDLE:
++ return &bfqg->async_idle_bfqq;
++ default:
++ BUG();
++ }
++}
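++
++/*
++ * For example, an async request at ioprio class RT, level 4 maps to
++ * &bfqg->async_bfqq[0][4]; a BE request (or one with no class set,
++ * normalized to IOPRIO_NORM) maps to async_bfqq[1][ioprio]; all IDLE
++ * requests share the single async_idle_bfqq.
++ */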
++
++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
++ struct bfq_group *bfqg, int is_sync,
++ struct bfq_io_cq *bic, gfp_t gfp_mask)
++{
++ const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++ const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
++ struct bfq_queue **async_bfqq = NULL;
++ struct bfq_queue *bfqq = NULL;
++
++ if (!is_sync) {
++ async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
++ ioprio);
++ bfqq = *async_bfqq;
++ }
++
++ if (bfqq == NULL)
++ bfqq = bfq_find_alloc_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
++
++ /*
++ * Pin the queue now that it's allocated, scheduler exit will
++ * prune it.
++ */
++ if (!is_sync && *async_bfqq == NULL) {
++ atomic_inc(&bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
++ bfqq, atomic_read(&bfqq->ref));
++ *async_bfqq = bfqq;
++ }
++
++ atomic_inc(&bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq,
++ atomic_read(&bfqq->ref));
++ return bfqq;
++}
++
++static void bfq_update_io_thinktime(struct bfq_data *bfqd,
++ struct bfq_io_cq *bic)
++{
++ unsigned long elapsed = jiffies - bic->ttime.last_end_request;
++ unsigned long ttime = min(elapsed, 2UL * bfqd->bfq_slice_idle);
++
++ bic->ttime.ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8;
++ bic->ttime.ttime_total = (7*bic->ttime.ttime_total + 256*ttime) / 8;
++ bic->ttime.ttime_mean = (bic->ttime.ttime_total + 128) /
++ bic->ttime.ttime_samples;
++}
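++
++/*
++ * The two updates above are a fixed-point exponential moving average
++ * with decay 7/8 and scale factor 256. With a steady per-request think
++ * time T, ttime_samples converges to 256 and ttime_total to 256 * T,
++ * so ttime_mean converges to T; the "+ 128" in the final division
++ * rounds to the nearest integer.
++ */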
++
++static void bfq_update_io_seektime(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct request *rq)
++{
++ sector_t sdist;
++ u64 total;
++
++ if (bfqq->last_request_pos < blk_rq_pos(rq))
++ sdist = blk_rq_pos(rq) - bfqq->last_request_pos;
++ else
++ sdist = bfqq->last_request_pos - blk_rq_pos(rq);
++
++ /*
++ * Don't allow the seek distance to get too large from the
++ * odd fragment, pagein, etc.
++ */
++ if (bfqq->seek_samples == 0) /* first request, not really a seek */
++ sdist = 0;
++ else if (bfqq->seek_samples <= 60) /* second & third seek */
++ sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*1024);
++ else
++ sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*64);
++
++ bfqq->seek_samples = (7*bfqq->seek_samples + 256) / 8;
++ bfqq->seek_total = (7*bfqq->seek_total + (u64)256*sdist) / 8;
++ total = bfqq->seek_total + (bfqq->seek_samples/2);
++ do_div(total, bfqq->seek_samples);
++ bfqq->seek_mean = (sector_t)total;
++
++ bfq_log_bfqq(bfqd, bfqq, "dist=%llu mean=%llu", (u64)sdist,
++ (u64)bfqq->seek_mean);
++}
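++
++/*
++ * Same fixed-point scheme as bfq_update_io_thinktime(): decay 7/8,
++ * scale 256. With a steady (clamped) seek distance D, seek_samples
++ * converges to 256 and seek_total to 256 * D, so seek_mean converges
++ * to D. The clamp to roughly four times the current mean plus a slack
++ * term keeps one pathological seek from dominating the average while
++ * only a few samples have been collected.
++ */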
++
++/*
++ * Disable idle window if the process thinks too long or seeks so much that
++ * it doesn't matter.
++ */
++static void bfq_update_idle_window(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq,
++ struct bfq_io_cq *bic)
++{
++ int enable_idle;
++
++ /* Don't idle for async or idle io prio class. */
++ if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
++ return;
++
++ /* Idle window just restored, statistics are meaningless. */
++ if (bfq_bfqq_just_split(bfqq))
++ return;
++
++ enable_idle = bfq_bfqq_idle_window(bfqq);
++
++ if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
++ bfqd->bfq_slice_idle == 0 ||
++ (bfqd->hw_tag && BFQQ_SEEKY(bfqq) &&
++ bfqq->wr_coeff == 1))
++ enable_idle = 0;
++ else if (bfq_sample_valid(bic->ttime.ttime_samples)) {
++ if (bic->ttime.ttime_mean > bfqd->bfq_slice_idle &&
++ bfqq->wr_coeff == 1)
++ enable_idle = 0;
++ else
++ enable_idle = 1;
++ }
++ bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d",
++ enable_idle);
++
++ if (enable_idle)
++ bfq_mark_bfqq_idle_window(bfqq);
++ else
++ bfq_clear_bfqq_idle_window(bfqq);
++}
++
++/*
++ * Called when a new fs request (rq) is added to bfqq. Check if there's
++ * something we should do about it.
++ */
++static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ struct request *rq)
++{
++ struct bfq_io_cq *bic = RQ_BIC(rq);
++
++ if (rq->cmd_flags & REQ_META)
++ bfqq->meta_pending++;
++
++ bfq_update_io_thinktime(bfqd, bic);
++ bfq_update_io_seektime(bfqd, bfqq, rq);
++ if (!BFQQ_SEEKY(bfqq) && bfq_bfqq_constantly_seeky(bfqq)) {
++ bfq_clear_bfqq_constantly_seeky(bfqq);
++ if (!blk_queue_nonrot(bfqd->queue)) {
++ BUG_ON(!bfqd->const_seeky_busy_in_flight_queues);
++ bfqd->const_seeky_busy_in_flight_queues--;
++ }
++ }
++ if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
++ !BFQQ_SEEKY(bfqq))
++ bfq_update_idle_window(bfqd, bfqq, bic);
++ bfq_clear_bfqq_just_split(bfqq);
++
++ bfq_log_bfqq(bfqd, bfqq,
++ "rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
++ bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq),
++		     (unsigned long long)bfqq->seek_mean);
++
++ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
++
++ if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
++ int small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
++ blk_rq_sectors(rq) < 32;
++ int budget_timeout = bfq_bfqq_budget_timeout(bfqq);
++
++ /*
++ * There is just this request queued: if the request
++ * is small and the queue is not to be expired, then
++ * just exit.
++ *
++ * In this way, if the disk is being idled to wait for
++ * a new request from the in-service queue, we avoid
++ * unplugging the device and committing the disk to serve
++ * just a small request. On the contrary, we wait for
++ * the block layer to decide when to unplug the device:
++ * hopefully, new requests will be merged to this one
++ * quickly, then the device will be unplugged and
++ * larger requests will be dispatched.
++ */
++ if (small_req && !budget_timeout)
++ return;
++
++ /*
++ * A large enough request arrived, or the queue is to
++ * be expired: in both cases disk idling is to be
++ * stopped, so clear wait_request flag and reset
++ * timer.
++ */
++ bfq_clear_bfqq_wait_request(bfqq);
++ del_timer(&bfqd->idle_slice_timer);
++
++ /*
++ * The queue is not empty, because a new request just
++ * arrived. Hence we can safely expire the queue, in
++ * case of budget timeout, without risking that the
++ * timestamps of the queue are not updated correctly.
++ * See [1] for more details.
++ */
++ if (budget_timeout)
++ bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT);
++
++ /*
++ * Let the request rip immediately, or let a new queue be
++ * selected if bfqq has just been expired.
++ */
++ __blk_run_queue(bfqd->queue);
++ }
++}
++
++static void bfq_insert_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
++
++ assert_spin_locked(bfqd->queue->queue_lock);
++
++ /*
++ * An unplug may trigger a requeue of a request from the device
++ * driver: make sure we are in process context while trying to
++ * merge two bfq_queues.
++ */
++ if (!in_interrupt()) {
++ new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
++ if (new_bfqq != NULL) {
++ if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
++ new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
++ /*
++ * Release the request's reference to the old bfqq
++ * and make sure one is taken to the shared queue.
++ */
++ new_bfqq->allocated[rq_data_dir(rq)]++;
++ bfqq->allocated[rq_data_dir(rq)]--;
++ atomic_inc(&new_bfqq->ref);
++ bfq_put_queue(bfqq);
++ if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
++ bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
++ bfqq, new_bfqq);
++ rq->elv.priv[1] = new_bfqq;
++ bfqq = new_bfqq;
++ } else
++ bfq_bfqq_increase_failed_cooperations(bfqq);
++ }
++
++ bfq_init_prio_data(bfqq, RQ_BIC(rq));
++
++ bfq_add_request(rq);
++
++ /*
++ * Here a newly-created bfq_queue has already started a weight-raising
++	 * period: clear wr_time_left to prevent bfq_bfqq_save_state()
++ * from assigning it a full weight-raising period. See the detailed
++ * comments about this field in bfq_init_icq().
++ */
++ if (bfqq->bic != NULL)
++ bfqq->bic->wr_time_left = 0;
++ rq->fifo_time = jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
++ list_add_tail(&rq->queuelist, &bfqq->fifo);
++
++ bfq_rq_enqueued(bfqd, bfqq, rq);
++}
++
++static void bfq_update_hw_tag(struct bfq_data *bfqd)
++{
++ bfqd->max_rq_in_driver = max(bfqd->max_rq_in_driver,
++ bfqd->rq_in_driver);
++
++ if (bfqd->hw_tag == 1)
++ return;
++
++ /*
++ * This sample is valid if the number of outstanding requests
++ * is large enough to allow a queueing behavior. Note that the
++ * sum is not exact, as it's not taking into account deactivated
++ * requests.
++ */
++ if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
++ return;
++
++ if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
++ return;
++
++ bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
++ bfqd->max_rq_in_driver = 0;
++ bfqd->hw_tag_samples = 0;
++}
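++
++/*
++ * In short: samples are taken only while at least BFQ_HW_QUEUE_THRESHOLD
++ * requests are outstanding or queued (fewer cannot reveal queueing
++ * behavior), and after BFQ_HW_QUEUE_SAMPLES such samples the device is
++ * flagged as NCQ-capable iff the driver was ever seen holding more than
++ * BFQ_HW_QUEUE_THRESHOLD requests at once.
++ */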
++
++static void bfq_completed_request(struct request_queue *q, struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++ struct bfq_data *bfqd = bfqq->bfqd;
++ bool sync = bfq_bfqq_sync(bfqq);
++
++ bfq_log_bfqq(bfqd, bfqq, "completed one req with %u sects left (%d)",
++ blk_rq_sectors(rq), sync);
++
++ bfq_update_hw_tag(bfqd);
++
++ BUG_ON(!bfqd->rq_in_driver);
++ BUG_ON(!bfqq->dispatched);
++ bfqd->rq_in_driver--;
++ bfqq->dispatched--;
++
++ if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
++ bfq_weights_tree_remove(bfqd, &bfqq->entity,
++ &bfqd->queue_weights_tree);
++ if (!blk_queue_nonrot(bfqd->queue)) {
++ BUG_ON(!bfqd->busy_in_flight_queues);
++ bfqd->busy_in_flight_queues--;
++ if (bfq_bfqq_constantly_seeky(bfqq)) {
++ BUG_ON(!bfqd->
++ const_seeky_busy_in_flight_queues);
++ bfqd->const_seeky_busy_in_flight_queues--;
++ }
++ }
++ }
++
++ if (sync) {
++ bfqd->sync_flight--;
++ RQ_BIC(rq)->ttime.last_end_request = jiffies;
++ }
++
++ /*
++ * If we are waiting to discover whether the request pattern of the
++ * task associated with the queue is actually isochronous, and
++ * both requisites for this condition to hold are satisfied, then
++ * compute soft_rt_next_start (see the comments to the function
++ * bfq_bfqq_softrt_next_start()).
++ */
++ if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
++ RB_EMPTY_ROOT(&bfqq->sort_list))
++ bfqq->soft_rt_next_start =
++ bfq_bfqq_softrt_next_start(bfqd, bfqq);
++
++ /*
++ * If this is the in-service queue, check if it needs to be expired,
++ * or if we want to idle in case it has no pending requests.
++ */
++ if (bfqd->in_service_queue == bfqq) {
++ if (bfq_bfqq_budget_new(bfqq))
++ bfq_set_budget_timeout(bfqd);
++
++ if (bfq_bfqq_must_idle(bfqq)) {
++ bfq_arm_slice_timer(bfqd);
++ goto out;
++ } else if (bfq_may_expire_for_budg_timeout(bfqq))
++ bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT);
++ else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
++ (bfqq->dispatched == 0 ||
++ !bfq_bfqq_must_not_expire(bfqq)))
++ bfq_bfqq_expire(bfqd, bfqq, 0,
++ BFQ_BFQQ_NO_MORE_REQUESTS);
++ }
++
++ if (!bfqd->rq_in_driver)
++ bfq_schedule_dispatch(bfqd);
++
++out:
++ return;
++}
++
++static inline int __bfq_may_queue(struct bfq_queue *bfqq)
++{
++ if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) {
++ bfq_clear_bfqq_must_alloc(bfqq);
++ return ELV_MQUEUE_MUST;
++ }
++
++ return ELV_MQUEUE_MAY;
++}
++
++static int bfq_may_queue(struct request_queue *q, int rw)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct task_struct *tsk = current;
++ struct bfq_io_cq *bic;
++ struct bfq_queue *bfqq;
++
++ /*
++ * Don't force setup of a queue from here, as a call to may_queue
++ * does not necessarily imply that a request actually will be
++	 * queued. So just look up a possibly existing queue, or return
++ * 'may queue' if that fails.
++ */
++ bic = bfq_bic_lookup(bfqd, tsk->io_context);
++ if (bic == NULL)
++ return ELV_MQUEUE_MAY;
++
++ bfqq = bic_to_bfqq(bic, rw_is_sync(rw));
++ if (bfqq != NULL) {
++ bfq_init_prio_data(bfqq, bic);
++
++ return __bfq_may_queue(bfqq);
++ }
++
++ return ELV_MQUEUE_MAY;
++}
++
++/*
++ * Queue lock held here.
++ */
++static void bfq_put_request(struct request *rq)
++{
++ struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++ if (bfqq != NULL) {
++ const int rw = rq_data_dir(rq);
++
++ BUG_ON(!bfqq->allocated[rw]);
++ bfqq->allocated[rw]--;
++
++ rq->elv.priv[0] = NULL;
++ rq->elv.priv[1] = NULL;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
++ bfqq, atomic_read(&bfqq->ref));
++ bfq_put_queue(bfqq);
++ }
++}
++
++/*
++ * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
++ * was the last process referring to said bfqq.
++ */
++static struct bfq_queue *
++bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
++{
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
++
++ put_io_context(bic->icq.ioc);
++
++ if (bfqq_process_refs(bfqq) == 1) {
++ bfqq->pid = current->pid;
++ bfq_clear_bfqq_coop(bfqq);
++ bfq_clear_bfqq_split_coop(bfqq);
++ return bfqq;
++ }
++
++ bic_set_bfqq(bic, NULL, 1);
++
++ bfq_put_cooperator(bfqq);
++
++ bfq_put_queue(bfqq);
++ return NULL;
++}
++
++/*
++ * Allocate bfq data structures associated with this request.
++ */
++static int bfq_set_request(struct request_queue *q, struct request *rq,
++ struct bio *bio, gfp_t gfp_mask)
++{
++ struct bfq_data *bfqd = q->elevator->elevator_data;
++ struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
++ const int rw = rq_data_dir(rq);
++ const int is_sync = rq_is_sync(rq);
++ struct bfq_queue *bfqq;
++ struct bfq_group *bfqg;
++ unsigned long flags;
++ bool split = false;
++
++ might_sleep_if(gfp_mask & __GFP_WAIT);
++
++	/*
++	 * bic may be NULL if icq allocation failed: check it before
++	 * dereferencing it here. The NULL case itself is handled,
++	 * under the queue lock, right below.
++	 */
++	if (bic != NULL)
++		bfq_changed_ioprio(bic);
++
++ spin_lock_irqsave(q->queue_lock, flags);
++
++ if (bic == NULL)
++ goto queue_fail;
++
++ bfqg = bfq_bic_update_cgroup(bic);
++
++new_queue:
++ bfqq = bic_to_bfqq(bic, is_sync);
++ if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
++ bfqq = bfq_get_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
++ bic_set_bfqq(bic, bfqq, is_sync);
++ if (split && is_sync) {
++ if ((bic->was_in_burst_list && bfqd->large_burst) ||
++ bic->saved_in_large_burst)
++ bfq_mark_bfqq_in_large_burst(bfqq);
++ else {
++ bfq_clear_bfqq_in_large_burst(bfqq);
++ if (bic->was_in_burst_list)
++ hlist_add_head(&bfqq->burst_list_node,
++ &bfqd->burst_list);
++ }
++ }
++ } else {
++ /* If the queue was seeky for too long, break it apart. */
++ if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
++ bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
++ bfqq = bfq_split_bfqq(bic, bfqq);
++ split = true;
++ if (!bfqq)
++ goto new_queue;
++ }
++ }
++
++ bfqq->allocated[rw]++;
++ atomic_inc(&bfqq->ref);
++ bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq,
++ atomic_read(&bfqq->ref));
++
++ rq->elv.priv[0] = bic;
++ rq->elv.priv[1] = bfqq;
++
++ /*
++ * If a bfq_queue has only one process reference, it is owned
++ * by only one bfq_io_cq: we can set the bic field of the
++ * bfq_queue to the address of that structure. Also, if the
++ * queue has just been split, mark a flag so that the
++ * information is available to the other scheduler hooks.
++ */
++ if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
++ bfqq->bic = bic;
++ if (split) {
++ bfq_mark_bfqq_just_split(bfqq);
++ /*
++ * If the queue has just been split from a shared
++ * queue, restore the idle window and the possible
++ * weight raising period.
++ */
++ bfq_bfqq_resume_state(bfqq, bic);
++ }
++ }
++
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ return 0;
++
++queue_fail:
++ bfq_schedule_dispatch(bfqd);
++ spin_unlock_irqrestore(q->queue_lock, flags);
++
++ return 1;
++}
++
++static void bfq_kick_queue(struct work_struct *work)
++{
++ struct bfq_data *bfqd =
++ container_of(work, struct bfq_data, unplug_work);
++ struct request_queue *q = bfqd->queue;
++
++ spin_lock_irq(q->queue_lock);
++ __blk_run_queue(q);
++ spin_unlock_irq(q->queue_lock);
++}
++
++/*
++ * Handler for the expiration of the timer that runs while the
++ * in-service queue is idling within its time slice.
++ */
++static void bfq_idle_slice_timer(unsigned long data)
++{
++ struct bfq_data *bfqd = (struct bfq_data *)data;
++ struct bfq_queue *bfqq;
++ unsigned long flags;
++ enum bfqq_expiration reason;
++
++ spin_lock_irqsave(bfqd->queue->queue_lock, flags);
++
++ bfqq = bfqd->in_service_queue;
++ /*
++ * Theoretical race here: the in-service queue can be NULL or
++ * different from the queue that was idling if the timer handler
++ * spins on the queue_lock and a new request arrives for the
++ * current queue and there is a full dispatch cycle that changes
++ * the in-service queue. This can hardly happen, but in the worst
++ * case we just expire a queue too early.
++ */
++ if (bfqq != NULL) {
++ bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
++ if (bfq_bfqq_budget_timeout(bfqq))
++ /*
++ * Also here the queue can be safely expired
++ * for budget timeout without wasting
++ * guarantees
++ */
++ reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++ else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
++ /*
++ * The queue may not be empty upon timer expiration,
++ * because we may not disable the timer when the
++ * first request of the in-service queue arrives
++ * during disk idling.
++ */
++ reason = BFQ_BFQQ_TOO_IDLE;
++ else
++ goto schedule_dispatch;
++
++ bfq_bfqq_expire(bfqd, bfqq, 1, reason);
++ }
++
++schedule_dispatch:
++ bfq_schedule_dispatch(bfqd);
++
++ spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
++}
++
++static void bfq_shutdown_timer_wq(struct bfq_data *bfqd)
++{
++ del_timer_sync(&bfqd->idle_slice_timer);
++ cancel_work_sync(&bfqd->unplug_work);
++}
++
++static inline void __bfq_put_async_bfqq(struct bfq_data *bfqd,
++ struct bfq_queue **bfqq_ptr)
++{
++ struct bfq_group *root_group = bfqd->root_group;
++ struct bfq_queue *bfqq = *bfqq_ptr;
++
++ bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
++ if (bfqq != NULL) {
++ bfq_bfqq_move(bfqd, bfqq, &bfqq->entity, root_group);
++ bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
++ bfqq, atomic_read(&bfqq->ref));
++ bfq_put_queue(bfqq);
++ *bfqq_ptr = NULL;
++ }
++}
++
++/*
++ * Release all the bfqg references to its async queues. If we are
++ * deallocating the group these queues may still contain requests, so
++ * we reparent them to the root cgroup (i.e., the only one that will
++ * exist for sure until all the requests on a device are gone).
++ */
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
++{
++ int i, j;
++
++ for (i = 0; i < 2; i++)
++ for (j = 0; j < IOPRIO_BE_NR; j++)
++ __bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
++
++ __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
++}
++
++static void bfq_exit_queue(struct elevator_queue *e)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ struct request_queue *q = bfqd->queue;
++ struct bfq_queue *bfqq, *n;
++
++ bfq_shutdown_timer_wq(bfqd);
++
++ spin_lock_irq(q->queue_lock);
++
++ BUG_ON(bfqd->in_service_queue != NULL);
++ list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
++ bfq_deactivate_bfqq(bfqd, bfqq, 0);
++
++ bfq_disconnect_groups(bfqd);
++ spin_unlock_irq(q->queue_lock);
++
++ bfq_shutdown_timer_wq(bfqd);
++
++ synchronize_rcu();
++
++ BUG_ON(timer_pending(&bfqd->idle_slice_timer));
++
++ bfq_free_root_group(bfqd);
++ kfree(bfqd);
++}
++
++static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
++{
++ struct bfq_group *bfqg;
++ struct bfq_data *bfqd;
++ struct elevator_queue *eq;
++
++ eq = elevator_alloc(q, e);
++ if (eq == NULL)
++ return -ENOMEM;
++
++ bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
++ if (bfqd == NULL) {
++ kobject_put(&eq->kobj);
++ return -ENOMEM;
++ }
++ eq->elevator_data = bfqd;
++
++ /*
++ * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
++ * Grab a permanent reference to it, so that the normal code flow
++ * will not attempt to free it.
++ */
++ bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, 1, 0);
++ atomic_inc(&bfqd->oom_bfqq.ref);
++ bfqd->oom_bfqq.entity.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
++ bfqd->oom_bfqq.entity.new_ioprio_class = IOPRIO_CLASS_BE;
++ /*
++ * Trigger weight initialization, according to ioprio, at the
++ * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
++ * class won't be changed any more.
++ */
++ bfqd->oom_bfqq.entity.ioprio_changed = 1;
++
++ bfqd->queue = q;
++
++ spin_lock_irq(q->queue_lock);
++ q->elevator = eq;
++ spin_unlock_irq(q->queue_lock);
++
++ bfqg = bfq_alloc_root_group(bfqd, q->node);
++ if (bfqg == NULL) {
++ kfree(bfqd);
++ kobject_put(&eq->kobj);
++ return -ENOMEM;
++ }
++
++ bfqd->root_group = bfqg;
++ bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
++#ifdef CONFIG_CGROUP_BFQIO
++ bfqd->active_numerous_groups = 0;
++#endif
++
++ init_timer(&bfqd->idle_slice_timer);
++ bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
++ bfqd->idle_slice_timer.data = (unsigned long)bfqd;
++
++ bfqd->rq_pos_tree = RB_ROOT;
++ bfqd->queue_weights_tree = RB_ROOT;
++ bfqd->group_weights_tree = RB_ROOT;
++
++ INIT_WORK(&bfqd->unplug_work, bfq_kick_queue);
++
++ INIT_LIST_HEAD(&bfqd->active_list);
++ INIT_LIST_HEAD(&bfqd->idle_list);
++ INIT_HLIST_HEAD(&bfqd->burst_list);
++
++ bfqd->hw_tag = -1;
++
++ bfqd->bfq_max_budget = bfq_default_max_budget;
++
++ bfqd->bfq_quantum = bfq_quantum;
++ bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
++ bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
++ bfqd->bfq_back_max = bfq_back_max;
++ bfqd->bfq_back_penalty = bfq_back_penalty;
++ bfqd->bfq_slice_idle = bfq_slice_idle;
++ bfqd->bfq_class_idle_last_service = 0;
++ bfqd->bfq_max_budget_async_rq = bfq_max_budget_async_rq;
++ bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async;
++ bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync;
++
++ bfqd->bfq_coop_thresh = 2;
++ bfqd->bfq_failed_cooperations = 7000;
++ bfqd->bfq_requests_within_timer = 120;
++
++ bfqd->bfq_large_burst_thresh = 11;
++ bfqd->bfq_burst_interval = msecs_to_jiffies(500);
++
++ bfqd->low_latency = true;
++
++ bfqd->bfq_wr_coeff = 20;
++ bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
++ bfqd->bfq_wr_max_time = 0;
++ bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
++ bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
++ bfqd->bfq_wr_max_softrt_rate = 7000; /*
++ * Approximate rate required
++ * to playback or record a
++ * high-definition compressed
++ * video.
++ */
++ bfqd->wr_busy_queues = 0;
++ bfqd->busy_in_flight_queues = 0;
++ bfqd->const_seeky_busy_in_flight_queues = 0;
++
++ /*
++ * Begin by assuming, optimistically, that the device peak rate is
++ * equal to the highest reference rate.
++ */
++ bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
++ T_fast[blk_queue_nonrot(bfqd->queue)];
++ bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)];
++ bfqd->device_speed = BFQ_BFQD_FAST;
++
++ return 0;
++}
++
++static void bfq_slab_kill(void)
++{
++ if (bfq_pool != NULL)
++ kmem_cache_destroy(bfq_pool);
++}
++
++static int __init bfq_slab_setup(void)
++{
++ bfq_pool = KMEM_CACHE(bfq_queue, 0);
++ if (bfq_pool == NULL)
++ return -ENOMEM;
++ return 0;
++}
++
++static ssize_t bfq_var_show(unsigned int var, char *page)
++{
++ return sprintf(page, "%d\n", var);
++}
++
++static ssize_t bfq_var_store(unsigned long *var, const char *page,
++ size_t count)
++{
++ unsigned long new_val;
++ int ret = kstrtoul(page, 10, &new_val);
++
++ if (ret == 0)
++ *var = new_val;
++
++ return count;
++}
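++
++/*
++ * These two helpers back the sysfs attributes declared in bfq_attrs[]
++ * below. With bfq selected as the elevator of a disk (device name here
++ * purely illustrative), the tunables can be inspected and set with,
++ * e.g.:
++ *
++ *	# cat /sys/block/sda/queue/iosched/slice_idle
++ *	# echo 4 > /sys/block/sda/queue/iosched/quantum
++ */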
++
++static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ?
++ jiffies_to_msecs(bfqd->bfq_wr_max_time) :
++ jiffies_to_msecs(bfq_wr_duration(bfqd)));
++}
++
++static ssize_t bfq_weights_show(struct elevator_queue *e, char *page)
++{
++ struct bfq_queue *bfqq;
++ struct bfq_data *bfqd = e->elevator_data;
++ ssize_t num_char = 0;
++
++ num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n",
++ bfqd->queued);
++
++ spin_lock_irq(bfqd->queue->queue_lock);
++
++ num_char += sprintf(page + num_char, "Active:\n");
++ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) {
++ num_char += sprintf(page + num_char,
++ "pid%d: weight %hu, nr_queued %d %d, dur %d/%u\n",
++ bfqq->pid,
++ bfqq->entity.weight,
++ bfqq->queued[0],
++ bfqq->queued[1],
++ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++
++ num_char += sprintf(page + num_char, "Idle:\n");
++ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) {
++ num_char += sprintf(page + num_char,
++ "pid%d: weight %hu, dur %d/%u\n",
++ bfqq->pid,
++ bfqq->entity.weight,
++ jiffies_to_msecs(jiffies -
++ bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ }
++
++ spin_unlock_irq(bfqd->queue->queue_lock);
++
++ return num_char;
++}
++
++#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
++static ssize_t __FUNC(struct elevator_queue *e, char *page) \
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ unsigned int __data = __VAR; \
++ if (__CONV) \
++ __data = jiffies_to_msecs(__data); \
++ return bfq_var_show(__data, (page)); \
++}
++SHOW_FUNCTION(bfq_quantum_show, bfqd->bfq_quantum, 0);
++SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 1);
++SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 1);
++SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
++SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
++SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 1);
++SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
++SHOW_FUNCTION(bfq_max_budget_async_rq_show,
++ bfqd->bfq_max_budget_async_rq, 0);
++SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout[BLK_RW_SYNC], 1);
++SHOW_FUNCTION(bfq_timeout_async_show, bfqd->bfq_timeout[BLK_RW_ASYNC], 1);
++SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
++SHOW_FUNCTION(bfq_wr_coeff_show, bfqd->bfq_wr_coeff, 0);
++SHOW_FUNCTION(bfq_wr_rt_max_time_show, bfqd->bfq_wr_rt_max_time, 1);
++SHOW_FUNCTION(bfq_wr_min_idle_time_show, bfqd->bfq_wr_min_idle_time, 1);
++SHOW_FUNCTION(bfq_wr_min_inter_arr_async_show, bfqd->bfq_wr_min_inter_arr_async,
++ 1);
++SHOW_FUNCTION(bfq_wr_max_softrt_rate_show, bfqd->bfq_wr_max_softrt_rate, 0);
++#undef SHOW_FUNCTION
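++
++/*
++ * As a concrete example, SHOW_FUNCTION(bfq_quantum_show,
++ * bfqd->bfq_quantum, 0) expands to:
++ *
++ *	static ssize_t bfq_quantum_show(struct elevator_queue *e, char *page)
++ *	{
++ *		struct bfq_data *bfqd = e->elevator_data;
++ *		unsigned int __data = bfqd->bfq_quantum;
++ *		if (0)
++ *			__data = jiffies_to_msecs(__data);
++ *		return bfq_var_show(__data, (page));
++ *	}
++ *
++ * where the if (0) branch is dead code that the compiler removes;
++ * tunables stored in jiffies pass 1 as __CONV to get values in msecs.
++ */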
++
++#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
++static ssize_t \
++__FUNC(struct elevator_queue *e, const char *page, size_t count) \
++{ \
++ struct bfq_data *bfqd = e->elevator_data; \
++ unsigned long uninitialized_var(__data); \
++ int ret = bfq_var_store(&__data, (page), count); \
++ if (__data < (MIN)) \
++ __data = (MIN); \
++ else if (__data > (MAX)) \
++ __data = (MAX); \
++ if (__CONV) \
++ *(__PTR) = msecs_to_jiffies(__data); \
++ else \
++ *(__PTR) = __data; \
++ return ret; \
++}
++STORE_FUNCTION(bfq_quantum_store, &bfqd->bfq_quantum, 1, INT_MAX, 0);
++STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
++STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
++ INT_MAX, 0);
++STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_max_budget_async_rq_store, &bfqd->bfq_max_budget_async_rq,
++ 1, INT_MAX, 0);
++STORE_FUNCTION(bfq_timeout_async_store, &bfqd->bfq_timeout[BLK_RW_ASYNC], 0,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_coeff_store, &bfqd->bfq_wr_coeff, 1, INT_MAX, 0);
++STORE_FUNCTION(bfq_wr_max_time_store, &bfqd->bfq_wr_max_time, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_rt_max_time_store, &bfqd->bfq_wr_rt_max_time, 0, INT_MAX,
++ 1);
++STORE_FUNCTION(bfq_wr_min_idle_time_store, &bfqd->bfq_wr_min_idle_time, 0,
++ INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_min_inter_arr_async_store,
++ &bfqd->bfq_wr_min_inter_arr_async, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_max_softrt_rate_store, &bfqd->bfq_wr_max_softrt_rate, 0,
++ INT_MAX, 0);
++#undef STORE_FUNCTION
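Both macro families follow the standard elevator sysfs idiom: tunables are kept in jiffies internally, the __CONV flag converts to and from milliseconds at the sysfs boundary, and the store side clamps to [MIN, MAX]. As a hand-expanded sketch (not part of the patch; bfq_var_show()/bfq_var_store() are the small parse/print helpers defined earlier in this file), here is roughly what the slice_idle pair expands to:

    /* SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 1) expands to: */
    static ssize_t bfq_slice_idle_show(struct elevator_queue *e, char *page)
    {
        struct bfq_data *bfqd = e->elevator_data;
        unsigned int __data = bfqd->bfq_slice_idle;   /* stored in jiffies */
        if (1)                                        /* __CONV == 1 */
            __data = jiffies_to_msecs(__data);        /* shown in ms */
        return bfq_var_show(__data, (page));
    }

    /* STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 1): */
    static ssize_t
    bfq_slice_idle_store(struct elevator_queue *e, const char *page, size_t count)
    {
        struct bfq_data *bfqd = e->elevator_data;
        unsigned long __data;
        int ret = bfq_var_store(&__data, (page), count); /* parse decimal */
        if (__data < (0))             /* clamp to MIN; vacuous for MIN == 0 */
            __data = (0);
        else if (__data > (INT_MAX))  /* clamp to MAX */
            __data = (INT_MAX);
        *(&bfqd->bfq_slice_idle) = msecs_to_jiffies(__data); /* back to jiffies */
        return ret;
    }

Writing 8 to /sys/block/<dev>/queue/iosched/slice_idle thus stores msecs_to_jiffies(8), and reading the file converts the stored value back to milliseconds.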
++
++/* do nothing for the moment */
++static ssize_t bfq_weights_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ return count;
++}
++
++static inline unsigned long bfq_estimated_max_budget(struct bfq_data *bfqd)
++{
++ u64 timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
++
++ if (bfqd->peak_rate_samples >= BFQ_PEAK_RATE_SAMPLES)
++ return bfq_calc_max_budget(bfqd->peak_rate, timeout);
++ else
++ return bfq_default_max_budget;
++}
++
++static ssize_t bfq_max_budget_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data == 0)
++ bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
++ else {
++ if (__data > INT_MAX)
++ __data = INT_MAX;
++ bfqd->bfq_max_budget = __data;
++ }
++
++ bfqd->bfq_user_max_budget = __data;
++
++ return ret;
++}
++
++static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data < 1)
++ __data = 1;
++ else if (__data > INT_MAX)
++ __data = INT_MAX;
++
++ bfqd->bfq_timeout[BLK_RW_SYNC] = msecs_to_jiffies(__data);
++ if (bfqd->bfq_user_max_budget == 0)
++ bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
++
++ return ret;
++}
++
++static ssize_t bfq_low_latency_store(struct elevator_queue *e,
++ const char *page, size_t count)
++{
++ struct bfq_data *bfqd = e->elevator_data;
++ unsigned long uninitialized_var(__data);
++ int ret = bfq_var_store(&__data, (page), count);
++
++ if (__data > 1)
++ __data = 1;
++ if (__data == 0 && bfqd->low_latency != 0)
++ bfq_end_wr(bfqd);
++ bfqd->low_latency = __data;
++
++ return ret;
++}
++
++#define BFQ_ATTR(name) \
++ __ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store)
++
++static struct elv_fs_entry bfq_attrs[] = {
++ BFQ_ATTR(quantum),
++ BFQ_ATTR(fifo_expire_sync),
++ BFQ_ATTR(fifo_expire_async),
++ BFQ_ATTR(back_seek_max),
++ BFQ_ATTR(back_seek_penalty),
++ BFQ_ATTR(slice_idle),
++ BFQ_ATTR(max_budget),
++ BFQ_ATTR(max_budget_async_rq),
++ BFQ_ATTR(timeout_sync),
++ BFQ_ATTR(timeout_async),
++ BFQ_ATTR(low_latency),
++ BFQ_ATTR(wr_coeff),
++ BFQ_ATTR(wr_max_time),
++ BFQ_ATTR(wr_rt_max_time),
++ BFQ_ATTR(wr_min_idle_time),
++ BFQ_ATTR(wr_min_inter_arr_async),
++ BFQ_ATTR(wr_max_softrt_rate),
++ BFQ_ATTR(weights),
++ __ATTR_NULL
++};
++
++static struct elevator_type iosched_bfq = {
++ .ops = {
++ .elevator_merge_fn = bfq_merge,
++ .elevator_merged_fn = bfq_merged_request,
++ .elevator_merge_req_fn = bfq_merged_requests,
++ .elevator_allow_merge_fn = bfq_allow_merge,
++ .elevator_dispatch_fn = bfq_dispatch_requests,
++ .elevator_add_req_fn = bfq_insert_request,
++ .elevator_activate_req_fn = bfq_activate_request,
++ .elevator_deactivate_req_fn = bfq_deactivate_request,
++ .elevator_completed_req_fn = bfq_completed_request,
++ .elevator_former_req_fn = elv_rb_former_request,
++ .elevator_latter_req_fn = elv_rb_latter_request,
++ .elevator_init_icq_fn = bfq_init_icq,
++ .elevator_exit_icq_fn = bfq_exit_icq,
++ .elevator_set_req_fn = bfq_set_request,
++ .elevator_put_req_fn = bfq_put_request,
++ .elevator_may_queue_fn = bfq_may_queue,
++ .elevator_init_fn = bfq_init_queue,
++ .elevator_exit_fn = bfq_exit_queue,
++ },
++ .icq_size = sizeof(struct bfq_io_cq),
++ .icq_align = __alignof__(struct bfq_io_cq),
++ .elevator_attrs = bfq_attrs,
++ .elevator_name = "bfq",
++ .elevator_owner = THIS_MODULE,
++};
++
++static int __init bfq_init(void)
++{
++ /*
++ * Can be 0 on HZ < 1000 setups.
++ */
++ if (bfq_slice_idle == 0)
++ bfq_slice_idle = 1;
++
++ if (bfq_timeout_async == 0)
++ bfq_timeout_async = 1;
++
++ if (bfq_slab_setup())
++ return -ENOMEM;
++
++ /*
++ * Times to load large popular applications for the typical systems
++ * installed on the reference devices (see the comments before the
++ * definitions of the two arrays).
++ */
++ T_slow[0] = msecs_to_jiffies(2600);
++ T_slow[1] = msecs_to_jiffies(1000);
++ T_fast[0] = msecs_to_jiffies(5500);
++ T_fast[1] = msecs_to_jiffies(2000);
++
++ /*
++ * Thresholds that determine the switch between speed classes (see
++ * the comments before the definition of the array).
++ */
++ device_speed_thresh[0] = (R_fast[0] + R_slow[0]) / 2;
++ device_speed_thresh[1] = (R_fast[1] + R_slow[1]) / 2;
++
++ elv_register(&iosched_bfq);
++	pr_info("BFQ I/O-scheduler version: v7r7\n");
++
++ return 0;
++}
++
++static void __exit bfq_exit(void)
++{
++ elv_unregister(&iosched_bfq);
++ bfq_slab_kill();
++}
++
++module_init(bfq_init);
++module_exit(bfq_exit);
++
++MODULE_AUTHOR("Fabio Checconi, Paolo Valente");
++MODULE_LICENSE("GPL");
+diff -Nur linux-4.1.3/block/bfq-sched.c linux-xbian-imx6/block/bfq-sched.c
+--- linux-4.1.3/block/bfq-sched.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/block/bfq-sched.c 2015-07-27 23:13:03.604123194 +0200
+@@ -0,0 +1,1186 @@
++/*
++ * BFQ: Hierarchical B-WF2Q+ scheduler.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ * Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ */
++
++#ifdef CONFIG_CGROUP_BFQIO
++#define for_each_entity(entity) \
++ for (; entity != NULL; entity = entity->parent)
++
++#define for_each_entity_safe(entity, parent) \
++ for (; entity && ({ parent = entity->parent; 1; }); entity = parent)
++
++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
++ int extract,
++ struct bfq_data *bfqd);
++
++static inline void bfq_update_budget(struct bfq_entity *next_in_service)
++{
++ struct bfq_entity *bfqg_entity;
++ struct bfq_group *bfqg;
++ struct bfq_sched_data *group_sd;
++
++ BUG_ON(next_in_service == NULL);
++
++ group_sd = next_in_service->sched_data;
++
++ bfqg = container_of(group_sd, struct bfq_group, sched_data);
++ /*
++ * bfq_group's my_entity field is not NULL only if the group
++ * is not the root group. We must not touch the root entity
++ * as it must never become an in-service entity.
++ */
++ bfqg_entity = bfqg->my_entity;
++ if (bfqg_entity != NULL)
++ bfqg_entity->budget = next_in_service->budget;
++}
++
++static int bfq_update_next_in_service(struct bfq_sched_data *sd)
++{
++ struct bfq_entity *next_in_service;
++
++ if (sd->in_service_entity != NULL)
++ /* will update/requeue at the end of service */
++ return 0;
++
++ /*
++ * NOTE: this can be improved in many ways, such as returning
++	 * 1 (and thus propagating the update upwards) only when the
++	 * budget changes, or caching the bfqq that will be scheduled
++	 * next from this subtree. For now we worry more about
++ * correctness than about performance...
++ */
++ next_in_service = bfq_lookup_next_entity(sd, 0, NULL);
++ sd->next_in_service = next_in_service;
++
++ if (next_in_service != NULL)
++ bfq_update_budget(next_in_service);
++
++ return 1;
++}
++
++static inline void bfq_check_next_in_service(struct bfq_sched_data *sd,
++ struct bfq_entity *entity)
++{
++ BUG_ON(sd->next_in_service != entity);
++}
++#else
++#define for_each_entity(entity) \
++ for (; entity != NULL; entity = NULL)
++
++#define for_each_entity_safe(entity, parent) \
++ for (parent = NULL; entity != NULL; entity = parent)
++
++static inline int bfq_update_next_in_service(struct bfq_sched_data *sd)
++{
++ return 0;
++}
++
++static inline void bfq_check_next_in_service(struct bfq_sched_data *sd,
++ struct bfq_entity *entity)
++{
++}
++
++static inline void bfq_update_budget(struct bfq_entity *next_in_service)
++{
++}
++#endif
++
++/*
++ * Shift for timestamp calculations. This actually limits the maximum
++ * service allowed in one timestamp delta (small shift values increase it),
++ * the maximum total weight that can be used for the queues in the system
++ * (big shift values increase it), and the period of virtual time
++ * wraparounds.
++ */
++#define WFQ_SERVICE_SHIFT 22
++
++/**
++ * bfq_gt - compare two timestamps.
++ * @a: first ts.
++ * @b: second ts.
++ *
++ * Return @a > @b, dealing with wrapping correctly.
++ */
++static inline int bfq_gt(u64 a, u64 b)
++{
++ return (s64)(a - b) > 0;
++}
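This signed-difference comparison is what makes the virtual-time wraparound mentioned above harmless, provided the two timestamps are less than 2^63 apart. A minimal userspace sketch, assuming u64/s64 map onto the stdint types:

    #include <stdint.h>
    #include <stdio.h>

    /* Same comparison as bfq_gt(): "a is after b", robust across wraparound. */
    static int bfq_gt(uint64_t a, uint64_t b)
    {
        return (int64_t)(a - b) > 0;
    }

    int main(void)
    {
        uint64_t before_wrap = UINT64_MAX - 10; /* vtime just before wrap */
        uint64_t after_wrap  = 5;               /* vtime just after wrap */

        /* A naive "a > b" gets this wrong; the signed cast gets it right. */
        printf("naive:  %d\n", after_wrap > before_wrap);        /* prints 0 */
        printf("bfq_gt: %d\n", bfq_gt(after_wrap, before_wrap)); /* prints 1 */
        return 0;
    }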
++
++static inline struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = NULL;
++
++ BUG_ON(entity == NULL);
++
++ if (entity->my_sched_data == NULL)
++ bfqq = container_of(entity, struct bfq_queue, entity);
++
++ return bfqq;
++}
++
++
++/**
++ * bfq_delta - map service into the virtual time domain.
++ * @service: amount of service.
++ * @weight: scale factor (weight of an entity or weight sum).
++ */
++static inline u64 bfq_delta(unsigned long service,
++ unsigned long weight)
++{
++ u64 d = (u64)service << WFQ_SERVICE_SHIFT;
++
++ do_div(d, weight);
++ return d;
++}
++
++/**
++ * bfq_calc_finish - assign the finish time to an entity.
++ * @entity: the entity to act upon.
++ * @service: the service to be charged to the entity.
++ */
++static inline void bfq_calc_finish(struct bfq_entity *entity,
++ unsigned long service)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ BUG_ON(entity->weight == 0);
++
++ entity->finish = entity->start +
++ bfq_delta(service, entity->weight);
++
++ if (bfqq != NULL) {
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "calc_finish: serv %lu, w %d",
++ service, entity->weight);
++ bfq_log_bfqq(bfqq->bfqd, bfqq,
++ "calc_finish: start %llu, finish %llu, delta %llu",
++ entity->start, entity->finish,
++ bfq_delta(service, entity->weight));
++ }
++}
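Together these two helpers implement the WF2Q+ timestamp rule finish = start + (service << WFQ_SERVICE_SHIFT) / weight, so a heavier entity's finish time grows more slowly and it earns a proportionally larger share. A minimal userspace sketch of the arithmetic, with plain division standing in for the kernel's do_div():

    #include <stdint.h>
    #include <stdio.h>

    #define WFQ_SERVICE_SHIFT 22

    /* Same mapping as bfq_delta(), in userspace. */
    static uint64_t bfq_delta(unsigned long service, unsigned long weight)
    {
        return ((uint64_t)service << WFQ_SERVICE_SHIFT) / weight;
    }

    int main(void)
    {
        /* Two entities receive the same 4096 units of service. */
        uint64_t light = bfq_delta(4096, 100); /* weight 100 */
        uint64_t heavy = bfq_delta(4096, 200); /* weight 200 */

        /*
         * finish = start + delta: the weight-200 entity's finish time
         * advances half as fast, so at equal vtime it is eligible for
         * twice the service, which is the proportional share WF2Q+ targets.
         */
        printf("weight 100: finish advances by %llu\n",
               (unsigned long long)light);
        printf("weight 200: finish advances by %llu\n",
               (unsigned long long)heavy);
        return 0;
    }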
++
++/**
++ * bfq_entity_of - get an entity from a node.
++ * @node: the node field of the entity.
++ *
++ * Convert a node pointer to the relative entity. This is used only
++ * to simplify the logic of some functions and not as the generic
++ * conversion mechanism because, e.g., in the tree walking functions,
++ * the check for a %NULL value would be redundant.
++ */
++static inline struct bfq_entity *bfq_entity_of(struct rb_node *node)
++{
++ struct bfq_entity *entity = NULL;
++
++ if (node != NULL)
++ entity = rb_entry(node, struct bfq_entity, rb_node);
++
++ return entity;
++}
++
++/**
++ * bfq_extract - remove an entity from a tree.
++ * @root: the tree root.
++ * @entity: the entity to remove.
++ */
++static inline void bfq_extract(struct rb_root *root,
++ struct bfq_entity *entity)
++{
++ BUG_ON(entity->tree != root);
++
++ entity->tree = NULL;
++ rb_erase(&entity->rb_node, root);
++}
++
++/**
++ * bfq_idle_extract - extract an entity from the idle tree.
++ * @st: the service tree of the owning @entity.
++ * @entity: the entity being removed.
++ */
++static void bfq_idle_extract(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct rb_node *next;
++
++ BUG_ON(entity->tree != &st->idle);
++
++ if (entity == st->first_idle) {
++ next = rb_next(&entity->rb_node);
++ st->first_idle = bfq_entity_of(next);
++ }
++
++ if (entity == st->last_idle) {
++ next = rb_prev(&entity->rb_node);
++ st->last_idle = bfq_entity_of(next);
++ }
++
++ bfq_extract(&st->idle, entity);
++
++ if (bfqq != NULL)
++ list_del(&bfqq->bfqq_list);
++}
++
++/**
++ * bfq_insert - generic tree insertion.
++ * @root: tree root.
++ * @entity: entity to insert.
++ *
++ * This is used for the idle and the active tree, since they are both
++ * ordered by finish time.
++ */
++static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
++{
++ struct bfq_entity *entry;
++ struct rb_node **node = &root->rb_node;
++ struct rb_node *parent = NULL;
++
++ BUG_ON(entity->tree != NULL);
++
++ while (*node != NULL) {
++ parent = *node;
++ entry = rb_entry(parent, struct bfq_entity, rb_node);
++
++ if (bfq_gt(entry->finish, entity->finish))
++ node = &parent->rb_left;
++ else
++ node = &parent->rb_right;
++ }
++
++ rb_link_node(&entity->rb_node, parent, node);
++ rb_insert_color(&entity->rb_node, root);
++
++ entity->tree = root;
++}
++
++/**
++ * bfq_update_min - update the min_start field of an entity.
++ * @entity: the entity to update.
++ * @node: one of its children.
++ *
++ * This function is called when @entity may store an invalid value for
++ * min_start due to updates to the active tree. The function assumes
++ * that the subtree rooted at @node (which may be its left or its right
++ * child) has a valid min_start value.
++ */
++static inline void bfq_update_min(struct bfq_entity *entity,
++ struct rb_node *node)
++{
++ struct bfq_entity *child;
++
++ if (node != NULL) {
++ child = rb_entry(node, struct bfq_entity, rb_node);
++ if (bfq_gt(entity->min_start, child->min_start))
++ entity->min_start = child->min_start;
++ }
++}
++
++/**
++ * bfq_update_active_node - recalculate min_start.
++ * @node: the node to update.
++ *
++ * @node may have changed position or one of its children may have moved,
++ * this function updates its min_start value. The left and right subtrees
++ * are assumed to hold a correct min_start value.
++ */
++static inline void bfq_update_active_node(struct rb_node *node)
++{
++ struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);
++
++ entity->min_start = entity->start;
++ bfq_update_min(entity, node->rb_right);
++ bfq_update_min(entity, node->rb_left);
++}
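This is the classic augmented-search-tree invariant: each node caches the minimum start time over its subtree, which is what later lets bfq_first_active_entity() find an eligible entity in O(log N). A minimal sketch of the same invariant on a hypothetical plain binary tree with parent pointers; the kernel version walks the rbtree, uses the wraparound-safe bfq_gt() instead of <, and also refreshes siblings because rbtree rebalancing can move them:

    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical userspace analogue of the augmented entity fields. */
    struct node {
        uint64_t start;          /* entity->start */
        uint64_t min_start;      /* cached min start over this subtree */
        struct node *left, *right, *parent;
    };

    /* Mirrors bfq_update_active_node(): recompute from self and children. */
    static void update_node(struct node *n)
    {
        n->min_start = n->start;
        if (n->left && n->left->min_start < n->min_start)
            n->min_start = n->left->min_start;
        if (n->right && n->right->min_start < n->min_start)
            n->min_start = n->right->min_start;
    }

    /* Mirrors bfq_update_active_tree(): repair the path up to the root. */
    static void update_to_root(struct node *n)
    {
        for (; n != NULL; n = n->parent)
            update_node(n);
    }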
++
++/**
++ * bfq_update_active_tree - update min_start for the whole active tree.
++ * @node: the starting node.
++ *
++ * @node must be the deepest modified node after an update. This function
++ * updates its min_start using the values held by its children, assuming
++ * that they did not change, and then updates all the nodes that may have
++ * changed in the path to the root. The only nodes that may have changed
++ * are the ones in the path or their siblings.
++ */
++static void bfq_update_active_tree(struct rb_node *node)
++{
++ struct rb_node *parent;
++
++up:
++ bfq_update_active_node(node);
++
++ parent = rb_parent(node);
++ if (parent == NULL)
++ return;
++
++ if (node == parent->rb_left && parent->rb_right != NULL)
++ bfq_update_active_node(parent->rb_right);
++ else if (parent->rb_left != NULL)
++ bfq_update_active_node(parent->rb_left);
++
++ node = parent;
++ goto up;
++}
++
++static void bfq_weights_tree_add(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root);
++
++static void bfq_weights_tree_remove(struct bfq_data *bfqd,
++ struct bfq_entity *entity,
++ struct rb_root *root);
++
++
++/**
++ * bfq_active_insert - insert an entity in the active tree of its
++ * group/device.
++ * @st: the service tree of the entity.
++ * @entity: the entity being inserted.
++ *
++ * The active tree is ordered by finish time, but an extra key is kept
++ * per each node, containing the minimum value for the start times of
++ * its children (and the node itself), so it's possible to search for
++ * the eligible node with the lowest finish time in logarithmic time.
++ */
++static void bfq_active_insert(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct rb_node *node = &entity->rb_node;
++#ifdef CONFIG_CGROUP_BFQIO
++ struct bfq_sched_data *sd = NULL;
++ struct bfq_group *bfqg = NULL;
++ struct bfq_data *bfqd = NULL;
++#endif
++
++ bfq_insert(&st->active, entity);
++
++ if (node->rb_left != NULL)
++ node = node->rb_left;
++ else if (node->rb_right != NULL)
++ node = node->rb_right;
++
++ bfq_update_active_tree(node);
++
++#ifdef CONFIG_CGROUP_BFQIO
++ sd = entity->sched_data;
++ bfqg = container_of(sd, struct bfq_group, sched_data);
++ BUG_ON(!bfqg);
++ bfqd = (struct bfq_data *)bfqg->bfqd;
++#endif
++ if (bfqq != NULL)
++ list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
++#ifdef CONFIG_CGROUP_BFQIO
++ else { /* bfq_group */
++ BUG_ON(!bfqd);
++ bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree);
++ }
++ if (bfqg != bfqd->root_group) {
++ BUG_ON(!bfqg);
++ BUG_ON(!bfqd);
++ bfqg->active_entities++;
++ if (bfqg->active_entities == 2)
++ bfqd->active_numerous_groups++;
++ }
++#endif
++}
++
++/**
++ * bfq_ioprio_to_weight - calc a weight from an ioprio.
++ * @ioprio: the ioprio value to convert.
++ */
++static inline unsigned short bfq_ioprio_to_weight(int ioprio)
++{
++ BUG_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR);
++ return IOPRIO_BE_NR - ioprio;
++}
++
++/**
++ * bfq_weight_to_ioprio - calc an ioprio from a weight.
++ * @weight: the weight value to convert.
++ *
++ * To preserve as much as possible the old ioprio-only user interface,
++ * 0 is used as an escape ioprio value for weights (numerically) equal to
++ * or larger than IOPRIO_BE_NR.
++ */
++static inline unsigned short bfq_weight_to_ioprio(int weight)
++{
++ BUG_ON(weight < BFQ_MIN_WEIGHT || weight > BFQ_MAX_WEIGHT);
++ return IOPRIO_BE_NR - weight < 0 ? 0 : IOPRIO_BE_NR - weight;
++}
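The two conversions define a simple linear map: best-effort priorities 0..7 become weights 8..1 (assuming IOPRIO_BE_NR == 8, as in mainline), with ioprio 0 doubling as the escape value for weights raised beyond that range. A userspace sketch of the round trip:

    #include <stdio.h>

    #define IOPRIO_BE_NR 8  /* best-effort ioprio levels; mainline value */

    static unsigned short ioprio_to_weight(int ioprio)
    {
        return IOPRIO_BE_NR - ioprio;          /* 0 -> 8 ... 7 -> 1 */
    }

    static unsigned short weight_to_ioprio(int weight)
    {
        /* Weights >= IOPRIO_BE_NR collapse onto the escape ioprio 0. */
        return IOPRIO_BE_NR - weight < 0 ? 0 : IOPRIO_BE_NR - weight;
    }

    int main(void)
    {
        int p;

        for (p = 0; p < IOPRIO_BE_NR; p++)
            printf("ioprio %d -> weight %hu -> ioprio %hu\n", p,
                   ioprio_to_weight(p),
                   weight_to_ioprio(ioprio_to_weight(p)));
        /* A raised weight (e.g. 100) has no exact ioprio; it maps to 0. */
        printf("weight 100 -> ioprio %hu\n", weight_to_ioprio(100));
        return 0;
    }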
++
++static inline void bfq_get_entity(struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++ if (bfqq != NULL) {
++ atomic_inc(&bfqq->ref);
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
++ bfqq, atomic_read(&bfqq->ref));
++ }
++}
++
++/**
++ * bfq_find_deepest - find the deepest node that an extraction can modify.
++ * @node: the node being removed.
++ *
++ * Do the first step of an extraction in an rb tree, looking for the
++ * node that will replace @node, and returning the deepest node that
++ * the following modifications to the tree can touch. If @node is the
++ * last node in the tree return %NULL.
++ */
++static struct rb_node *bfq_find_deepest(struct rb_node *node)
++{
++ struct rb_node *deepest;
++
++ if (node->rb_right == NULL && node->rb_left == NULL)
++ deepest = rb_parent(node);
++ else if (node->rb_right == NULL)
++ deepest = node->rb_left;
++ else if (node->rb_left == NULL)
++ deepest = node->rb_right;
++ else {
++ deepest = rb_next(node);
++ if (deepest->rb_right != NULL)
++ deepest = deepest->rb_right;
++ else if (rb_parent(deepest) != node)
++ deepest = rb_parent(deepest);
++ }
++
++ return deepest;
++}
++
++/**
++ * bfq_active_extract - remove an entity from the active tree.
++ * @st: the service_tree containing the tree.
++ * @entity: the entity being removed.
++ */
++static void bfq_active_extract(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct rb_node *node;
++#ifdef CONFIG_CGROUP_BFQIO
++ struct bfq_sched_data *sd = NULL;
++ struct bfq_group *bfqg = NULL;
++ struct bfq_data *bfqd = NULL;
++#endif
++
++ node = bfq_find_deepest(&entity->rb_node);
++ bfq_extract(&st->active, entity);
++
++ if (node != NULL)
++ bfq_update_active_tree(node);
++
++#ifdef CONFIG_CGROUP_BFQIO
++ sd = entity->sched_data;
++ bfqg = container_of(sd, struct bfq_group, sched_data);
++ BUG_ON(!bfqg);
++ bfqd = (struct bfq_data *)bfqg->bfqd;
++#endif
++ if (bfqq != NULL)
++ list_del(&bfqq->bfqq_list);
++#ifdef CONFIG_CGROUP_BFQIO
++ else { /* bfq_group */
++ BUG_ON(!bfqd);
++ bfq_weights_tree_remove(bfqd, entity,
++ &bfqd->group_weights_tree);
++ }
++ if (bfqg != bfqd->root_group) {
++ BUG_ON(!bfqg);
++ BUG_ON(!bfqd);
++ BUG_ON(!bfqg->active_entities);
++ bfqg->active_entities--;
++ if (bfqg->active_entities == 1) {
++ BUG_ON(!bfqd->active_numerous_groups);
++ bfqd->active_numerous_groups--;
++ }
++ }
++#endif
++}
++
++/**
++ * bfq_idle_insert - insert an entity into the idle tree.
++ * @st: the service tree containing the tree.
++ * @entity: the entity to insert.
++ */
++static void bfq_idle_insert(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct bfq_entity *first_idle = st->first_idle;
++ struct bfq_entity *last_idle = st->last_idle;
++
++ if (first_idle == NULL || bfq_gt(first_idle->finish, entity->finish))
++ st->first_idle = entity;
++ if (last_idle == NULL || bfq_gt(entity->finish, last_idle->finish))
++ st->last_idle = entity;
++
++ bfq_insert(&st->idle, entity);
++
++ if (bfqq != NULL)
++ list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
++}
++
++/**
++ * bfq_forget_entity - remove an entity from the wfq trees.
++ * @st: the service tree.
++ * @entity: the entity being removed.
++ *
++ * Update the device status and forget everything about @entity, putting
++ * the device reference to it, if it is a queue. Entities belonging to
++ * groups are not refcounted.
++ */
++static void bfq_forget_entity(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ struct bfq_sched_data *sd;
++
++ BUG_ON(!entity->on_st);
++
++ entity->on_st = 0;
++ st->wsum -= entity->weight;
++ if (bfqq != NULL) {
++ sd = entity->sched_data;
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity: %p %d",
++ bfqq, atomic_read(&bfqq->ref));
++ bfq_put_queue(bfqq);
++ }
++}
++
++/**
++ * bfq_put_idle_entity - release the idle tree ref of an entity.
++ * @st: service tree for the entity.
++ * @entity: the entity being released.
++ */
++static void bfq_put_idle_entity(struct bfq_service_tree *st,
++ struct bfq_entity *entity)
++{
++ bfq_idle_extract(st, entity);
++ bfq_forget_entity(st, entity);
++}
++
++/**
++ * bfq_forget_idle - update the idle tree if necessary.
++ * @st: the service tree to act upon.
++ *
++ * To preserve the global O(log N) complexity we only remove one entry here;
++ * as the idle tree will not grow indefinitely this can be done safely.
++ */
++static void bfq_forget_idle(struct bfq_service_tree *st)
++{
++ struct bfq_entity *first_idle = st->first_idle;
++ struct bfq_entity *last_idle = st->last_idle;
++
++ if (RB_EMPTY_ROOT(&st->active) && last_idle != NULL &&
++ !bfq_gt(last_idle->finish, st->vtime)) {
++ /*
++ * Forget the whole idle tree, increasing the vtime past
++ * the last finish time of idle entities.
++ */
++ st->vtime = last_idle->finish;
++ }
++
++ if (first_idle != NULL && !bfq_gt(first_idle->finish, st->vtime))
++ bfq_put_idle_entity(st, first_idle);
++}
++
++static struct bfq_service_tree *
++__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
++ struct bfq_entity *entity)
++{
++ struct bfq_service_tree *new_st = old_st;
++
++ if (entity->ioprio_changed) {
++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++ unsigned short prev_weight, new_weight;
++ struct bfq_data *bfqd = NULL;
++ struct rb_root *root;
++#ifdef CONFIG_CGROUP_BFQIO
++ struct bfq_sched_data *sd;
++ struct bfq_group *bfqg;
++#endif
++
++ if (bfqq != NULL)
++ bfqd = bfqq->bfqd;
++#ifdef CONFIG_CGROUP_BFQIO
++ else {
++ sd = entity->my_sched_data;
++ bfqg = container_of(sd, struct bfq_group, sched_data);
++ BUG_ON(!bfqg);
++ bfqd = (struct bfq_data *)bfqg->bfqd;
++ BUG_ON(!bfqd);
++ }
++#endif
++
++ BUG_ON(old_st->wsum < entity->weight);
++ old_st->wsum -= entity->weight;
++
++ if (entity->new_weight != entity->orig_weight) {
++ if (entity->new_weight < BFQ_MIN_WEIGHT ||
++ entity->new_weight > BFQ_MAX_WEIGHT) {
++ printk(KERN_CRIT "update_weight_prio: "
++ "new_weight %d\n",
++ entity->new_weight);
++ BUG();
++ }
++ entity->orig_weight = entity->new_weight;
++ entity->ioprio =
++ bfq_weight_to_ioprio(entity->orig_weight);
++ } else if (entity->new_ioprio != entity->ioprio) {
++ entity->ioprio = entity->new_ioprio;
++ entity->orig_weight =
++ bfq_ioprio_to_weight(entity->ioprio);
++ } else
++ entity->new_weight = entity->orig_weight =
++ bfq_ioprio_to_weight(entity->ioprio);
++
++ entity->ioprio_class = entity->new_ioprio_class;
++ entity->ioprio_changed = 0;
++
++ /*
++ * NOTE: here we may be changing the weight too early,
++ * this will cause unfairness. The correct approach
++ * would have required additional complexity to defer
++ * weight changes to the proper time instants (i.e.,
++ * when entity->finish <= old_st->vtime).
++ */
++ new_st = bfq_entity_service_tree(entity);
++
++ prev_weight = entity->weight;
++ new_weight = entity->orig_weight *
++ (bfqq != NULL ? bfqq->wr_coeff : 1);
++ /*
++ * If the weight of the entity changes, remove the entity
++ * from its old weight counter (if there is a counter
++ * associated with the entity), and add it to the counter
++ * associated with its new weight.
++ */
++ if (prev_weight != new_weight) {
++ root = bfqq ? &bfqd->queue_weights_tree :
++ &bfqd->group_weights_tree;
++ bfq_weights_tree_remove(bfqd, entity, root);
++ }
++ entity->weight = new_weight;
++ /*
++ * Add the entity to its weights tree only if it is
++ * not associated with a weight-raised queue.
++ */
++ if (prev_weight != new_weight &&
++ (bfqq ? bfqq->wr_coeff == 1 : 1))
++ /* If we get here, root has been initialized. */
++ bfq_weights_tree_add(bfqd, entity, root);
++
++ new_st->wsum += entity->weight;
++
++ if (new_st != old_st)
++ entity->start = new_st->vtime;
++ }
++
++ return new_st;
++}
++
++/**
++ * bfq_bfqq_served - update the scheduler status after selection for
++ * service.
++ * @bfqq: the queue being served.
++ * @served: bytes to transfer.
++ *
++ * NOTE: this can be optimized, as the timestamps of upper level entities
++ * are synchronized every time a new bfqq is selected for service. For now,
++ * we keep it this way to better check consistency.
++ */
++static void bfq_bfqq_served(struct bfq_queue *bfqq, unsigned long served)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++ struct bfq_service_tree *st;
++
++ for_each_entity(entity) {
++ st = bfq_entity_service_tree(entity);
++
++ entity->service += served;
++ BUG_ON(entity->service > entity->budget);
++ BUG_ON(st->wsum == 0);
++
++ st->vtime += bfq_delta(served, st->wsum);
++ bfq_forget_idle(st);
++ }
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %lu secs", served);
++}
++
++/**
++ * bfq_bfqq_charge_full_budget - set the service to the entity budget.
++ * @bfqq: the queue that needs a service update.
++ *
++ * When it's not possible to be fair in the service domain, because
++ * a queue is not consuming its budget fast enough (the meaning of
++ * fast depends on the timeout parameter), we charge it a full
++ * budget. In this way we should obtain a sort of time-domain
++ * fairness among all the seeky/slow queues.
++ */
++static inline void bfq_bfqq_charge_full_budget(struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ bfq_log_bfqq(bfqq->bfqd, bfqq, "charge_full_budget");
++
++ bfq_bfqq_served(bfqq, entity->budget - entity->service);
++}
++
++/**
++ * __bfq_activate_entity - activate an entity.
++ * @entity: the entity being activated.
++ *
++ * Called whenever an entity is activated, i.e., it is not active and one
++ * of its children receives a new request, or has to be reactivated due to
++ * budget exhaustion. It uses the current budget of the entity (and the
++ * service received if @entity is active) of the queue to calculate its
++ * timestamps.
++ */
++static void __bfq_activate_entity(struct bfq_entity *entity)
++{
++ struct bfq_sched_data *sd = entity->sched_data;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++
++ if (entity == sd->in_service_entity) {
++ BUG_ON(entity->tree != NULL);
++ /*
++ * If we are requeueing the current entity we have
++		 * to take care not to charge it for service it has
++		 * not received.
++ */
++ bfq_calc_finish(entity, entity->service);
++ entity->start = entity->finish;
++ sd->in_service_entity = NULL;
++ } else if (entity->tree == &st->active) {
++ /*
++ * Requeueing an entity due to a change of some
++ * next_in_service entity below it. We reuse the
++ * old start time.
++ */
++ bfq_active_extract(st, entity);
++ } else if (entity->tree == &st->idle) {
++ /*
++ * Must be on the idle tree, bfq_idle_extract() will
++ * check for that.
++ */
++ bfq_idle_extract(st, entity);
++ entity->start = bfq_gt(st->vtime, entity->finish) ?
++ st->vtime : entity->finish;
++ } else {
++ /*
++ * The finish time of the entity may be invalid, and
++ * it is in the past for sure, otherwise the queue
++ * would have been on the idle tree.
++ */
++ entity->start = st->vtime;
++ st->wsum += entity->weight;
++ bfq_get_entity(entity);
++
++ BUG_ON(entity->on_st);
++ entity->on_st = 1;
++ }
++
++ st = __bfq_entity_update_weight_prio(st, entity);
++ bfq_calc_finish(entity, entity->budget);
++ bfq_active_insert(st, entity);
++}
++
++/**
++ * bfq_activate_entity - activate an entity and its ancestors if necessary.
++ * @entity: the entity to activate.
++ *
++ * Activate @entity and all the entities on the path from it to the root.
++ */
++static void bfq_activate_entity(struct bfq_entity *entity)
++{
++ struct bfq_sched_data *sd;
++
++ for_each_entity(entity) {
++ __bfq_activate_entity(entity);
++
++ sd = entity->sched_data;
++ if (!bfq_update_next_in_service(sd))
++ /*
++ * No need to propagate the activation to the
++ * upper entities, as they will be updated when
++ * the in-service entity is rescheduled.
++ */
++ break;
++ }
++}
++
++/**
++ * __bfq_deactivate_entity - deactivate an entity from its service tree.
++ * @entity: the entity to deactivate.
++ * @requeue: if false, the entity will not be put into the idle tree.
++ *
++ * Deactivate an entity, independently of its previous state. If the
++ * entity was not on a service tree just return, otherwise if it is on
++ * any scheduler tree, extract it from that tree, and if necessary
++ * and if the caller did not specify @requeue, put it on the idle tree.
++ *
++ * Return %1 if the caller should update the entity hierarchy, i.e.,
++ * if the entity was in service or if it was the next_in_service for
++ * its sched_data; return %0 otherwise.
++ */
++static int __bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
++{
++ struct bfq_sched_data *sd = entity->sched_data;
++ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++ int was_in_service = entity == sd->in_service_entity;
++ int ret = 0;
++
++ if (!entity->on_st)
++ return 0;
++
++ BUG_ON(was_in_service && entity->tree != NULL);
++
++ if (was_in_service) {
++ bfq_calc_finish(entity, entity->service);
++ sd->in_service_entity = NULL;
++ } else if (entity->tree == &st->active)
++ bfq_active_extract(st, entity);
++ else if (entity->tree == &st->idle)
++ bfq_idle_extract(st, entity);
++ else if (entity->tree != NULL)
++ BUG();
++
++ if (was_in_service || sd->next_in_service == entity)
++ ret = bfq_update_next_in_service(sd);
++
++ if (!requeue || !bfq_gt(entity->finish, st->vtime))
++ bfq_forget_entity(st, entity);
++ else
++ bfq_idle_insert(st, entity);
++
++ BUG_ON(sd->in_service_entity == entity);
++ BUG_ON(sd->next_in_service == entity);
++
++ return ret;
++}
++
++/**
++ * bfq_deactivate_entity - deactivate an entity.
++ * @entity: the entity to deactivate.
++ * @requeue: true if the entity can be put on the idle tree
++ */
++static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
++{
++ struct bfq_sched_data *sd;
++ struct bfq_entity *parent;
++
++ for_each_entity_safe(entity, parent) {
++ sd = entity->sched_data;
++
++ if (!__bfq_deactivate_entity(entity, requeue))
++ /*
++ * The parent entity is still backlogged, and
++ * we don't need to update it as it is still
++ * in service.
++ */
++ break;
++
++ if (sd->next_in_service != NULL)
++ /*
++ * The parent entity is still backlogged and
++ * the budgets on the path towards the root
++ * need to be updated.
++ */
++ goto update;
++
++ /*
++	 * If we reach here, the parent is no longer backlogged and
++	 * we want to propagate the dequeue upwards.
++ */
++ requeue = 1;
++ }
++
++ return;
++
++update:
++ entity = parent;
++ for_each_entity(entity) {
++ __bfq_activate_entity(entity);
++
++ sd = entity->sched_data;
++ if (!bfq_update_next_in_service(sd))
++ break;
++ }
++}
++
++/**
++ * bfq_update_vtime - update vtime if necessary.
++ * @st: the service tree to act upon.
++ *
++ * If necessary update the service tree vtime to have at least one
++ * eligible entity, skipping to its start time. Assumes that the
++ * active tree of the device is not empty.
++ *
++ * NOTE: this hierarchical implementation updates vtimes quite often,
++ * we may end up with reactivated processes getting timestamps after a
++ * vtime skip done because we needed a ->first_active entity on some
++ * intermediate node.
++ */
++static void bfq_update_vtime(struct bfq_service_tree *st)
++{
++ struct bfq_entity *entry;
++ struct rb_node *node = st->active.rb_node;
++
++ entry = rb_entry(node, struct bfq_entity, rb_node);
++ if (bfq_gt(entry->min_start, st->vtime)) {
++ st->vtime = entry->min_start;
++ bfq_forget_idle(st);
++ }
++}
++
++/**
++ * bfq_first_active_entity - find the eligible entity with
++ * the smallest finish time
++ * @st: the service tree to select from.
++ *
++ * This function searches for the first schedulable entity, starting from
++ * the root of the tree and going left whenever the left subtree contains
++ * at least one eligible (start <= vtime) entity. The path on
++ * the right is followed only if a) the left subtree contains no eligible
++ * entities and b) no eligible entity has been found yet.
++ */
++static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st)
++{
++ struct bfq_entity *entry, *first = NULL;
++ struct rb_node *node = st->active.rb_node;
++
++ while (node != NULL) {
++ entry = rb_entry(node, struct bfq_entity, rb_node);
++left:
++ if (!bfq_gt(entry->start, st->vtime))
++ first = entry;
++
++ BUG_ON(bfq_gt(entry->min_start, st->vtime));
++
++ if (node->rb_left != NULL) {
++ entry = rb_entry(node->rb_left,
++ struct bfq_entity, rb_node);
++ if (!bfq_gt(entry->min_start, st->vtime)) {
++ node = node->rb_left;
++ goto left;
++ }
++ }
++ if (first != NULL)
++ break;
++ node = node->rb_right;
++ }
++
++ BUG_ON(first == NULL && !RB_EMPTY_ROOT(&st->active));
++ return first;
++}
++
++/**
++ * __bfq_lookup_next_entity - return the first eligible entity in @st.
++ * @st: the service tree.
++ *
++ * Update the virtual time in @st and return the first eligible entity
++ * it contains.
++ */
++static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st,
++ bool force)
++{
++ struct bfq_entity *entity, *new_next_in_service = NULL;
++
++ if (RB_EMPTY_ROOT(&st->active))
++ return NULL;
++
++ bfq_update_vtime(st);
++ entity = bfq_first_active_entity(st);
++ BUG_ON(bfq_gt(entity->start, st->vtime));
++
++ /*
++ * If the chosen entity does not match with the sched_data's
++ * next_in_service and we are forcedly serving the IDLE priority
++ * class tree, bubble up budget update.
++ */
++ if (unlikely(force && entity != entity->sched_data->next_in_service)) {
++ new_next_in_service = entity;
++ for_each_entity(new_next_in_service)
++ bfq_update_budget(new_next_in_service);
++ }
++
++ return entity;
++}
++
++/**
++ * bfq_lookup_next_entity - return the first eligible entity in @sd.
++ * @sd: the sched_data.
++ * @extract: if true the returned entity will be also extracted from @sd.
++ *
++ * NOTE: since we cache the next_in_service entity at each level of the
++ * hierarchy, the complexity of the lookup can be decreased with
++ * absolutely no effort by just returning the cached next_in_service value;
++ * we prefer to do full lookups to test the consistency of the data
++ * structures.
++ */
++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
++ int extract,
++ struct bfq_data *bfqd)
++{
++ struct bfq_service_tree *st = sd->service_tree;
++ struct bfq_entity *entity;
++ int i = 0;
++
++ BUG_ON(sd->in_service_entity != NULL);
++
++ if (bfqd != NULL &&
++ jiffies - bfqd->bfq_class_idle_last_service > BFQ_CL_IDLE_TIMEOUT) {
++ entity = __bfq_lookup_next_entity(st + BFQ_IOPRIO_CLASSES - 1,
++ true);
++ if (entity != NULL) {
++ i = BFQ_IOPRIO_CLASSES - 1;
++ bfqd->bfq_class_idle_last_service = jiffies;
++ sd->next_in_service = entity;
++ }
++ }
++ for (; i < BFQ_IOPRIO_CLASSES; i++) {
++ entity = __bfq_lookup_next_entity(st + i, false);
++ if (entity != NULL) {
++ if (extract) {
++ bfq_check_next_in_service(sd, entity);
++ bfq_active_extract(st + i, entity);
++ sd->in_service_entity = entity;
++ sd->next_in_service = NULL;
++ }
++ break;
++ }
++ }
++
++ return entity;
++}
++
++/*
++ * Get next queue for service.
++ */
++static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
++{
++ struct bfq_entity *entity = NULL;
++ struct bfq_sched_data *sd;
++ struct bfq_queue *bfqq;
++
++ BUG_ON(bfqd->in_service_queue != NULL);
++
++ if (bfqd->busy_queues == 0)
++ return NULL;
++
++ sd = &bfqd->root_group->sched_data;
++ for (; sd != NULL; sd = entity->my_sched_data) {
++ entity = bfq_lookup_next_entity(sd, 1, bfqd);
++ BUG_ON(entity == NULL);
++ entity->service = 0;
++ }
++
++ bfqq = bfq_entity_to_bfqq(entity);
++ BUG_ON(bfqq == NULL);
++
++ return bfqq;
++}
++
++static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
++{
++ if (bfqd->in_service_bic != NULL) {
++ put_io_context(bfqd->in_service_bic->icq.ioc);
++ bfqd->in_service_bic = NULL;
++ }
++
++ bfqd->in_service_queue = NULL;
++ del_timer(&bfqd->idle_slice_timer);
++}
++
++static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ int requeue)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ if (bfqq == bfqd->in_service_queue)
++ __bfq_bfqd_reset_in_service(bfqd);
++
++ bfq_deactivate_entity(entity, requeue);
++}
++
++static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ struct bfq_entity *entity = &bfqq->entity;
++
++ bfq_activate_entity(entity);
++}
++
++/*
++ * Called when the bfqq no longer has requests pending, remove it from
++ * the service tree.
++ */
++static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++ int requeue)
++{
++ BUG_ON(!bfq_bfqq_busy(bfqq));
++ BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++
++ bfq_log_bfqq(bfqd, bfqq, "del from busy");
++
++ bfq_clear_bfqq_busy(bfqq);
++
++ BUG_ON(bfqd->busy_queues == 0);
++ bfqd->busy_queues--;
++
++ if (!bfqq->dispatched) {
++ bfq_weights_tree_remove(bfqd, &bfqq->entity,
++ &bfqd->queue_weights_tree);
++ if (!blk_queue_nonrot(bfqd->queue)) {
++ BUG_ON(!bfqd->busy_in_flight_queues);
++ bfqd->busy_in_flight_queues--;
++ if (bfq_bfqq_constantly_seeky(bfqq)) {
++ BUG_ON(!bfqd->
++ const_seeky_busy_in_flight_queues);
++ bfqd->const_seeky_busy_in_flight_queues--;
++ }
++ }
++ }
++ if (bfqq->wr_coeff > 1)
++ bfqd->wr_busy_queues--;
++
++ bfq_deactivate_bfqq(bfqd, bfqq, requeue);
++}
++
++/*
++ * Called when an inactive queue receives a new request.
++ */
++static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++ BUG_ON(bfq_bfqq_busy(bfqq));
++ BUG_ON(bfqq == bfqd->in_service_queue);
++
++ bfq_log_bfqq(bfqd, bfqq, "add to busy");
++
++ bfq_activate_bfqq(bfqd, bfqq);
++
++ bfq_mark_bfqq_busy(bfqq);
++ bfqd->busy_queues++;
++
++ if (!bfqq->dispatched) {
++ if (bfqq->wr_coeff == 1)
++ bfq_weights_tree_add(bfqd, &bfqq->entity,
++ &bfqd->queue_weights_tree);
++ if (!blk_queue_nonrot(bfqd->queue)) {
++ bfqd->busy_in_flight_queues++;
++ if (bfq_bfqq_constantly_seeky(bfqq))
++ bfqd->const_seeky_busy_in_flight_queues++;
++ }
++ }
++ if (bfqq->wr_coeff > 1)
++ bfqd->wr_busy_queues++;
++}
+diff -Nur linux-4.1.3/block/Kconfig.iosched linux-xbian-imx6/block/Kconfig.iosched
+--- linux-4.1.3/block/Kconfig.iosched 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/block/Kconfig.iosched 2015-07-27 23:13:03.600137415 +0200
+@@ -39,6 +39,27 @@
+ ---help---
+ Enable group IO scheduling in CFQ.
+
++config IOSCHED_BFQ
++ tristate "BFQ I/O scheduler"
++ default n
++ ---help---
++ The BFQ I/O scheduler tries to distribute bandwidth among
++ all processes according to their weights.
++ It aims at distributing the bandwidth as desired, independently of
++ the disk parameters and with any workload. It also tries to
++ guarantee low latency to interactive and soft real-time
++ applications. If compiled built-in (saying Y here), BFQ can
++ be configured to support hierarchical scheduling.
++
++config CGROUP_BFQIO
++ bool "BFQ hierarchical scheduling support"
++ depends on CGROUPS && IOSCHED_BFQ=y
++ default n
++ ---help---
++ Enable hierarchical scheduling in BFQ, using the cgroups
++ filesystem interface. The name of the subsystem will be
++ bfqio.
++
+ choice
+ prompt "Default I/O scheduler"
+ default DEFAULT_CFQ
+@@ -52,6 +73,16 @@
+ config DEFAULT_CFQ
+ bool "CFQ" if IOSCHED_CFQ=y
+
++ config DEFAULT_BFQ
++ bool "BFQ" if IOSCHED_BFQ=y
++ help
++ Selects BFQ as the default I/O scheduler which will be
++ used by default for all block devices.
++ The BFQ I/O scheduler aims at distributing the bandwidth
++ as desired, independently of the disk parameters and with
++ any workload. It also tries to guarantee low latency to
++ interactive and soft real-time applications.
++
+ config DEFAULT_NOOP
+ bool "No-op"
+
+@@ -61,6 +92,7 @@
+ string
+ default "deadline" if DEFAULT_DEADLINE
+ default "cfq" if DEFAULT_CFQ
++ default "bfq" if DEFAULT_BFQ
+ default "noop" if DEFAULT_NOOP
+
+ endmenu
+diff -Nur linux-4.1.3/block/Makefile linux-xbian-imx6/block/Makefile
+--- linux-4.1.3/block/Makefile 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/block/Makefile 2015-07-27 23:13:03.600137415 +0200
+@@ -18,6 +18,7 @@
+ obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
+ obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
+ obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
++obj-$(CONFIG_IOSCHED_BFQ) += bfq-iosched.o
+
+ obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
+ obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
+diff -Nur linux-4.1.3/Documentation/devicetree/bindings/mmc/mmc.txt linux-xbian-imx6/Documentation/devicetree/bindings/mmc/mmc.txt
+--- linux-4.1.3/Documentation/devicetree/bindings/mmc/mmc.txt 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/Documentation/devicetree/bindings/mmc/mmc.txt 2015-07-27 23:12:59.335353684 +0200
+@@ -5,6 +5,8 @@
+ Interpreted by the OF core:
+ - reg: Registers location and length.
+ - interrupts: Interrupts used by the MMC controller.
++- clocks: Clocks needed for the host controller, if any.
++- clock-names: Goes with clocks above.
+
+ Card detection:
+ If no property below is supplied, host native card detect is used.
+@@ -43,6 +45,15 @@
+ - dsr: Value the card's (optional) Driver Stage Register (DSR) should be
+ programmed with. Valid range: [0 .. 0xffff].
+
++Card power and reset control:
++The following properties can be specified for cases where the MMC
++peripheral needs additional reset, regulator and clock lines. It is for
++example common for WiFi/BT adapters to have these separate from the main
++MMC bus:
++ - card-reset-gpios: Specify GPIOs for card reset (reset active low)
++ - card-external-vcc-supply: Regulator to drive (independent) card VCC
++ - clock with name "card_ext_clock": External clock provided to the card
++
+ *NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line
+ polarity properties, we have to fix the meaning of the "normal" and "inverted"
+ line levels. We choose to follow the SDHCI standard, which specifies both those
+diff -Nur linux-4.1.3/Documentation/devicetree/bindings/vendor-prefixes.txt linux-xbian-imx6/Documentation/devicetree/bindings/vendor-prefixes.txt
+--- linux-4.1.3/Documentation/devicetree/bindings/vendor-prefixes.txt 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/Documentation/devicetree/bindings/vendor-prefixes.txt 2015-07-27 23:12:59.518699524 +0200
+@@ -201,6 +201,7 @@
+ variscite Variscite Ltd.
+ via VIA Technologies, Inc.
+ virtio Virtual I/O Device Specification, developed by the OASIS consortium
++vivante Vivante Corporation
+ voipac Voipac Technologies s.r.o.
+ winbond Winbond Electronics corp.
+ wlf Wolfson Microelectronics
+diff -Nur linux-4.1.3/drivers/char/frandom.c linux-xbian-imx6/drivers/char/frandom.c
+--- linux-4.1.3/drivers/char/frandom.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/char/frandom.c 2015-07-27 23:13:03.986757999 +0200
+@@ -0,0 +1,415 @@
++/*
++** frandom.c
++** Fast pseudo-random generator
++**
++** (c) Copyright 2003-2011 Eli Billauer
++** http://www.billauer.co.il
++**
++** This program is free software; you can redistribute it and/or modify
++** it under the terms of the GNU General Public License as published by
++** the Free Software Foundation; either version 2 of the License, or
++** (at your option) any later version.
++**
++**
++*/
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/fs.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/random.h>
++
++#include <asm/uaccess.h>
++#include <linux/cdev.h>
++#include <linux/err.h>
++#include <linux/device.h>
++
++#define INTERNAL_SEED 0
++#define EXTERNAL_SEED 1
++
++#define FRANDOM_MAJOR 235
++#define FRANDOM_MINOR 11
++#define ERANDOM_MINOR 12
++
++static struct file_operations frandom_fops; /* Values assigned below */
++
++static int erandom_seeded = 0; /* Internal flag */
++
++static int frandom_major = FRANDOM_MAJOR;
++static int frandom_minor = FRANDOM_MINOR;
++static int erandom_minor = ERANDOM_MINOR;
++static int frandom_bufsize = 256;
++static int frandom_chunklimit = 0; /* =0 means unlimited */
++
++static struct cdev frandom_cdev;
++static struct cdev erandom_cdev;
++static struct class *frandom_class;
++struct device *frandom_device;
++struct device *erandom_device;
++
++MODULE_DESCRIPTION("Fast pseudo-random number generator");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Eli Billauer");
++module_param(frandom_major, int, 0);
++module_param(frandom_minor, int, 0);
++module_param(erandom_minor, int, 0);
++module_param(frandom_bufsize, int, 0);
++module_param(frandom_chunklimit, int, 0);
++
++MODULE_PARM_DESC(frandom_major,"Major number of /dev/frandom and /dev/erandom");
++MODULE_PARM_DESC(frandom_minor,"Minor number of /dev/frandom");
++MODULE_PARM_DESC(erandom_minor,"Minor number of /dev/erandom");
++MODULE_PARM_DESC(frandom_bufsize,"Internal buffer size in bytes. Default is 256. Must be >= 256");
++MODULE_PARM_DESC(frandom_chunklimit,"Limit for read() block size. 0 (default) is unlimited, otherwise must be >= 256");
++
++struct frandom_state
++{
++ struct semaphore sem; /* Semaphore on the state structure */
++
++ u8 S[256]; /* The state array */
++ u8 i;
++ u8 j;
++
++ char *buf;
++};
++
++static struct frandom_state *erandom_state;
++
++static inline void swap_byte(u8 *a, u8 *b)
++{
++ u8 swapByte;
++
++ swapByte = *a;
++ *a = *b;
++ *b = swapByte;
++}
++
++static void init_rand_state(struct frandom_state *state, int seedflag);
++
++void erandom_get_random_bytes(char *buf, size_t count)
++{
++ struct frandom_state *state = erandom_state;
++ int k;
++
++ unsigned int i;
++ unsigned int j;
++ u8 *S;
++
++ /* If we fail to get the semaphore, we revert to external random data.
++ Since semaphore blocking is expected to be very rare, and interrupts
++ during these rare and very short periods of time even less frequent,
++ we take the better-safe-than-sorry approach, and fill the buffer
++   with some expensive random data, in case the caller wasn't aware of this
++ possibility, and expects random data anyhow.
++ */
++
++ if (down_interruptible(&state->sem)) {
++ get_random_bytes(buf, count);
++ return;
++ }
++
++ /* We seed erandom as late as possible, hoping that the kernel's main
++ RNG is already restored in the boot sequence (not critical, but
++   better).
++ */
++
++ if (!erandom_seeded) {
++ erandom_seeded = 1;
++ init_rand_state(state, EXTERNAL_SEED);
++ printk(KERN_INFO "frandom: Seeded global generator now (used by erandom)\n");
++ }
++
++ i = state->i;
++ j = state->j;
++ S = state->S;
++
++ for (k=0; k<count; k++) {
++ i = (i + 1) & 0xff;
++ j = (j + S[i]) & 0xff;
++ swap_byte(&S[i], &S[j]);
++ *buf++ = S[(S[i] + S[j]) & 0xff];
++ }
++
++ state->i = i;
++ state->j = j;
++
++ up(&state->sem);
++}
++
++static void init_rand_state(struct frandom_state *state, int seedflag)
++{
++ unsigned int i, j, k;
++ u8 *S;
++ u8 *seed = state->buf;
++
++ if (seedflag == INTERNAL_SEED)
++ erandom_get_random_bytes(seed, 256);
++ else
++ get_random_bytes(seed, 256);
++
++ S = state->S;
++ for (i=0; i<256; i++)
++ *S++=i;
++
++ j=0;
++ S = state->S;
++
++ for (i=0; i<256; i++) {
++ j = (j + S[i] + *seed++) & 0xff;
++ swap_byte(&S[i], &S[j]);
++ }
++
++ /* It's considered good practice to discard the first 256 bytes
++ generated. So we do it:
++ */
++
++ i=0; j=0;
++ for (k=0; k<256; k++) {
++ i = (i + 1) & 0xff;
++ j = (j + S[i]) & 0xff;
++ swap_byte(&S[i], &S[j]);
++ }
++
++ state->i = i; /* Save state */
++ state->j = j;
++}
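The state array, the seeding loop and the byte generator above are the classic RC4 (arcfour) key schedule and keystream, including the customary discard of the first 256 output bytes; this is what makes frandom fast, and also why it is a pseudo-random source rather than a cryptographic one. A self-contained userspace sketch of the same logic, with a fixed toy seed standing in for the entropy the driver pulls from get_random_bytes():

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t S[256];
    static uint8_t gi, gj;   /* persistent i/j, like state->i and state->j */

    static void swap_byte(uint8_t *a, uint8_t *b)
    {
        uint8_t t = *a; *a = *b; *b = t;
    }

    /* Key schedule, as in init_rand_state(): permute S by the 256-byte seed. */
    static void rc4_init(const uint8_t seed[256])
    {
        unsigned int i, j = 0, k;

        for (i = 0; i < 256; i++)
            S[i] = (uint8_t)i;
        for (i = 0; i < 256; i++) {
            j = (j + S[i] + seed[i]) & 0xff;
            swap_byte(&S[i], &S[j]);
        }

        /* Discard the first 256 output bytes, as the driver does. */
        gi = gj = 0;
        for (k = 0; k < 256; k++) {
            gi = (gi + 1) & 0xff;
            gj = (gj + S[gi]) & 0xff;
            swap_byte(&S[gi], &S[gj]);
        }
    }

    /* Keystream, as in erandom_get_random_bytes() and frandom_read(). */
    static void rc4_bytes(uint8_t *buf, size_t count)
    {
        while (count--) {
            gi = (gi + 1) & 0xff;
            gj = (gj + S[gi]) & 0xff;
            swap_byte(&S[gi], &S[gj]);
            *buf++ = S[(S[gi] + S[gj]) & 0xff];
        }
    }

    int main(void)
    {
        uint8_t seed[256], out[16];
        size_t i;

        for (i = 0; i < 256; i++)  /* toy seed, for illustration only */
            seed[i] = (uint8_t)(i * 7 + 3);
        rc4_init(seed);
        rc4_bytes(out, sizeof(out));
        for (i = 0; i < sizeof(out); i++)
            printf("%02x", out[i]);
        putchar('\n');
        return 0;
    }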
++
++static int frandom_open(struct inode *inode, struct file *filp)
++{
++
++ struct frandom_state *state;
++
++ int num = iminor(inode);
++
++	/* This should never happen, now that the minors are registered
++ * explicitly
++ */
++ if ((num != frandom_minor) && (num != erandom_minor)) return -ENODEV;
++
++ state = kmalloc(sizeof(struct frandom_state), GFP_KERNEL);
++ if (!state)
++ return -ENOMEM;
++
++ state->buf = kmalloc(frandom_bufsize, GFP_KERNEL);
++ if (!state->buf) {
++ kfree(state);
++ return -ENOMEM;
++ }
++
++ sema_init(&state->sem, 1); /* Init semaphore as a mutex */
++
++ if (num == frandom_minor)
++ init_rand_state(state, EXTERNAL_SEED);
++ else
++ init_rand_state(state, INTERNAL_SEED);
++
++ filp->private_data = state;
++
++ return 0; /* Success */
++}
++
++static int frandom_release(struct inode *inode, struct file *filp)
++{
++
++ struct frandom_state *state = filp->private_data;
++
++ kfree(state->buf);
++ kfree(state);
++
++ return 0;
++}
++
++static ssize_t frandom_read(struct file *filp, char *buf, size_t count,
++ loff_t *f_pos)
++{
++ struct frandom_state *state = filp->private_data;
++ ssize_t ret;
++ int dobytes, k;
++ char *localbuf;
++
++ unsigned int i;
++ unsigned int j;
++ u8 *S;
++
++ if (down_interruptible(&state->sem))
++ return -ERESTARTSYS;
++
++ if ((frandom_chunklimit > 0) && (count > frandom_chunklimit))
++ count = frandom_chunklimit;
++
++ ret = count; /* It's either everything or an error... */
++
++ i = state->i;
++ j = state->j;
++ S = state->S;
++
++ while (count) {
++ if (count > frandom_bufsize)
++ dobytes = frandom_bufsize;
++ else
++ dobytes = count;
++
++ localbuf = state->buf;
++
++ for (k=0; k<dobytes; k++) {
++ i = (i + 1) & 0xff;
++ j = (j + S[i]) & 0xff;
++ swap_byte(&S[i], &S[j]);
++ *localbuf++ = S[(S[i] + S[j]) & 0xff];
++ }
++
++ if (copy_to_user(buf, state->buf, dobytes)) {
++ ret = -EFAULT;
++ goto out;
++ }
++
++ buf += dobytes;
++ count -= dobytes;
++ }
++
++ out:
++ state->i = i;
++ state->j = j;
++
++ up(&state->sem);
++ return ret;
++}
++
++static struct file_operations frandom_fops = {
++	.read    = frandom_read,
++	.open    = frandom_open,
++	.release = frandom_release,
++};
++
++static void frandom_cleanup_module(void) {
++ unregister_chrdev_region(MKDEV(frandom_major, erandom_minor), 1);
++ cdev_del(&erandom_cdev);
++ device_destroy(frandom_class, MKDEV(frandom_major, erandom_minor));
++
++ unregister_chrdev_region(MKDEV(frandom_major, frandom_minor), 1);
++ cdev_del(&frandom_cdev);
++ device_destroy(frandom_class, MKDEV(frandom_major, frandom_minor));
++ class_destroy(frandom_class);
++
++ kfree(erandom_state->buf);
++ kfree(erandom_state);
++}
++
++
++static int frandom_init_module(void)
++{
++ int result;
++
++	/* The buffer size MUST be at least 256 bytes, because init_rand_state()
++	   assumes at least that length.
++ */
++ if (frandom_bufsize < 256) {
++ printk(KERN_ERR "frandom: Refused to load because frandom_bufsize=%d < 256\n",frandom_bufsize);
++ return -EINVAL;
++ }
++ if ((frandom_chunklimit != 0) && (frandom_chunklimit < 256)) {
++ printk(KERN_ERR "frandom: Refused to load because frandom_chunklimit=%d < 256 and != 0\n",frandom_chunklimit);
++ return -EINVAL;
++ }
++
++ erandom_state = kmalloc(sizeof(struct frandom_state), GFP_KERNEL);
++ if (!erandom_state)
++ return -ENOMEM;
++
++ /* This specific buffer is only used for seeding, so we need
++ 256 bytes exactly */
++ erandom_state->buf = kmalloc(256, GFP_KERNEL);
++ if (!erandom_state->buf) {
++ kfree(erandom_state);
++ return -ENOMEM;
++ }
++
++ sema_init(&erandom_state->sem, 1); /* Init semaphore as a mutex */
++
++ erandom_seeded = 0;
++
++ frandom_class = class_create(THIS_MODULE, "fastrng");
++ if (IS_ERR(frandom_class)) {
++ result = PTR_ERR(frandom_class);
++ printk(KERN_WARNING "frandom: Failed to register class fastrng\n");
++ goto error0;
++ }
++
++ /*
++ * Register your major, and accept a dynamic number. This is the
++	 * first thing to do, in order to avoid releasing another module's
++ * fops in frandom_cleanup_module()
++ */
++
++ cdev_init(&frandom_cdev, &frandom_fops);
++ frandom_cdev.owner = THIS_MODULE;
++ result = cdev_add(&frandom_cdev, MKDEV(frandom_major, frandom_minor), 1);
++ if (result) {
++ printk(KERN_WARNING "frandom: Failed to add cdev for /dev/frandom\n");
++ goto error1;
++ }
++
++ result = register_chrdev_region(MKDEV(frandom_major, frandom_minor), 1, "/dev/frandom");
++ if (result < 0) {
++ printk(KERN_WARNING "frandom: can't get major/minor %d/%d\n", frandom_major, frandom_minor);
++ goto error2;
++ }
++
++ frandom_device = device_create(frandom_class, NULL, MKDEV(frandom_major, frandom_minor), NULL, "frandom");
++
++ if (IS_ERR(frandom_device)) {
++ printk(KERN_WARNING "frandom: Failed to create frandom device\n");
++ goto error3;
++ }
++
++ cdev_init(&erandom_cdev, &frandom_fops);
++ erandom_cdev.owner = THIS_MODULE;
++ result = cdev_add(&erandom_cdev, MKDEV(frandom_major, erandom_minor), 1);
++ if (result) {
++ printk(KERN_WARNING "frandom: Failed to add cdev for /dev/erandom\n");
++ goto error4;
++ }
++
++ result = register_chrdev_region(MKDEV(frandom_major, erandom_minor), 1, "/dev/erandom");
++ if (result < 0) {
++ printk(KERN_WARNING "frandom: can't get major/minor %d/%d\n", frandom_major, erandom_minor);
++ goto error5;
++ }
++
++ erandom_device = device_create(frandom_class, NULL, MKDEV(frandom_major, erandom_minor), NULL, "erandom");
++
++ if (IS_ERR(erandom_device)) {
++ printk(KERN_WARNING "frandom: Failed to create erandom device\n");
++ goto error6;
++ }
++ return 0; /* succeed */
++
++ error6:
++ unregister_chrdev_region(MKDEV(frandom_major, erandom_minor), 1);
++ error5:
++ cdev_del(&erandom_cdev);
++ error4:
++ device_destroy(frandom_class, MKDEV(frandom_major, frandom_minor));
++ error3:
++ unregister_chrdev_region(MKDEV(frandom_major, frandom_minor), 1);
++ error2:
++ cdev_del(&frandom_cdev);
++ error1:
++ class_destroy(frandom_class);
++ error0:
++ kfree(erandom_state->buf);
++ kfree(erandom_state);
++
++ return result;
++}
++
++module_init(frandom_init_module);
++module_exit(frandom_cleanup_module);
++
++EXPORT_SYMBOL(erandom_get_random_bytes);
+diff -Nur linux-4.1.3/drivers/char/Makefile linux-xbian-imx6/drivers/char/Makefile
+--- linux-4.1.3/drivers/char/Makefile 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/char/Makefile 2015-07-27 23:13:03.982772220 +0200
+@@ -2,6 +2,7 @@
+ # Makefile for the kernel character device drivers.
+ #
+
++obj-m += frandom.o
+ obj-y += mem.o random.o
+ obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o
+ obj-y += misc.o
+diff -Nur linux-4.1.3/drivers/cpufreq/imx6q-cpufreq.c linux-xbian-imx6/drivers/cpufreq/imx6q-cpufreq.c
+--- linux-4.1.3/drivers/cpufreq/imx6q-cpufreq.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/cpufreq/imx6q-cpufreq.c 2015-07-27 23:13:04.158146502 +0200
+@@ -20,6 +20,8 @@
+ #define PU_SOC_VOLTAGE_HIGH 1275000
+ #define FREQ_1P2_GHZ 1200000000
+
++extern int vpu352;
++
+ static struct regulator *arm_reg;
+ static struct regulator *pu_reg;
+ static struct regulator *soc_reg;
+@@ -251,6 +253,10 @@
+ unsigned long volt = be32_to_cpup(val++);
+ if (freq_table[j].frequency == freq) {
+ imx6_soc_volt[soc_opp_count++] = volt;
++ if (vpu352 && freq == 792000) {
++ pr_info("VPU352: increase SOC/PU voltage for VPU352MHz\n");
++ imx6_soc_volt[soc_opp_count-1] = 1250000;
++ }
+ break;
+ }
+ }
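
The hunk above hooks into the loop that pairs each cpufreq table entry with its (frequency, voltage) operating point from the device tree, and forces the SOC/PU voltage of the 792 MHz point to 1.25 V when the VPU is clocked at 352 MHz. A self-contained sketch of that scan-and-override logic; the table layout and names here are illustrative, not the driver's:

	#define VPU352_SOC_UV	1250000		/* 1.25 V, as in the hunk above */

	struct opp {
		unsigned int freq_khz;
		unsigned int volt_uv;
	};

	/* Return the SOC/PU voltage for one operating point, with the override. */
	unsigned int soc_volt_for(const struct opp *table, int n,
				  unsigned int freq_khz, int vpu352)
	{
		int i;

		for (i = 0; i < n; i++) {
			if (table[i].freq_khz != freq_khz)
				continue;
			/* 792 MHz needs a higher rail when the VPU runs at 352 MHz */
			if (vpu352 && freq_khz == 792000)
				return VPU352_SOC_UV;
			return table[i].volt_uv;
		}
		return 0;	/* not an advertised operating point */
	}
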
+diff -Nur linux-4.1.3/drivers/crypto/caam/caamalg.c linux-xbian-imx6/drivers/crypto/caam/caamalg.c
+--- linux-4.1.3/drivers/crypto/caam/caamalg.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/crypto/caam/caamalg.c 2015-07-27 23:13:04.205975852 +0200
+@@ -1,7 +1,7 @@
+ /*
+ * caam - Freescale FSL CAAM support for crypto API
+ *
+- * Copyright 2008-2011 Freescale Semiconductor, Inc.
++ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
+ *
+ * Based on talitos crypto API driver.
+ *
+@@ -53,6 +53,7 @@
+ #include "error.h"
+ #include "sg_sw_sec4.h"
+ #include "key_gen.h"
++#include <linux/string.h>
+
+ /*
+ * crypto alg
+@@ -60,68 +61,42 @@
+ #define CAAM_CRA_PRIORITY 3000
+ /* max key is sum of AES_MAX_KEY_SIZE, max split key size */
+ #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \
+- CTR_RFC3686_NONCE_SIZE + \
+ SHA512_DIGEST_SIZE * 2)
+ /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+ #define CAAM_MAX_IV_LENGTH 16
+
+ /* length of descriptors text */
++#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
++
+ #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
+-#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+-#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
++#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
++#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
+ #define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+
+-/* Note: Nonce is counted in enckeylen */
+-#define DESC_AEAD_CTR_RFC3686_LEN (6 * CAAM_CMD_SZ)
+-
+-#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
+-#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
+-#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
+-
+-#define DESC_GCM_BASE (3 * CAAM_CMD_SZ)
+-#define DESC_GCM_ENC_LEN (DESC_GCM_BASE + 23 * CAAM_CMD_SZ)
+-#define DESC_GCM_DEC_LEN (DESC_GCM_BASE + 19 * CAAM_CMD_SZ)
+-
+-#define DESC_RFC4106_BASE (3 * CAAM_CMD_SZ)
+-#define DESC_RFC4106_ENC_LEN (DESC_RFC4106_BASE + 15 * CAAM_CMD_SZ)
+-#define DESC_RFC4106_DEC_LEN (DESC_RFC4106_BASE + 14 * CAAM_CMD_SZ)
+-#define DESC_RFC4106_GIVENC_LEN (DESC_RFC4106_BASE + 21 * CAAM_CMD_SZ)
+-
+-#define DESC_RFC4543_BASE (3 * CAAM_CMD_SZ)
+-#define DESC_RFC4543_ENC_LEN (DESC_RFC4543_BASE + 25 * CAAM_CMD_SZ)
+-#define DESC_RFC4543_DEC_LEN (DESC_RFC4543_BASE + 27 * CAAM_CMD_SZ)
+-#define DESC_RFC4543_GIVENC_LEN (DESC_RFC4543_BASE + 30 * CAAM_CMD_SZ)
+-
+ #define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
+ #define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
+ 20 * CAAM_CMD_SZ)
+ #define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
+ 15 * CAAM_CMD_SZ)
+
+-#define DESC_MAX_USED_BYTES (DESC_RFC4543_GIVENC_LEN + \
++#define DESC_MAX_USED_BYTES (DESC_AEAD_GIVENC_LEN + \
+ CAAM_MAX_KEY_SIZE)
+ #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
+
+ #ifdef DEBUG
+ /* for print_hex_dumps with line references */
++#define xstr(s) str(s)
++#define str(s) #s
+ #define debug(format, arg...) printk(format, arg)
+ #else
+ #define debug(format, arg...)
+ #endif
+-static struct list_head alg_list;
+
+ /* Set DK bit in class 1 operation if shared */
+ static inline void append_dec_op1(u32 *desc, u32 type)
+ {
+ u32 *jump_cmd, *uncond_jump_cmd;
+
+- /* DK bit is valid only for AES */
+- if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
+- append_operation(desc, type | OP_ALG_AS_INITFINAL |
+- OP_ALG_DECRYPT);
+- return;
+- }
+-
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
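
The xstr()/str() pair restored above is the classic two-level stringification trick: the # operator stringifies its argument exactly as written, so a second macro layer is needed when the argument (here __LINE__) must be expanded first. A tiny standalone example:

	#include <stdio.h>

	#define str(s)	#s
	#define xstr(s)	str(s)

	#define VALUE	42

	int main(void)
	{
		printf("%s\n", str(VALUE));	/* prints "VALUE": no expansion */
		printf("%s\n", xstr(VALUE));	/* prints "42": expanded, then stringified */
		return 0;
	}

This is what lets the driver splice the current line number into dump prefixes such as "aead enc shdesc@"xstr(__LINE__)": ".
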
+@@ -133,26 +108,37 @@
+ }
+
+ /*
++ * Wait for completion of class 1 key loading before allowing
++ * error propagation
++ */
++static inline void append_dec_shr_done(u32 *desc)
++{
++ u32 *jump_cmd;
++
++ jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
++ set_jump_tgt_here(desc, jump_cmd);
++ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
++}
++
++/*
+ * For aead functions, read payload and write payload,
+ * both of which are specified in req->src and req->dst
+ */
+ static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
+ {
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+ KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+ }
+
+ /*
+ * For aead encrypt and decrypt, read iv for both classes
+ */
+-static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset)
++static inline void aead_append_ld_iv(u32 *desc, int ivsize)
+ {
+- append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT |
+- (ivoffset << LDST_OFFSET_SHIFT));
+- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+- (ivoffset << MOVE_OFFSET_SHIFT) | ivsize);
++ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
++ LDST_CLASS_1_CCB | ivsize);
++ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
+ }
+
+ /*
+@@ -198,309 +184,68 @@
+ };
+
+ static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
+- int keys_fit_inline, bool is_rfc3686)
++ int keys_fit_inline)
+ {
+- u32 *nonce;
+- unsigned int enckeylen = ctx->enckeylen;
+-
+- /*
+- * RFC3686 specific:
+- * | ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
+- * | enckeylen = encryption key size + nonce size
+- */
+- if (is_rfc3686)
+- enckeylen -= CTR_RFC3686_NONCE_SIZE;
+-
+ if (keys_fit_inline) {
+ append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+ ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ append_key_as_imm(desc, (void *)ctx->key +
+- ctx->split_key_pad_len, enckeylen,
+- enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
++ ctx->split_key_pad_len, ctx->enckeylen,
++ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ } else {
+ append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
+- enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+- }
+-
+- /* Load Counter into CONTEXT1 reg */
+- if (is_rfc3686) {
+- nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
+- enckeylen);
+- append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+- append_move(desc,
+- MOVE_SRC_OUTFIFO |
+- MOVE_DEST_CLASS1CTX |
+- (16 << MOVE_OFFSET_SHIFT) |
+- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
++ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ }
+ }
+
+ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
+- int keys_fit_inline, bool is_rfc3686)
++ int keys_fit_inline)
+ {
+ u32 *key_jump_cmd;
+
+- /* Note: Context registers are saved. */
+- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+-
+- /* Skip if already shared */
+- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+- JUMP_COND_SHRD);
+-
+- append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
+-
+- set_jump_tgt_here(desc, key_jump_cmd);
+-}
+-
+-static int aead_null_set_sh_desc(struct crypto_aead *aead)
+-{
+- struct aead_tfm *tfm = &aead->base.crt_aead;
+- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+- struct device *jrdev = ctx->jrdev;
+- bool keys_fit_inline = false;
+- u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
+- u32 *desc;
+-
+- /*
+- * Job Descriptor and Shared Descriptors
+- * must all fit into the 64-word Descriptor h/w Buffer
+- */
+- if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
+- ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
+-
+- /* aead_encrypt shared descriptor */
+- desc = ctx->sh_desc_enc;
+-
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+- if (keys_fit_inline)
+- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+- ctx->split_key_len, CLASS_2 |
+- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+- else
+- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /* cryptlen = seqoutlen - authsize */
+- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+-
+- /*
+- * NULL encryption; IV is zero
+- * assoclen = (assoclen + cryptlen) - cryptlen
+- */
+- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+-
+- /* read assoc before reading payload */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+- KEY_VLF);
+-
+- /* Prepare to read and write cryptlen bytes */
+- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+-
+- /*
+- * MOVE_LEN opcode is not available in all SEC HW revisions,
+- * thus need to do some magic, i.e. self-patch the descriptor
+- * buffer.
+- */
+- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+- MOVE_DEST_MATH3 |
+- (0x6 << MOVE_LEN_SHIFT));
+- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
+- MOVE_DEST_DESCBUF |
+- MOVE_WAITCOMP |
+- (0x8 << MOVE_LEN_SHIFT));
+-
+- /* Class 2 operation */
+- append_operation(desc, ctx->class2_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+-
+- /* Read and write cryptlen bytes */
+- aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+-
+- set_move_tgt_here(desc, read_move_cmd);
+- set_move_tgt_here(desc, write_move_cmd);
+- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+- MOVE_AUX_LS);
+-
+- /* Write ICV */
+- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+- LDST_SRCDST_BYTE_CONTEXT);
+-
+- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR,
+- "aead null enc shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
+-
+- /*
+- * Job Descriptor and Shared Descriptors
+- * must all fit into the 64-word Descriptor h/w Buffer
+- */
+- keys_fit_inline = false;
+- if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
+- ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
+-
+- desc = ctx->sh_desc_dec;
+
+- /* aead_decrypt shared descriptor */
+- init_sh_desc(desc, HDR_SHARE_SERIAL);
++ append_key_aead(desc, ctx, keys_fit_inline);
+
+- /* Skip if already shared */
+- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+- JUMP_COND_SHRD);
+- if (keys_fit_inline)
+- append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+- ctx->split_key_len, CLASS_2 |
+- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+- else
+- append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+- KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+- /* Class 2 operation */
+- append_operation(desc, ctx->class2_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+-
+- /* assoclen + cryptlen = seqinlen - ivsize - authsize */
+- append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+- ctx->authsize + tfm->ivsize);
+- /* assoclen = (assoclen + cryptlen) - cryptlen */
+- append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+- append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+-
+- /* read assoc before reading payload */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+- KEY_VLF);
+-
+- /* Prepare to read and write cryptlen bytes */
+- append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+- append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+-
+- /*
+- * MOVE_LEN opcode is not available in all SEC HW revisions,
+- * thus need to do some magic, i.e. self-patch the descriptor
+- * buffer.
+- */
+- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+- MOVE_DEST_MATH2 |
+- (0x6 << MOVE_LEN_SHIFT));
+- write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
+- MOVE_DEST_DESCBUF |
+- MOVE_WAITCOMP |
+- (0x8 << MOVE_LEN_SHIFT));
+-
+- /* Read and write cryptlen bytes */
+- aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+-
+- /*
+- * Insert a NOP here, since we need at least 4 instructions between
+- * code patching the descriptor buffer and the location being patched.
+- */
+- jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+- set_jump_tgt_here(desc, jump_cmd);
+-
+- set_move_tgt_here(desc, read_move_cmd);
+- set_move_tgt_here(desc, write_move_cmd);
+- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+- MOVE_AUX_LS);
+- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+-
+- /* Load ICV */
+- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
+- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+-
+- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR,
+- "aead null dec shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
+-
+- return 0;
++ /* Propagate errors from shared to job descriptor */
++ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+ }
+
+ static int aead_set_sh_desc(struct crypto_aead *aead)
+ {
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+- struct crypto_tfm *ctfm = crypto_aead_tfm(aead);
+- const char *alg_name = crypto_tfm_alg_name(ctfm);
+ struct device *jrdev = ctx->jrdev;
+- bool keys_fit_inline;
++ bool keys_fit_inline = false;
++ u32 *key_jump_cmd, *jump_cmd;
+ u32 geniv, moveiv;
+- u32 ctx1_iv_off = 0;
+ u32 *desc;
+- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+- OP_ALG_AAI_CTR_MOD128);
+- const bool is_rfc3686 = (ctr_mode &&
+- (strstr(alg_name, "rfc3686") != NULL));
+
+- if (!ctx->authsize)
++ if (!ctx->enckeylen || !ctx->authsize)
+ return 0;
+
+- /* NULL encryption / decryption */
+- if (!ctx->enckeylen)
+- return aead_null_set_sh_desc(aead);
+-
+- /*
+- * AES-CTR needs to load IV in CONTEXT1 reg
+- * at an offset of 128bits (16bytes)
+- * CONTEXT1[255:128] = IV
+- */
+- if (ctr_mode)
+- ctx1_iv_off = 16;
+-
+- /*
+- * RFC3686 specific:
+- * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+- */
+- if (is_rfc3686)
+- ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+-
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+- keys_fit_inline = false;
+ if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
+- ctx->split_key_pad_len + ctx->enckeylen +
+- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
++ ctx->split_key_pad_len + ctx->enckeylen <=
+ CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ /* aead_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+
+- /* Note: Context registers are saved. */
+- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
++ init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+@@ -512,21 +257,13 @@
+ /* assoclen + cryptlen = seqinlen - ivsize */
+ append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+
+- /* assoclen = (assoclen + cryptlen) - cryptlen */
++	/* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+- aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);
+-
+- /* Load Counter into CONTEXT1 reg */
+- if (is_rfc3686)
+- append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+- LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT |
+- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+- LDST_OFFSET_SHIFT));
++ aead_append_ld_iv(desc, tfm->ivsize);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+@@ -549,35 +286,46 @@
+ return -ENOMEM;
+ }
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+ #endif
+
++ dma_sync_single_for_cpu(jrdev, ctx->sh_desc_enc_dma, desc_bytes(desc),
++ DMA_TO_DEVICE);
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+- keys_fit_inline = false;
+ if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
+- ctx->split_key_pad_len + ctx->enckeylen +
+- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
++ ctx->split_key_pad_len + ctx->enckeylen <=
+ CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+- /* aead_decrypt shared descriptor */
+ desc = ctx->sh_desc_dec;
+
+- /* Note: Context registers are saved. */
+- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
++ /* aead_decrypt shared descriptor */
++ init_sh_desc(desc, HDR_SHARE_SERIAL);
++
++ /* Skip if already shared */
++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++
++ append_key_aead(desc, ctx, keys_fit_inline);
++
++ /* Only propagate error immediately if shared */
++ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
++ set_jump_tgt_here(desc, key_jump_cmd);
++ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
++ set_jump_tgt_here(desc, jump_cmd);
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+- /* assoclen + cryptlen = seqinlen - ivsize - authsize */
++	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+- ctx->authsize + tfm->ivsize);
++			       ctx->authsize + tfm->ivsize);
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+@@ -586,22 +334,9 @@
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+- aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);
++ aead_append_ld_iv(desc, tfm->ivsize);
+
+- /* Load Counter into CONTEXT1 reg */
+- if (is_rfc3686)
+- append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+- LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT |
+- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+- LDST_OFFSET_SHIFT));
+-
+- /* Choose operation */
+- if (ctr_mode)
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+- else
+- append_dec_op1(desc, ctx->class1_alg_type);
++ append_dec_op1(desc, ctx->class1_alg_type);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+@@ -611,6 +346,7 @@
+ /* Load ICV */
+ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
++ append_dec_shr_done(desc);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+@@ -620,27 +356,26 @@
+ return -ENOMEM;
+ }
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+ #endif
++ dma_sync_single_for_cpu(jrdev, ctx->sh_desc_dec_dma, desc_bytes(desc),
++ DMA_TO_DEVICE);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+- keys_fit_inline = false;
+ if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
+- ctx->split_key_pad_len + ctx->enckeylen +
+- (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
++ ctx->split_key_pad_len + ctx->enckeylen <=
+ CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ /* aead_givencrypt shared descriptor */
+ desc = ctx->sh_desc_givenc;
+
+- /* Note: Context registers are saved. */
+- init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
++ init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+@@ -649,16 +384,13 @@
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+- append_move(desc, MOVE_WAITCOMP |
+- MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
+- (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+- (tfm->ivsize << MOVE_LEN_SHIFT));
++ append_move(desc, MOVE_SRC_INFIFO |
++ MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Copy IV to class 1 context */
+- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
+- (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+- (tfm->ivsize << MOVE_LEN_SHIFT));
++ append_move(desc, MOVE_SRC_CLASS1CTX |
++ MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Return to encryption */
+ append_operation(desc, ctx->class2_alg_type |
+@@ -674,7 +406,7 @@
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+- /* Copy iv from outfifo to class 2 fifo */
++	/* Copy iv from class 1 ctx to class 2 fifo */
+ moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
+@@ -682,14 +414,6 @@
+ append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+
+- /* Load Counter into CONTEXT1 reg */
+- if (is_rfc3686)
+- append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+- LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT |
+- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+- LDST_OFFSET_SHIFT));
+-
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+@@ -717,10 +441,12 @@
+ return -ENOMEM;
+ }
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+ #endif
++ dma_sync_single_for_cpu(jrdev, ctx->sh_desc_givenc_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+ return 0;
+ }
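
The dma_sync_single_for_cpu() calls added after each shared-descriptor mapping follow the streaming-DMA ownership model: after dma_map_single() the buffer belongs to the device, and explicit sync calls mark every handoff between CPU and device. A hedged sketch of that pattern; dev, buf and len are placeholders, not names from this driver:

	#include <linux/dma-mapping.h>
	#include <linux/string.h>

	static int demo_dma_publish(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;		/* never use an unchecked handle */

		/*
		 * The device owns the buffer now. Before the CPU touches it
		 * again, take ownership back; hand it off again before the
		 * device next reads it. The direction must match the mapping.
		 */
		dma_sync_single_for_cpu(dev, handle, len, DMA_TO_DEVICE);
		memset(buf, 0, len);		/* CPU access is safe here */
		dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);

		return 0;
	}
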
+@@ -736,977 +462,84 @@
+ return 0;
+ }
+
+-static int gcm_set_sh_desc(struct crypto_aead *aead)
++static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
++ u32 authkeylen)
+ {
+- struct aead_tfm *tfm = &aead->base.crt_aead;
++ return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
++ ctx->split_key_pad_len, key_in, authkeylen,
++ ctx->alg_op);
++}
++
++static int aead_setkey(struct crypto_aead *aead,
++ const u8 *key, unsigned int keylen)
++{
++ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
++ static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+- bool keys_fit_inline = false;
+- u32 *key_jump_cmd, *zero_payload_jump_cmd,
+- *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
+- u32 *desc;
+-
+- if (!ctx->enckeylen || !ctx->authsize)
+- return 0;
+-
+- /*
+- * AES GCM encrypt shared descriptor
+- * Job Descriptor and Shared Descriptor
+- * must fit into the 64-word Descriptor h/w Buffer
+- */
+- if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN +
+- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
++ struct rtattr *rta = (void *)key;
++ struct crypto_authenc_key_param *param;
++ unsigned int authkeylen;
++ unsigned int enckeylen;
++ int ret = 0;
+
+- desc = ctx->sh_desc_enc;
++ param = RTA_DATA(rta);
++ enckeylen = be32_to_cpu(param->enckeylen);
+
+- init_sh_desc(desc, HDR_SHARE_SERIAL);
++ key += RTA_ALIGN(rta->rta_len);
++ keylen -= RTA_ALIGN(rta->rta_len);
+
+- /* skip key loading if they are loaded due to sharing */
+- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+- JUMP_COND_SHRD | JUMP_COND_SELF);
+- if (keys_fit_inline)
+- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+- else
+- append_key(desc, ctx->key_dma, ctx->enckeylen,
+- CLASS_1 | KEY_DEST_CLASS_REG);
+- set_jump_tgt_here(desc, key_jump_cmd);
++ if (keylen < enckeylen)
++ goto badkey;
+
+- /* class 1 operation */
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
++ authkeylen = keylen - enckeylen;
+
+- /* cryptlen = seqoutlen - authsize */
+- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
++ if (keylen > CAAM_MAX_KEY_SIZE)
++ goto badkey;
+
+- /* assoclen + cryptlen = seqinlen - ivsize */
+- append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
++ /* Pick class 2 key length from algorithm submask */
++ ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
++ OP_ALG_ALGSEL_SHIFT] * 2;
++ ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
+
+- /* assoclen = (assoclen + cryptlen) - cryptlen */
+- append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ);
++#ifdef DEBUG
++ printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
++ keylen, enckeylen, authkeylen);
++ printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
++ ctx->split_key_len, ctx->split_key_pad_len);
++ print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
+
+- /* if cryptlen is ZERO jump to zero-payload commands */
+- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+- zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+- JUMP_COND_MATH_Z);
+- /* read IV */
+- append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+-
+- /* if assoclen is ZERO, skip reading the assoc data */
+- append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+- JUMP_COND_MATH_Z);
+-
+- /* read assoc data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+- set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
++ ret = gen_split_aead_key(ctx, key, authkeylen);
++ if (ret) {
++ goto badkey;
++ }
+
+- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
++ /* postpend encryption key to auth split key */
++ memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
+
+- /* write encrypted data */
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
++ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
++ enckeylen, DMA_TO_DEVICE);
++ if (dma_mapping_error(jrdev, ctx->key_dma)) {
++ dev_err(jrdev, "unable to map key i/o memory\n");
++ return -ENOMEM;
++ }
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
++ ctx->split_key_pad_len + enckeylen, 1);
++#endif
++ dma_sync_single_for_device(jrdev, ctx->key_dma,
++ ctx->split_key_pad_len + enckeylen,
++ DMA_TO_DEVICE);
+
+- /* read payload data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+-
+- /* jump the zero-payload commands */
+- append_jump(desc, JUMP_TEST_ALL | 7);
+-
+- /* zero-payload commands */
+- set_jump_tgt_here(desc, zero_payload_jump_cmd);
+-
+- /* if assoclen is ZERO, jump to IV reading - is the only input data */
+- append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+- zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+- JUMP_COND_MATH_Z);
+- /* read IV */
+- append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+-
+- /* read assoc data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
+-
+- /* jump to ICV writing */
+- append_jump(desc, JUMP_TEST_ALL | 2);
+-
+- /* read IV - is the only input data */
+- set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+- append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
+- FIFOLD_TYPE_LAST1);
+-
+- /* write ICV */
+- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT);
+-
+- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
+-
+- /*
+- * Job Descriptor and Shared Descriptors
+- * must all fit into the 64-word Descriptor h/w Buffer
+- */
+- keys_fit_inline = false;
+- if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN +
+- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
+-
+- desc = ctx->sh_desc_dec;
+-
+- init_sh_desc(desc, HDR_SHARE_SERIAL);
+-
+- /* skip key loading if they are loaded due to sharing */
+- key_jump_cmd = append_jump(desc, JUMP_JSL |
+- JUMP_TEST_ALL | JUMP_COND_SHRD |
+- JUMP_COND_SELF);
+- if (keys_fit_inline)
+- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+- else
+- append_key(desc, ctx->key_dma, ctx->enckeylen,
+- CLASS_1 | KEY_DEST_CLASS_REG);
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /* class 1 operation */
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+-
+- /* assoclen + cryptlen = seqinlen - ivsize - icvsize */
+- append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+- ctx->authsize + tfm->ivsize);
+-
+- /* assoclen = (assoclen + cryptlen) - cryptlen */
+- append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+- append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ);
+-
+- /* read IV */
+- append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+-
+- /* jump to zero-payload command if cryptlen is zero */
+- append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+- zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+- JUMP_COND_MATH_Z);
+-
+- append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+- /* if asoclen is ZERO, skip reading assoc data */
+- zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+- JUMP_COND_MATH_Z);
+- /* read assoc data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+- set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+-
+- append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+-
+- /* store encrypted data */
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+-
+- /* read payload data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+-
+- /* jump the zero-payload commands */
+- append_jump(desc, JUMP_TEST_ALL | 4);
+-
+- /* zero-payload command */
+- set_jump_tgt_here(desc, zero_payload_jump_cmd);
+-
+- /* if assoclen is ZERO, jump to ICV reading */
+- append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+- zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+- JUMP_COND_MATH_Z);
+- /* read assoc data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+- set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+-
+- /* read ICV */
+- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
+- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+-
+- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
+-
+- return 0;
+-}
+-
+-static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+-{
+- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+-
+- ctx->authsize = authsize;
+- gcm_set_sh_desc(authenc);
+-
+- return 0;
+-}
+-
+-static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+-{
+- struct aead_tfm *tfm = &aead->base.crt_aead;
+- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+- struct device *jrdev = ctx->jrdev;
+- bool keys_fit_inline = false;
+- u32 *key_jump_cmd, *move_cmd, *write_iv_cmd;
+- u32 *desc;
+- u32 geniv;
+-
+- if (!ctx->enckeylen || !ctx->authsize)
+- return 0;
+-
+- /*
+- * RFC4106 encrypt shared descriptor
+- * Job Descriptor and Shared Descriptor
+- * must fit into the 64-word Descriptor h/w Buffer
+- */
+- if (DESC_RFC4106_ENC_LEN + DESC_JOB_IO_LEN +
+- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
+-
+- desc = ctx->sh_desc_enc;
+-
+- init_sh_desc(desc, HDR_SHARE_SERIAL);
+-
+- /* Skip key loading if it is loaded due to sharing */
+- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+- JUMP_COND_SHRD);
+- if (keys_fit_inline)
+- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+- else
+- append_key(desc, ctx->key_dma, ctx->enckeylen,
+- CLASS_1 | KEY_DEST_CLASS_REG);
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /* Class 1 operation */
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+-
+- /* cryptlen = seqoutlen - authsize */
+- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+-
+- /* assoclen + cryptlen = seqinlen - ivsize */
+- append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+-
+- /* assoclen = (assoclen + cryptlen) - cryptlen */
+- append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+-
+- /* Read Salt */
+- append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
+- 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
+- /* Read AES-GCM-ESP IV */
+- append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+-
+- /* Read assoc data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+-
+- /* Will read cryptlen bytes */
+- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+-
+- /* Write encrypted data */
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+-
+- /* Read payload data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+-
+- /* Write ICV */
+- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT);
+-
+- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
+-
+- /*
+- * Job Descriptor and Shared Descriptors
+- * must all fit into the 64-word Descriptor h/w Buffer
+- */
+- keys_fit_inline = false;
+- if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
+- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
+-
+- desc = ctx->sh_desc_dec;
+-
+- init_sh_desc(desc, HDR_SHARE_SERIAL);
+-
+- /* Skip key loading if it is loaded due to sharing */
+- key_jump_cmd = append_jump(desc, JUMP_JSL |
+- JUMP_TEST_ALL | JUMP_COND_SHRD);
+- if (keys_fit_inline)
+- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+- else
+- append_key(desc, ctx->key_dma, ctx->enckeylen,
+- CLASS_1 | KEY_DEST_CLASS_REG);
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /* Class 1 operation */
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+-
+- /* assoclen + cryptlen = seqinlen - ivsize - icvsize */
+- append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+- ctx->authsize + tfm->ivsize);
+-
+- /* assoclen = (assoclen + cryptlen) - cryptlen */
+- append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+- append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+-
+- /* Will write cryptlen bytes */
+- append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+-
+- /* Read Salt */
+- append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
+- 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
+- /* Read AES-GCM-ESP IV */
+- append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+-
+- /* Read assoc data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+-
+- /* Will read cryptlen bytes */
+- append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+-
+- /* Store payload data */
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+-
+- /* Read encrypted data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+-
+- /* Read ICV */
+- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
+- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+-
+- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
+-
+- /*
+- * Job Descriptor and Shared Descriptors
+- * must all fit into the 64-word Descriptor h/w Buffer
+- */
+- keys_fit_inline = false;
+- if (DESC_RFC4106_GIVENC_LEN + DESC_JOB_IO_LEN +
+- ctx->split_key_pad_len + ctx->enckeylen <=
+- CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
+-
+- /* rfc4106_givencrypt shared descriptor */
+- desc = ctx->sh_desc_givenc;
+-
+- init_sh_desc(desc, HDR_SHARE_SERIAL);
+-
+- /* Skip key loading if it is loaded due to sharing */
+- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+- JUMP_COND_SHRD);
+- if (keys_fit_inline)
+- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+- else
+- append_key(desc, ctx->key_dma, ctx->enckeylen,
+- CLASS_1 | KEY_DEST_CLASS_REG);
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /* Generate IV */
+- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+- NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+- move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF |
+- (tfm->ivsize << MOVE_LEN_SHIFT));
+- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+-
+- /* Copy generated IV to OFIFO */
+- write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO |
+- (tfm->ivsize << MOVE_LEN_SHIFT));
+-
+- /* Class 1 operation */
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+-
+- /* ivsize + cryptlen = seqoutlen - authsize */
+- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+-
+- /* assoclen = seqinlen - (ivsize + cryptlen) */
+- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+-
+- /* Will write ivsize + cryptlen */
+- append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);
+-
+- /* Read Salt and generated IV */
+- append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV |
+- FIFOLD_TYPE_FLUSH1 | IMMEDIATE | 12);
+- /* Append Salt */
+- append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
+- set_move_tgt_here(desc, move_cmd);
+- set_move_tgt_here(desc, write_iv_cmd);
+- /* Blank commands. Will be overwritten by generated IV. */
+- append_cmd(desc, 0x00000000);
+- append_cmd(desc, 0x00000000);
+- /* End of blank commands */
+-
+- /* No need to reload iv */
+- append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);
+-
+- /* Read assoc data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+-
+- /* Will read cryptlen */
+- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+-
+- /* Store generated IV and encrypted data */
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+-
+- /* Read payload data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+-
+- /* Write ICV */
+- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT);
+-
+- ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR,
+- "rfc4106 givenc shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
+-
+- return 0;
+-}
+-
+-static int rfc4106_setauthsize(struct crypto_aead *authenc,
+- unsigned int authsize)
+-{
+- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+-
+- ctx->authsize = authsize;
+- rfc4106_set_sh_desc(authenc);
+-
+- return 0;
+-}
+-
+-static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+-{
+- struct aead_tfm *tfm = &aead->base.crt_aead;
+- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+- struct device *jrdev = ctx->jrdev;
+- bool keys_fit_inline = false;
+- u32 *key_jump_cmd, *write_iv_cmd, *write_aad_cmd;
+- u32 *read_move_cmd, *write_move_cmd;
+- u32 *desc;
+- u32 geniv;
+-
+- if (!ctx->enckeylen || !ctx->authsize)
+- return 0;
+-
+- /*
+- * RFC4543 encrypt shared descriptor
+- * Job Descriptor and Shared Descriptor
+- * must fit into the 64-word Descriptor h/w Buffer
+- */
+- if (DESC_RFC4543_ENC_LEN + DESC_JOB_IO_LEN +
+- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
+-
+- desc = ctx->sh_desc_enc;
+-
+- init_sh_desc(desc, HDR_SHARE_SERIAL);
+-
+- /* Skip key loading if it is loaded due to sharing */
+- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+- JUMP_COND_SHRD);
+- if (keys_fit_inline)
+- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+- else
+- append_key(desc, ctx->key_dma, ctx->enckeylen,
+- CLASS_1 | KEY_DEST_CLASS_REG);
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /* Class 1 operation */
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+-
+- /* Load AES-GMAC ESP IV into Math1 register */
+- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
+- LDST_CLASS_DECO | tfm->ivsize);
+-
+- /* Wait the DMA transaction to finish */
+- append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
+- (1 << JUMP_OFFSET_SHIFT));
+-
+- /* Overwrite blank immediate AES-GMAC ESP IV data */
+- write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+- (tfm->ivsize << MOVE_LEN_SHIFT));
+-
+- /* Overwrite blank immediate AAD data */
+- write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+- (tfm->ivsize << MOVE_LEN_SHIFT));
+-
+- /* cryptlen = seqoutlen - authsize */
+- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+-
+- /* assoclen = (seqinlen - ivsize) - cryptlen */
+- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+-
+- /* Read Salt and AES-GMAC ESP IV */
+- append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
+- /* Append Salt */
+- append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
+- set_move_tgt_here(desc, write_iv_cmd);
+- /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
+- append_cmd(desc, 0x00000000);
+- append_cmd(desc, 0x00000000);
+- /* End of blank commands */
+-
+- /* Read assoc data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_AAD);
+-
+- /* Will read cryptlen bytes */
+- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+-
+- /* Will write cryptlen bytes */
+- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+-
+- /*
+- * MOVE_LEN opcode is not available in all SEC HW revisions,
+- * thus need to do some magic, i.e. self-patch the descriptor
+- * buffer.
+- */
+- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+- (0x6 << MOVE_LEN_SHIFT));
+- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+- (0x8 << MOVE_LEN_SHIFT));
+-
+- /* Authenticate AES-GMAC ESP IV */
+- append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+- FIFOLD_TYPE_AAD | tfm->ivsize);
+- set_move_tgt_here(desc, write_aad_cmd);
+- /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
+- append_cmd(desc, 0x00000000);
+- append_cmd(desc, 0x00000000);
+- /* End of blank commands */
+-
+- /* Read and write cryptlen bytes */
+- aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
+-
+- set_move_tgt_here(desc, read_move_cmd);
+- set_move_tgt_here(desc, write_move_cmd);
+- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+- /* Move payload data to OFIFO */
+- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+-
+- /* Write ICV */
+- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT);
+-
+- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
+-
+- /*
+- * Job Descriptor and Shared Descriptors
+- * must all fit into the 64-word Descriptor h/w Buffer
+- */
+- keys_fit_inline = false;
+- if (DESC_RFC4543_DEC_LEN + DESC_JOB_IO_LEN +
+- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
+-
+- desc = ctx->sh_desc_dec;
+-
+- init_sh_desc(desc, HDR_SHARE_SERIAL);
+-
+- /* Skip key loading if it is loaded due to sharing */
+- key_jump_cmd = append_jump(desc, JUMP_JSL |
+- JUMP_TEST_ALL | JUMP_COND_SHRD);
+- if (keys_fit_inline)
+- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+- else
+- append_key(desc, ctx->key_dma, ctx->enckeylen,
+- CLASS_1 | KEY_DEST_CLASS_REG);
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /* Class 1 operation */
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+-
+- /* Load AES-GMAC ESP IV into Math1 register */
+- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
+- LDST_CLASS_DECO | tfm->ivsize);
+-
+- /* Wait the DMA transaction to finish */
+- append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
+- (1 << JUMP_OFFSET_SHIFT));
+-
+- /* assoclen + cryptlen = (seqinlen - ivsize) - icvsize */
+- append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, ctx->authsize);
+-
+- /* Overwrite blank immediate AES-GMAC ESP IV data */
+- write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+- (tfm->ivsize << MOVE_LEN_SHIFT));
+-
+- /* Overwrite blank immediate AAD data */
+- write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+- (tfm->ivsize << MOVE_LEN_SHIFT));
+-
+- /* assoclen = (assoclen + cryptlen) - cryptlen */
+- append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+- append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+-
+- /*
+- * MOVE_LEN opcode is not available in all SEC HW revisions,
+- * thus need to do some magic, i.e. self-patch the descriptor
+- * buffer.
+- */
+- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+- (0x6 << MOVE_LEN_SHIFT));
+- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+- (0x8 << MOVE_LEN_SHIFT));
+-
+- /* Read Salt and AES-GMAC ESP IV */
+- append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
+- /* Append Salt */
+- append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
+- set_move_tgt_here(desc, write_iv_cmd);
+- /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
+- append_cmd(desc, 0x00000000);
+- append_cmd(desc, 0x00000000);
+- /* End of blank commands */
+-
+- /* Read assoc data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_AAD);
+-
+- /* Will read cryptlen bytes */
+- append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+-
+- /* Will write cryptlen bytes */
+- append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+-
+- /* Authenticate AES-GMAC ESP IV */
+- append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+- FIFOLD_TYPE_AAD | tfm->ivsize);
+- set_move_tgt_here(desc, write_aad_cmd);
+- /* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
+- append_cmd(desc, 0x00000000);
+- append_cmd(desc, 0x00000000);
+- /* End of blank commands */
+-
+- /* Store payload data */
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+-
+- /* In-snoop cryptlen data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
+- FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
+-
+- set_move_tgt_here(desc, read_move_cmd);
+- set_move_tgt_here(desc, write_move_cmd);
+- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+- /* Move payload data to OFIFO */
+- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+-
+- /* Read ICV */
+- append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
+- FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+-
+- ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
+-
+- /*
+- * Job Descriptor and Shared Descriptors
+- * must all fit into the 64-word Descriptor h/w Buffer
+- */
+- keys_fit_inline = false;
+- if (DESC_RFC4543_GIVENC_LEN + DESC_JOB_IO_LEN +
+- ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+- keys_fit_inline = true;
+-
+- /* rfc4543_givencrypt shared descriptor */
+- desc = ctx->sh_desc_givenc;
+-
+- init_sh_desc(desc, HDR_SHARE_SERIAL);
+-
+- /* Skip key loading if it is loaded due to sharing */
+- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+- JUMP_COND_SHRD);
+- if (keys_fit_inline)
+- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+- else
+- append_key(desc, ctx->key_dma, ctx->enckeylen,
+- CLASS_1 | KEY_DEST_CLASS_REG);
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /* Generate IV */
+- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+- NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+- /* Move generated IV to Math1 register */
+- append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_MATH1 |
+- (tfm->ivsize << MOVE_LEN_SHIFT));
+- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+-
+- /* Overwrite blank immediate AES-GMAC IV data */
+- write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+- (tfm->ivsize << MOVE_LEN_SHIFT));
+-
+- /* Overwrite blank immediate AAD data */
+- write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+- (tfm->ivsize << MOVE_LEN_SHIFT));
+-
+- /* Copy generated IV to OFIFO */
+- append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_OUTFIFO |
+- (tfm->ivsize << MOVE_LEN_SHIFT));
+-
+- /* Class 1 operation */
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+-
+- /* ivsize + cryptlen = seqoutlen - authsize */
+- append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+-
+- /* assoclen = seqinlen - (ivsize + cryptlen) */
+- append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+-
+- /* Will write ivsize + cryptlen */
+- append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);
+-
+- /*
+- * MOVE_LEN opcode is not available in all SEC HW revisions,
+- * thus need to do some magic, i.e. self-patch the descriptor
+- * buffer.
+- */
+- read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+- (0x6 << MOVE_LEN_SHIFT));
+- write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+- (0x8 << MOVE_LEN_SHIFT));
+-
+- /* Read Salt and AES-GMAC generated IV */
+- append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+- FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
+- /* Append Salt */
+- append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
+- set_move_tgt_here(desc, write_iv_cmd);
+- /* Blank commands. Will be overwritten by AES-GMAC generated IV. */
+- append_cmd(desc, 0x00000000);
+- append_cmd(desc, 0x00000000);
+- /* End of blank commands */
+-
+- /* No need to reload iv */
+- append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);
+-
+- /* Read assoc data */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+- FIFOLD_TYPE_AAD);
+-
+- /* Will read cryptlen */
+- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+-
+- /* Authenticate AES-GMAC IV */
+- append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+- FIFOLD_TYPE_AAD | tfm->ivsize);
+- set_move_tgt_here(desc, write_aad_cmd);
+- /* Blank commands. Will be overwritten by AES-GMAC IV. */
+- append_cmd(desc, 0x00000000);
+- append_cmd(desc, 0x00000000);
+- /* End of blank commands */
+-
+- /* Read and write cryptlen bytes */
+- aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
+-
+- set_move_tgt_here(desc, read_move_cmd);
+- set_move_tgt_here(desc, write_move_cmd);
+- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+- /* Move payload data to OFIFO */
+- append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+-
+- /* Write ICV */
+- append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT);
+-
+- ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR,
+- "rfc4543 givenc shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
+-
+- return 0;
+-}
+-
+-static int rfc4543_setauthsize(struct crypto_aead *authenc,
+- unsigned int authsize)
+-{
+- struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+-
+- ctx->authsize = authsize;
+- rfc4543_set_sh_desc(authenc);
+-
+- return 0;
+-}
+-
+-static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
+- u32 authkeylen)
+-{
+- return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
+- ctx->split_key_pad_len, key_in, authkeylen,
+- ctx->alg_op);
+-}
+-
+-static int aead_setkey(struct crypto_aead *aead,
+- const u8 *key, unsigned int keylen)
+-{
+- /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+- struct device *jrdev = ctx->jrdev;
+- struct crypto_authenc_keys keys;
+- int ret = 0;
+-
+- if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+- goto badkey;
+-
+- /* Pick class 2 key length from algorithm submask */
+- ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+- OP_ALG_ALGSEL_SHIFT] * 2;
+- ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
+-
+- if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+- goto badkey;
+-
+-#ifdef DEBUG
+- printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
+- keys.authkeylen + keys.enckeylen, keys.enckeylen,
+- keys.authkeylen);
+- printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
+- ctx->split_key_len, ctx->split_key_pad_len);
+- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+-#endif
+-
+- ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
+- if (ret) {
+- goto badkey;
+- }
+-
+- /* postpend encryption key to auth split key */
+- memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
+-
+- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
+- keys.enckeylen, DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->key_dma)) {
+- dev_err(jrdev, "unable to map key i/o memory\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+- ctx->split_key_pad_len + keys.enckeylen, 1);
+-#endif
+-
+- ctx->enckeylen = keys.enckeylen;
++ ctx->enckeylen = enckeylen;
+
+ ret = aead_set_sh_desc(aead);
+ if (ret) {
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
+- keys.enckeylen, DMA_TO_DEVICE);
++ enckeylen, DMA_TO_DEVICE);
+ }
+
+ return ret;
+@@ -1715,154 +548,20 @@
+ return -EINVAL;
+ }
+
+-static int gcm_setkey(struct crypto_aead *aead,
+- const u8 *key, unsigned int keylen)
+-{
+- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+- struct device *jrdev = ctx->jrdev;
+- int ret = 0;
+-
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+-#endif
+-
+- memcpy(ctx->key, key, keylen);
+- ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->key_dma)) {
+- dev_err(jrdev, "unable to map key i/o memory\n");
+- return -ENOMEM;
+- }
+- ctx->enckeylen = keylen;
+-
+- ret = gcm_set_sh_desc(aead);
+- if (ret) {
+- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+- DMA_TO_DEVICE);
+- }
+-
+- return ret;
+-}
+-
+-static int rfc4106_setkey(struct crypto_aead *aead,
+- const u8 *key, unsigned int keylen)
+-{
+- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+- struct device *jrdev = ctx->jrdev;
+- int ret = 0;
+-
+- if (keylen < 4)
+- return -EINVAL;
+-
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+-#endif
+-
+- memcpy(ctx->key, key, keylen);
+-
+- /*
+- * The last four bytes of the key material are used as the salt value
+- * in the nonce. Update the AES key length.
+- */
+- ctx->enckeylen = keylen - 4;
+-
+- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->key_dma)) {
+- dev_err(jrdev, "unable to map key i/o memory\n");
+- return -ENOMEM;
+- }
+-
+- ret = rfc4106_set_sh_desc(aead);
+- if (ret) {
+- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+- DMA_TO_DEVICE);
+- }
+-
+- return ret;
+-}
+-
+-static int rfc4543_setkey(struct crypto_aead *aead,
+- const u8 *key, unsigned int keylen)
+-{
+- struct caam_ctx *ctx = crypto_aead_ctx(aead);
+- struct device *jrdev = ctx->jrdev;
+- int ret = 0;
+-
+- if (keylen < 4)
+- return -EINVAL;
+-
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+-#endif
+-
+- memcpy(ctx->key, key, keylen);
+-
+- /*
+- * The last four bytes of the key material are used as the salt value
+- * in the nonce. Update the AES key length.
+- */
+- ctx->enckeylen = keylen - 4;
+-
+- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->key_dma)) {
+- dev_err(jrdev, "unable to map key i/o memory\n");
+- return -ENOMEM;
+- }
+-
+- ret = rfc4543_set_sh_desc(aead);
+- if (ret) {
+- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+- DMA_TO_DEVICE);
+- }
+-
+- return ret;
+-}
+-
+ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ const u8 *key, unsigned int keylen)
+ {
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+- struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
+- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
+- const char *alg_name = crypto_tfm_alg_name(tfm);
++ struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
+ struct device *jrdev = ctx->jrdev;
+ int ret = 0;
+- u32 *key_jump_cmd;
++ u32 *key_jump_cmd, *jump_cmd;
+ u32 *desc;
+- u32 *nonce;
+- u32 geniv;
+- u32 ctx1_iv_off = 0;
+- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+- OP_ALG_AAI_CTR_MOD128);
+- const bool is_rfc3686 = (ctr_mode &&
+- (strstr(alg_name, "rfc3686") != NULL));
+
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+ #endif
+- /*
+- * AES-CTR needs to load IV in CONTEXT1 reg
+- * at an offset of 128bits (16bytes)
+- * CONTEXT1[255:128] = IV
+- */
+- if (ctr_mode)
+- ctx1_iv_off = 16;
+-
+- /*
+- * RFC3686 specific:
+- * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+- * | *key = {KEY, NONCE}
+- */
+- if (is_rfc3686) {
+- ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+- keylen -= CTR_RFC3686_NONCE_SIZE;
+- }
+
+ memcpy(ctx->key, key, keylen);
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
+@@ -1872,10 +571,11 @@
+ return -ENOMEM;
+ }
+ ctx->enckeylen = keylen;
++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+
+ /* ablkcipher_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
++ init_sh_desc(desc, HDR_SHARE_SERIAL);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+@@ -1885,32 +585,20 @@
+ ctx->enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+- /* Load nonce into CONTEXT1 reg */
+- if (is_rfc3686) {
+- nonce = (u32 *)(key + keylen);
+- append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+- append_move(desc, MOVE_WAITCOMP |
+- MOVE_SRC_OUTFIFO |
+- MOVE_DEST_CLASS1CTX |
+- (16 << MOVE_OFFSET_SHIFT) |
+- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+- }
+-
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+- /* Load iv */
+- append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
+- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+-
+- /* Load counter into CONTEXT1 reg */
+- if (is_rfc3686)
+- append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+- LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT |
+- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+- LDST_OFFSET_SHIFT));
++ /* Propagate errors from shared to job descriptor */
++ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+
++ /* load IV */
++ if (strncmp(ablkcipher->base.__crt_alg->cra_name, "ctr(aes)", 8) == 0) {
++ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
++ LDST_CLASS_1_CCB | tfm->ivsize |
++ (16 << LDST_OFFSET_SHIFT));
++ } else {
++ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
++ LDST_CLASS_1_CCB | tfm->ivsize);
++ }
+ /* Load operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+@@ -1926,15 +614,17 @@
+ return -ENOMEM;
+ }
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR,
+- "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+ #endif
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
++
+ /* ablkcipher_decrypt shared descriptor */
+ desc = ctx->sh_desc_dec;
+
+- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
++ init_sh_desc(desc, HDR_SHARE_SERIAL);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+@@ -1944,133 +634,49 @@
+ ctx->enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+- /* Load nonce into CONTEXT1 reg */
+- if (is_rfc3686) {
+- nonce = (u32 *)(key + keylen);
+- append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+- append_move(desc, MOVE_WAITCOMP |
+- MOVE_SRC_OUTFIFO |
+- MOVE_DEST_CLASS1CTX |
+- (16 << MOVE_OFFSET_SHIFT) |
+- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+- }
+-
++ /* For ablkcipher, only propagate error immediately if shared */
++ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, key_jump_cmd);
++ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
++ set_jump_tgt_here(desc, jump_cmd);
+
+ /* load IV */
+- append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
+- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+-
+- /* Load counter into CONTEXT1 reg */
+- if (is_rfc3686)
+- append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+- LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT |
+- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+- LDST_OFFSET_SHIFT));
++ if (strncmp(ablkcipher->base.__crt_alg->cra_name, "ctr(aes)", 8) == 0) {
++ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
++ LDST_CLASS_1_CCB | tfm->ivsize |
++ (16 << LDST_OFFSET_SHIFT));
+
+- /* Choose operation */
+- if (ctr_mode)
+ append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+- else
++ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
++ } else {
++ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
++ LDST_CLASS_1_CCB | tfm->ivsize);
++
++ /* Choose operation */
+ append_dec_op1(desc, ctx->class1_alg_type);
++ }
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
++ /* Wait for key to load before allowing error propagation */
++ append_dec_shr_done(desc);
++
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
++ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR,
+- "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, desc,
+- desc_bytes(desc), 1);
+-#endif
+- /* ablkcipher_givencrypt shared descriptor */
+- desc = ctx->sh_desc_givenc;
+-
+- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+- /* Skip if already shared */
+- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+- JUMP_COND_SHRD);
+-
+- /* Load class1 key only */
+- append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+- ctx->enckeylen, CLASS_1 |
+- KEY_DEST_CLASS_REG);
+-
+- /* Load Nonce into CONTEXT1 reg */
+- if (is_rfc3686) {
+- nonce = (u32 *)(key + keylen);
+- append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+- append_move(desc, MOVE_WAITCOMP |
+- MOVE_SRC_OUTFIFO |
+- MOVE_DEST_CLASS1CTX |
+- (16 << MOVE_OFFSET_SHIFT) |
+- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+- }
+- set_jump_tgt_here(desc, key_jump_cmd);
+-
+- /* Generate IV */
+- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+- NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
+- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+- append_move(desc, MOVE_WAITCOMP |
+- MOVE_SRC_INFIFO |
+- MOVE_DEST_CLASS1CTX |
+- (crt->ivsize << MOVE_LEN_SHIFT) |
+- (ctx1_iv_off << MOVE_OFFSET_SHIFT));
+- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+-
+- /* Copy generated IV to memory */
+- append_seq_store(desc, crt->ivsize,
+- LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+- (ctx1_iv_off << LDST_OFFSET_SHIFT));
+-
+- /* Load Counter into CONTEXT1 reg */
+- if (is_rfc3686)
+- append_load_imm_u32(desc, (u32)1, LDST_IMM |
+- LDST_CLASS_1_CCB |
+- LDST_SRCDST_BYTE_CONTEXT |
+- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+- LDST_OFFSET_SHIFT));
+-
+- if (ctx1_iv_off)
+- append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
+- (1 << JUMP_OFFSET_SHIFT));
+-
+- /* Load operation */
+- append_operation(desc, ctx->class1_alg_type |
+- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+-
+- /* Perform operation */
+- ablkcipher_append_src_dst(desc);
+-
+- ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+- desc_bytes(desc),
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR,
+- "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
++ print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+ #endif
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+ return ret;
+ }
+@@ -2195,19 +801,22 @@
+ edesc = (struct aead_edesc *)((char *)desc -
+ offsetof(struct aead_edesc, hw_desc));
+
+- if (err)
+- caam_jr_strstatus(jrdev, err);
++ if (err) {
++ char tmp[CAAM_ERROR_STR_MAX];
++
++ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
++ }
+
+ aead_unmap(jrdev, edesc, req);
+
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+ req->assoclen , 1);
+- print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
+ edesc->src_nents ? 100 : ivsize, 1);
+- print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->cryptlen +
+ ctx->authsize + 4, 1);
+@@ -2235,16 +844,19 @@
+ offsetof(struct aead_edesc, hw_desc));
+
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ ivsize, 1);
+- print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
+- req->cryptlen - ctx->authsize, 1);
++ req->cryptlen, 1);
+ #endif
+
+- if (err)
+- caam_jr_strstatus(jrdev, err);
++ if (err) {
++ char tmp[CAAM_ERROR_STR_MAX];
++
++ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
++ }
+
+ aead_unmap(jrdev, edesc, req);
+
+@@ -2255,7 +867,7 @@
+ err = -EBADMSG;
+
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4,
+ ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
+ sizeof(struct iphdr) + req->assoclen +
+@@ -2263,7 +875,7 @@
+ ctx->authsize + 36, 1);
+ if (!err && edesc->sec4_sg_bytes) {
+ struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
+- print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
+ sg->length + ctx->authsize + 16, 1);
+ }
+@@ -2289,14 +901,17 @@
+ edesc = (struct ablkcipher_edesc *)((char *)desc -
+ offsetof(struct ablkcipher_edesc, hw_desc));
+
+- if (err)
+- caam_jr_strstatus(jrdev, err);
++ if (err) {
++ char tmp[CAAM_ERROR_STR_MAX];
++
++ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
++ }
+
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+- print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ #endif
+@@ -2321,14 +936,17 @@
+
+ edesc = (struct ablkcipher_edesc *)((char *)desc -
+ offsetof(struct ablkcipher_edesc, hw_desc));
+- if (err)
+- caam_jr_strstatus(jrdev, err);
++ if (err) {
++ char tmp[CAAM_ERROR_STR_MAX];
++
++ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
++ }
+
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ ivsize, 1);
+- print_hex_dump(KERN_ERR, "dst @"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ #endif
+@@ -2355,38 +973,29 @@
+ u32 out_options = 0, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, sec4_sg_index = 0;
+- bool is_gcm = false;
+
+ #ifdef DEBUG
+ debug("assoclen %d cryptlen %d authsize %d\n",
+ req->assoclen, req->cryptlen, authsize);
+- print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+ req->assoclen , 1);
+- print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ edesc->src_nents ? 100 : ivsize, 1);
+- print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->cryptlen, 1);
+- print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
+ desc_bytes(sh_desc), 1);
+ #endif
+
+- if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+- OP_ALG_ALGSEL_AES) &&
+- ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
+- is_gcm = true;
+-
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ if (all_contig) {
+- if (is_gcm)
+- src_dma = edesc->iv_dma;
+- else
+- src_dma = sg_dma_address(req->assoc);
++ src_dma = sg_dma_address(req->assoc);
+ in_options = 0;
+ } else {
+ src_dma = edesc->sec4_sg_dma;
+@@ -2394,9 +1003,12 @@
+ (edesc->src_nents ? : 1);
+ in_options = LDST_SGF;
+ }
+-
+- append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+- in_options);
++ if (encrypt)
++ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
++ req->cryptlen - authsize, in_options);
++ else
++ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
++ req->cryptlen, in_options);
+
+ if (likely(req->src == req->dst)) {
+ if (all_contig) {
+@@ -2417,8 +1029,7 @@
+ }
+ }
+ if (encrypt)
+- append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
+- out_options);
++ append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+ else
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
+ out_options);
+@@ -2440,53 +1051,43 @@
+ u32 out_options = 0, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, sec4_sg_index = 0;
+- bool is_gcm = false;
+
+ #ifdef DEBUG
+ debug("assoclen %d cryptlen %d authsize %d\n",
+ req->assoclen, req->cryptlen, authsize);
+- print_hex_dump(KERN_ERR, "assoc @"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+ req->assoclen , 1);
+- print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+- print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
+- print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
+ desc_bytes(sh_desc), 1);
+ #endif
+
+- if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+- OP_ALG_ALGSEL_AES) &&
+- ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
+- is_gcm = true;
+-
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ if (contig & GIV_SRC_CONTIG) {
+- if (is_gcm)
+- src_dma = edesc->iv_dma;
+- else
+- src_dma = sg_dma_address(req->assoc);
++ src_dma = sg_dma_address(req->assoc);
+ in_options = 0;
+ } else {
+ src_dma = edesc->sec4_sg_dma;
+ sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
+ in_options = LDST_SGF;
+ }
+- append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
+- in_options);
++ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
++ req->cryptlen - authsize, in_options);
+
+ if (contig & GIV_DST_CONTIG) {
+ dst_dma = edesc->iv_dma;
+ } else {
+ if (likely(req->src == req->dst)) {
+ dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
+- (edesc->assoc_nents +
+- (is_gcm ? 1 + edesc->src_nents : 0));
++ edesc->assoc_nents;
+ out_options = LDST_SGF;
+ } else {
+ dst_dma = edesc->sec4_sg_dma +
+@@ -2496,8 +1097,7 @@
+ }
+ }
+
+- append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
+- out_options);
++ append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
+ }
+
+ /*
+@@ -2516,10 +1116,10 @@
+ int len, sec4_sg_index = 0;
+
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ ivsize, 1);
+- print_hex_dump(KERN_ERR, "src @"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->nbytes, 1);
+ #endif
+@@ -2532,7 +1132,7 @@
+ in_options = 0;
+ } else {
+ src_dma = edesc->sec4_sg_dma;
+- sec4_sg_index += edesc->src_nents + 1;
++ sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
+ in_options = LDST_SGF;
+ }
+ append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
+@@ -2558,59 +1158,10 @@
+ }
+
+ /*
+- * Fill in ablkcipher givencrypt job descriptor
+- */
+-static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
+- struct ablkcipher_edesc *edesc,
+- struct ablkcipher_request *req,
+- bool iv_contig)
+-{
+- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+- u32 *desc = edesc->hw_desc;
+- u32 out_options, in_options;
+- dma_addr_t dst_dma, src_dma;
+- int len, sec4_sg_index = 0;
+-
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+- ivsize, 1);
+- print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+- edesc->src_nents ? 100 : req->nbytes, 1);
+-#endif
+-
+- len = desc_len(sh_desc);
+- init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+-
+- if (!edesc->src_nents) {
+- src_dma = sg_dma_address(req->src);
+- in_options = 0;
+- } else {
+- src_dma = edesc->sec4_sg_dma;
+- sec4_sg_index += edesc->src_nents;
+- in_options = LDST_SGF;
+- }
+- append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
+-
+- if (iv_contig) {
+- dst_dma = edesc->iv_dma;
+- out_options = 0;
+- } else {
+- dst_dma = edesc->sec4_sg_dma +
+- sec4_sg_index * sizeof(struct sec4_sg_entry);
+- out_options = LDST_SGF;
+- }
+- append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
+-}
+-
+-/*
+ * allocate and map the aead extended descriptor
+ */
+ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+- int desc_bytes, bool *all_contig_ptr,
+- bool encrypt)
++ int desc_bytes, bool *all_contig_ptr)
+ {
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+@@ -2625,26 +1176,15 @@
+ bool assoc_chained = false, src_chained = false, dst_chained = false;
+ int ivsize = crypto_aead_ivsize(aead);
+ int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+- unsigned int authsize = ctx->authsize;
+- bool is_gcm = false;
+
+ assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
++ src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+
+- if (unlikely(req->dst != req->src)) {
+- src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+- dst_nents = sg_count(req->dst,
+- req->cryptlen +
+- (encrypt ? authsize : (-authsize)),
+- &dst_chained);
+- } else {
+- src_nents = sg_count(req->src,
+- req->cryptlen +
+- (encrypt ? authsize : 0),
+- &src_chained);
+- }
++ if (unlikely(req->dst != req->src))
++ dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+
+ sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
+- DMA_TO_DEVICE, assoc_chained);
++ DMA_BIDIRECTIONAL, assoc_chained);
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL, src_chained);
+@@ -2655,43 +1195,23 @@
+ DMA_FROM_DEVICE, dst_chained);
+ }
+
++ /* Check if data are contiguous */
+ iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, iv_dma)) {
+- dev_err(jrdev, "unable to map IV\n");
+- return ERR_PTR(-ENOMEM);
+- }
+-
+- if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+- OP_ALG_ALGSEL_AES) &&
+- ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
+- is_gcm = true;
+-
+- /*
+- * Check if data are contiguous.
+- * GCM expected input sequence: IV, AAD, text
+- * All other - expected input sequence: AAD, IV, text
+- */
+- if (is_gcm)
+- all_contig = (!assoc_nents &&
+- iv_dma + ivsize == sg_dma_address(req->assoc) &&
+- !src_nents && sg_dma_address(req->assoc) +
+- req->assoclen == sg_dma_address(req->src));
+- else
+- all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
+- req->assoclen == iv_dma && !src_nents &&
+- iv_dma + ivsize == sg_dma_address(req->src));
+- if (!all_contig) {
++ if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
++ iv_dma || src_nents || iv_dma + ivsize !=
++ sg_dma_address(req->src)) {
++ all_contig = false;
+ assoc_nents = assoc_nents ? : 1;
+ src_nents = src_nents ? : 1;
+ sec4_sg_len = assoc_nents + 1 + src_nents;
+ }
+-
+ sec4_sg_len += dst_nents;
+
+ sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
++ dma_sync_single_for_device(jrdev, iv_dma, ivsize, DMA_TO_DEVICE);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+- edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
++ edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+@@ -2708,46 +1228,32 @@
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
+ desc_bytes;
++ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
++ sec4_sg_bytes, DMA_TO_DEVICE);
+ *all_contig_ptr = all_contig;
+
+ sec4_sg_index = 0;
+ if (!all_contig) {
+- if (!is_gcm) {
+- sg_to_sec4_sg(req->assoc,
+- assoc_nents,
+- edesc->sec4_sg +
+- sec4_sg_index, 0);
+- sec4_sg_index += assoc_nents;
+- }
+-
++ sg_to_sec4_sg(req->assoc,
++ (assoc_nents ? : 1),
++ edesc->sec4_sg +
++ sec4_sg_index, 0);
++ sec4_sg_index += assoc_nents ? : 1;
+ dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+ iv_dma, ivsize, 0);
+ sec4_sg_index += 1;
+-
+- if (is_gcm) {
+- sg_to_sec4_sg(req->assoc,
+- assoc_nents,
+- edesc->sec4_sg +
+- sec4_sg_index, 0);
+- sec4_sg_index += assoc_nents;
+- }
+-
+ sg_to_sec4_sg_last(req->src,
+- src_nents,
++ (src_nents ? : 1),
+ edesc->sec4_sg +
+ sec4_sg_index, 0);
+- sec4_sg_index += src_nents;
++ sec4_sg_index += src_nents ? : 1;
+ }
+ if (dst_nents) {
+ sg_to_sec4_sg_last(req->dst, dst_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ }
+- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+- sec4_sg_bytes, DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+- dev_err(jrdev, "unable to map S/G table\n");
+- return ERR_PTR(-ENOMEM);
+- }
++ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
++ DMA_TO_DEVICE);
+
+ return edesc;
+ }
+@@ -2762,9 +1268,11 @@
+ u32 *desc;
+ int ret = 0;
+
++ req->cryptlen += ctx->authsize;
++
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
+- CAAM_CMD_SZ, &all_contig, true);
++ CAAM_CMD_SZ, &all_contig);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+@@ -2772,7 +1280,7 @@
+ init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
+ all_contig, true);
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+ #endif
+@@ -2801,12 +1309,12 @@
+
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
+- CAAM_CMD_SZ, &all_contig, false);
++ CAAM_CMD_SZ, &all_contig);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ req->cryptlen, 1);
+ #endif
+@@ -2815,7 +1323,7 @@
+ init_aead_job(ctx->sh_desc_dec,
+ ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+ #endif
+@@ -2853,17 +1361,15 @@
+ int ivsize = crypto_aead_ivsize(aead);
+ bool assoc_chained = false, src_chained = false, dst_chained = false;
+ int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+- bool is_gcm = false;
+
+ assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
+ src_nents = sg_count(req->src, req->cryptlen, &src_chained);
+
+ if (unlikely(req->dst != req->src))
+- dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
+- &dst_chained);
++ dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);
+
+ sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
+- DMA_TO_DEVICE, assoc_chained);
++ DMA_BIDIRECTIONAL, assoc_chained);
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL, src_chained);
+@@ -2874,64 +1380,32 @@
+ DMA_FROM_DEVICE, dst_chained);
+ }
+
++ /* Check if data are contiguous */
+ iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, iv_dma)) {
+- dev_err(jrdev, "unable to map IV\n");
+- return ERR_PTR(-ENOMEM);
+- }
+-
+- if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+- OP_ALG_ALGSEL_AES) &&
+- ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
+- is_gcm = true;
+-
+- /*
+- * Check if data are contiguous.
+- * GCM expected input sequence: IV, AAD, text
+- * All other - expected input sequence: AAD, IV, text
+- */
+-
+- if (is_gcm) {
+- if (assoc_nents || iv_dma + ivsize !=
+- sg_dma_address(req->assoc) || src_nents ||
+- sg_dma_address(req->assoc) + req->assoclen !=
+- sg_dma_address(req->src))
+- contig &= ~GIV_SRC_CONTIG;
+- } else {
+- if (assoc_nents ||
+- sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
+- src_nents || iv_dma + ivsize != sg_dma_address(req->src))
+- contig &= ~GIV_SRC_CONTIG;
+- }
+-
++ if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
++ iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
++ contig &= ~GIV_SRC_CONTIG;
+ if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
+ contig &= ~GIV_DST_CONTIG;
+-
++ if (unlikely(req->src != req->dst)) {
++ dst_nents = dst_nents ? : 1;
++ sec4_sg_len += 1;
++ }
+ if (!(contig & GIV_SRC_CONTIG)) {
+ assoc_nents = assoc_nents ? : 1;
+ src_nents = src_nents ? : 1;
+ sec4_sg_len += assoc_nents + 1 + src_nents;
+- if (req->src == req->dst &&
+- (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
+- contig &= ~GIV_DST_CONTIG;
+- }
+-
+- /*
+- * Add new sg entries for GCM output sequence.
+- * Expected output sequence: IV, encrypted text.
+- */
+- if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
+- sec4_sg_len += 1 + src_nents;
+-
+- if (unlikely(req->src != req->dst)) {
+- dst_nents = dst_nents ? : 1;
+- sec4_sg_len += 1 + dst_nents;
++ if (likely(req->src == req->dst))
++ contig &= ~GIV_DST_CONTIG;
+ }
++ sec4_sg_len += dst_nents;
+
+ sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
+
++ dma_sync_single_for_device(jrdev, iv_dma, ivsize, DMA_TO_DEVICE);
++
+ /* allocate space for base edesc and hw desc commands, link tables */
+- edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
++ edesc = kzalloc(sizeof(struct aead_edesc) + desc_bytes +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+@@ -2948,40 +1422,24 @@
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
+ desc_bytes;
++ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
++ sec4_sg_bytes, DMA_TO_DEVICE);
+ *contig_ptr = contig;
+
+ sec4_sg_index = 0;
+ if (!(contig & GIV_SRC_CONTIG)) {
+- if (!is_gcm) {
+- sg_to_sec4_sg(req->assoc, assoc_nents,
+- edesc->sec4_sg + sec4_sg_index, 0);
+- sec4_sg_index += assoc_nents;
+- }
+-
++ sg_to_sec4_sg(req->assoc, assoc_nents,
++ edesc->sec4_sg +
++ sec4_sg_index, 0);
++ sec4_sg_index += assoc_nents;
+ dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+ iv_dma, ivsize, 0);
+ sec4_sg_index += 1;
+-
+- if (is_gcm) {
+- sg_to_sec4_sg(req->assoc, assoc_nents,
+- edesc->sec4_sg + sec4_sg_index, 0);
+- sec4_sg_index += assoc_nents;
+- }
+-
+ sg_to_sec4_sg_last(req->src, src_nents,
+ edesc->sec4_sg +
+ sec4_sg_index, 0);
+ sec4_sg_index += src_nents;
+ }
+-
+- if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
+- dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+- iv_dma, ivsize, 0);
+- sec4_sg_index += 1;
+- sg_to_sec4_sg_last(req->src, src_nents,
+- edesc->sec4_sg + sec4_sg_index, 0);
+- }
+-
+ if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
+ dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+ iv_dma, ivsize, 0);
+@@ -2989,12 +1447,8 @@
+ sg_to_sec4_sg_last(req->dst, dst_nents,
+ edesc->sec4_sg + sec4_sg_index, 0);
+ }
+- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+- sec4_sg_bytes, DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+- dev_err(jrdev, "unable to map S/G table\n");
+- return ERR_PTR(-ENOMEM);
+- }
++ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
++ DMA_TO_DEVICE);
+
+ return edesc;
+ }
+@@ -3010,6 +1464,8 @@
+ u32 *desc;
+ int ret = 0;
+
++ req->cryptlen += ctx->authsize;
++
+ /* allocate extended descriptor */
+ edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &contig);
+@@ -3018,7 +1474,7 @@
+ return PTR_ERR(edesc);
+
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ req->cryptlen, 1);
+ #endif
+@@ -3027,7 +1483,7 @@
+ init_aead_giv_job(ctx->sh_desc_givenc,
+ ctx->sh_desc_givenc_dma, edesc, req, contig);
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+ #endif
+@@ -3044,11 +1500,6 @@
+ return ret;
+ }
+
+-static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
+-{
+- return aead_encrypt(&areq->areq);
+-}
+-
+ /*
+ * allocate and map the ablkcipher extended descriptor for ablkcipher
+ */
+@@ -3086,16 +1537,12 @@
+ DMA_FROM_DEVICE, dst_chained);
+ }
+
+- iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, iv_dma)) {
+- dev_err(jrdev, "unable to map IV\n");
+- return ERR_PTR(-ENOMEM);
+- }
+-
+ /*
+ * Check if iv can be contiguous with source and destination.
+ * If so, include it. If not, create scatterlist.
+ */
++ iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
++ dma_sync_single_for_device(jrdev, iv_dma, ivsize, DMA_TO_DEVICE);
+ if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
+ iv_contig = true;
+ else
+@@ -3104,7 +1551,7 @@
+ sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+- edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
++ edesc = kzalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+@@ -3134,15 +1581,13 @@
+
+ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ sec4_sg_bytes, DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+- dev_err(jrdev, "unable to map S/G table\n");
+- return ERR_PTR(-ENOMEM);
+- }
+-
+ edesc->iv_dma = iv_dma;
+
++ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
++ DMA_TO_DEVICE);
++
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+ sec4_sg_bytes, 1);
+ #endif
+@@ -3171,7 +1616,7 @@
+ init_ablkcipher_job(ctx->sh_desc_enc,
+ ctx->sh_desc_enc_dma, edesc, req, iv_contig);
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+ #endif
+@@ -3209,7 +1654,7 @@
+ ctx->sh_desc_dec_dma, edesc, req, iv_contig);
+ desc = edesc->hw_desc;
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+ #endif
+@@ -3225,291 +1670,28 @@
+ return ret;
+ }
+
+-/*
+- * allocate and map the ablkcipher extended descriptor
+- * for ablkcipher givencrypt
+- */
+-static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+- struct skcipher_givcrypt_request *greq,
+- int desc_bytes,
+- bool *iv_contig_out)
+-{
+- struct ablkcipher_request *req = &greq->creq;
+- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+- struct device *jrdev = ctx->jrdev;
+- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+- CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+- GFP_KERNEL : GFP_ATOMIC;
+- int src_nents, dst_nents = 0, sec4_sg_bytes;
+- struct ablkcipher_edesc *edesc;
+- dma_addr_t iv_dma = 0;
+- bool iv_contig = false;
+- int sgc;
+- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+- bool src_chained = false, dst_chained = false;
+- int sec4_sg_index;
+-
+- src_nents = sg_count(req->src, req->nbytes, &src_chained);
+-
+- if (unlikely(req->dst != req->src))
+- dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
+-
+- if (likely(req->src == req->dst)) {
+- sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+- DMA_BIDIRECTIONAL, src_chained);
+- } else {
+- sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+- DMA_TO_DEVICE, src_chained);
+- sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+- DMA_FROM_DEVICE, dst_chained);
+- }
+-
+- /*
+- * Check if iv can be contiguous with source and destination.
+- * If so, include it. If not, create scatterlist.
+- */
+- iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, iv_dma)) {
+- dev_err(jrdev, "unable to map IV\n");
+- return ERR_PTR(-ENOMEM);
+- }
+-
+- if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
+- iv_contig = true;
+- else
+- dst_nents = dst_nents ? : 1;
+- sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
+- sizeof(struct sec4_sg_entry);
+-
+- /* allocate space for base edesc and hw desc commands, link tables */
+- edesc = kmalloc(sizeof(*edesc) + desc_bytes +
+- sec4_sg_bytes, GFP_DMA | flags);
+- if (!edesc) {
+- dev_err(jrdev, "could not allocate extended descriptor\n");
+- return ERR_PTR(-ENOMEM);
+- }
+-
+- edesc->src_nents = src_nents;
+- edesc->src_chained = src_chained;
+- edesc->dst_nents = dst_nents;
+- edesc->dst_chained = dst_chained;
+- edesc->sec4_sg_bytes = sec4_sg_bytes;
+- edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+- desc_bytes;
+-
+- sec4_sg_index = 0;
+- if (src_nents) {
+- sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+- sec4_sg_index += src_nents;
+- }
+-
+- if (!iv_contig) {
+- dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+- iv_dma, ivsize, 0);
+- sec4_sg_index += 1;
+- sg_to_sec4_sg_last(req->dst, dst_nents,
+- edesc->sec4_sg + sec4_sg_index, 0);
+- }
+-
+- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+- sec4_sg_bytes, DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+- dev_err(jrdev, "unable to map S/G table\n");
+- return ERR_PTR(-ENOMEM);
+- }
+- edesc->iv_dma = iv_dma;
+-
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR,
+- "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+- sec4_sg_bytes, 1);
+-#endif
+-
+- *iv_contig_out = iv_contig;
+- return edesc;
+-}
+-
+-static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
+-{
+- struct ablkcipher_request *req = &creq->creq;
+- struct ablkcipher_edesc *edesc;
+- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+- struct device *jrdev = ctx->jrdev;
+- bool iv_contig;
+- u32 *desc;
+- int ret = 0;
+-
+- /* allocate extended descriptor */
+- edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
+- CAAM_CMD_SZ, &iv_contig);
+- if (IS_ERR(edesc))
+- return PTR_ERR(edesc);
+-
+- /* Create and submit job descriptor*/
+- init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
+- edesc, req, iv_contig);
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR,
+- "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+- desc_bytes(edesc->hw_desc), 1);
+-#endif
+- desc = edesc->hw_desc;
+- ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+-
+- if (!ret) {
+- ret = -EINPROGRESS;
+- } else {
+- ablkcipher_unmap(jrdev, edesc, req);
+- kfree(edesc);
+- }
+-
+- return ret;
+-}
+-
+ #define template_aead template_u.aead
+ #define template_ablkcipher template_u.ablkcipher
+ struct caam_alg_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+- char driver_name[CRYPTO_MAX_ALG_NAME];
+- unsigned int blocksize;
+- u32 type;
+- union {
+- struct ablkcipher_alg ablkcipher;
+- struct aead_alg aead;
+- struct blkcipher_alg blkcipher;
+- struct cipher_alg cipher;
+- struct compress_alg compress;
+- struct rng_alg rng;
+- } template_u;
+- u32 class1_alg_type;
+- u32 class2_alg_type;
+- u32 alg_op;
+-};
+-
+-static struct caam_alg_template driver_algs[] = {
+- /* single-pass ipsec_esp descriptor */
+- {
+- .name = "authenc(hmac(md5),ecb(cipher_null))",
+- .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
+- .blocksize = NULL_BLOCK_SIZE,
+- .type = CRYPTO_ALG_TYPE_AEAD,
+- .template_aead = {
+- .setkey = aead_setkey,
+- .setauthsize = aead_setauthsize,
+- .encrypt = aead_encrypt,
+- .decrypt = aead_decrypt,
+- .givencrypt = aead_null_givencrypt,
+- .geniv = "<built-in>",
+- .ivsize = NULL_IV_SIZE,
+- .maxauthsize = MD5_DIGEST_SIZE,
+- },
+- .class1_alg_type = 0,
+- .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+- },
+- {
+- .name = "authenc(hmac(sha1),ecb(cipher_null))",
+- .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
+- .blocksize = NULL_BLOCK_SIZE,
+- .type = CRYPTO_ALG_TYPE_AEAD,
+- .template_aead = {
+- .setkey = aead_setkey,
+- .setauthsize = aead_setauthsize,
+- .encrypt = aead_encrypt,
+- .decrypt = aead_decrypt,
+- .givencrypt = aead_null_givencrypt,
+- .geniv = "<built-in>",
+- .ivsize = NULL_IV_SIZE,
+- .maxauthsize = SHA1_DIGEST_SIZE,
+- },
+- .class1_alg_type = 0,
+- .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+- },
+- {
+- .name = "authenc(hmac(sha224),ecb(cipher_null))",
+- .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
+- .blocksize = NULL_BLOCK_SIZE,
+- .type = CRYPTO_ALG_TYPE_AEAD,
+- .template_aead = {
+- .setkey = aead_setkey,
+- .setauthsize = aead_setauthsize,
+- .encrypt = aead_encrypt,
+- .decrypt = aead_decrypt,
+- .givencrypt = aead_null_givencrypt,
+- .geniv = "<built-in>",
+- .ivsize = NULL_IV_SIZE,
+- .maxauthsize = SHA224_DIGEST_SIZE,
+- },
+- .class1_alg_type = 0,
+- .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+- OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+- },
+- {
+- .name = "authenc(hmac(sha256),ecb(cipher_null))",
+- .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
+- .blocksize = NULL_BLOCK_SIZE,
+- .type = CRYPTO_ALG_TYPE_AEAD,
+- .template_aead = {
+- .setkey = aead_setkey,
+- .setauthsize = aead_setauthsize,
+- .encrypt = aead_encrypt,
+- .decrypt = aead_decrypt,
+- .givencrypt = aead_null_givencrypt,
+- .geniv = "<built-in>",
+- .ivsize = NULL_IV_SIZE,
+- .maxauthsize = SHA256_DIGEST_SIZE,
+- },
+- .class1_alg_type = 0,
+- .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+- OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+- },
+- {
+- .name = "authenc(hmac(sha384),ecb(cipher_null))",
+- .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
+- .blocksize = NULL_BLOCK_SIZE,
+- .type = CRYPTO_ALG_TYPE_AEAD,
+- .template_aead = {
+- .setkey = aead_setkey,
+- .setauthsize = aead_setauthsize,
+- .encrypt = aead_encrypt,
+- .decrypt = aead_decrypt,
+- .givencrypt = aead_null_givencrypt,
+- .geniv = "<built-in>",
+- .ivsize = NULL_IV_SIZE,
+- .maxauthsize = SHA384_DIGEST_SIZE,
+- },
+- .class1_alg_type = 0,
+- .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+- OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+- },
+- {
+- .name = "authenc(hmac(sha512),ecb(cipher_null))",
+- .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
+- .blocksize = NULL_BLOCK_SIZE,
+- .type = CRYPTO_ALG_TYPE_AEAD,
+- .template_aead = {
+- .setkey = aead_setkey,
+- .setauthsize = aead_setauthsize,
+- .encrypt = aead_encrypt,
+- .decrypt = aead_decrypt,
+- .givencrypt = aead_null_givencrypt,
+- .geniv = "<built-in>",
+- .ivsize = NULL_IV_SIZE,
+- .maxauthsize = SHA512_DIGEST_SIZE,
+- },
+- .class1_alg_type = 0,
+- .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+- OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+- },
++ char driver_name[CRYPTO_MAX_ALG_NAME];
++ unsigned int blocksize;
++ u32 type;
++ union {
++ struct ablkcipher_alg ablkcipher;
++ struct aead_alg aead;
++ struct blkcipher_alg blkcipher;
++ struct cipher_alg cipher;
++ struct compress_alg compress;
++ struct rng_alg rng;
++ } template_u;
++ u32 class1_alg_type;
++ u32 class2_alg_type;
++ u32 alg_op;
++};
++
++static struct caam_alg_template driver_algs[] = {
++ /* single-pass ipsec_esp descriptor */
+ {
+ .name = "authenc(hmac(md5),cbc(aes))",
+ .driver_name = "authenc-hmac-md5-cbc-aes-caam",
+@@ -3865,188 +2047,81 @@
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
++ /* ablkcipher descriptor */
+ {
+- .name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
+- .driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam",
+- .blocksize = 1,
+- .type = CRYPTO_ALG_TYPE_AEAD,
+- .template_aead = {
+- .setkey = aead_setkey,
+- .setauthsize = aead_setauthsize,
+- .encrypt = aead_encrypt,
+- .decrypt = aead_decrypt,
+- .givencrypt = aead_givencrypt,
+- .geniv = "<built-in>",
+- .ivsize = CTR_RFC3686_IV_SIZE,
+- .maxauthsize = MD5_DIGEST_SIZE,
+- },
+- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+- .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+- },
+- {
+- .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+- .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam",
+- .blocksize = 1,
+- .type = CRYPTO_ALG_TYPE_AEAD,
+- .template_aead = {
+- .setkey = aead_setkey,
+- .setauthsize = aead_setauthsize,
+- .encrypt = aead_encrypt,
+- .decrypt = aead_decrypt,
+- .givencrypt = aead_givencrypt,
+- .geniv = "<built-in>",
+- .ivsize = CTR_RFC3686_IV_SIZE,
+- .maxauthsize = SHA1_DIGEST_SIZE,
+- },
+- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+- .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+- },
+- {
+- .name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
+- .driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam",
+- .blocksize = 1,
+- .type = CRYPTO_ALG_TYPE_AEAD,
+- .template_aead = {
+- .setkey = aead_setkey,
+- .setauthsize = aead_setauthsize,
+- .encrypt = aead_encrypt,
+- .decrypt = aead_decrypt,
+- .givencrypt = aead_givencrypt,
+- .geniv = "<built-in>",
+- .ivsize = CTR_RFC3686_IV_SIZE,
+- .maxauthsize = SHA224_DIGEST_SIZE,
++ .name = "ecb(des)",
++ .driver_name = "ecb-des-caam",
++ .blocksize = DES_BLOCK_SIZE,
++ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
++ .template_ablkcipher = {
++ .setkey = ablkcipher_setkey,
++ .encrypt = ablkcipher_encrypt,
++ .decrypt = ablkcipher_decrypt,
++ .geniv = "eseqiv",
++ .min_keysize = DES_KEY_SIZE,
++ .max_keysize = DES_KEY_SIZE,
++ .ivsize = DES_BLOCK_SIZE,
+ },
+- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+- .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+- OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
++ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB,
+ },
+ {
+- .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+- .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam",
+- .blocksize = 1,
+- .type = CRYPTO_ALG_TYPE_AEAD,
+- .template_aead = {
+- .setkey = aead_setkey,
+- .setauthsize = aead_setauthsize,
+- .encrypt = aead_encrypt,
+- .decrypt = aead_decrypt,
+- .givencrypt = aead_givencrypt,
+- .geniv = "<built-in>",
+- .ivsize = CTR_RFC3686_IV_SIZE,
+- .maxauthsize = SHA256_DIGEST_SIZE,
+- },
+- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+- .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+- OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
++ .name = "ecb(arc4)",
++ .driver_name = "ecb-arc4-caam",
++ .blocksize = ARC4_BLOCK_SIZE,
++ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
++ .template_ablkcipher = {
++ .setkey = ablkcipher_setkey,
++ .encrypt = ablkcipher_encrypt,
++ .decrypt = ablkcipher_decrypt,
++ .geniv = "eseqiv",
++ .min_keysize = ARC4_MIN_KEY_SIZE,
++ .max_keysize = ARC4_MAX_KEY_SIZE,
++ .ivsize = ARC4_BLOCK_SIZE,
++ },
++ .class1_alg_type = OP_ALG_ALGSEL_ARC4 | OP_ALG_AAI_ECB
+ },
+ {
+- .name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
+- .driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam",
+- .blocksize = 1,
+- .type = CRYPTO_ALG_TYPE_AEAD,
+- .template_aead = {
+- .setkey = aead_setkey,
+- .setauthsize = aead_setauthsize,
+- .encrypt = aead_encrypt,
+- .decrypt = aead_decrypt,
+- .givencrypt = aead_givencrypt,
+- .geniv = "<built-in>",
+- .ivsize = CTR_RFC3686_IV_SIZE,
+- .maxauthsize = SHA384_DIGEST_SIZE,
++ .name = "ecb(aes)",
++ .driver_name = "ecb-aes-caam",
++ .blocksize = AES_BLOCK_SIZE,
++ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
++ .template_ablkcipher = {
++ .setkey = ablkcipher_setkey,
++ .encrypt = ablkcipher_encrypt,
++ .decrypt = ablkcipher_decrypt,
++ .geniv = "eseqiv",
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
+ },
+- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+- .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+- OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
++ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB,
+ },
+ {
+- .name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
+- .driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam",
+- .blocksize = 1,
+- .type = CRYPTO_ALG_TYPE_AEAD,
+- .template_aead = {
+- .setkey = aead_setkey,
+- .setauthsize = aead_setauthsize,
+- .encrypt = aead_encrypt,
+- .decrypt = aead_decrypt,
+- .givencrypt = aead_givencrypt,
+- .geniv = "<built-in>",
+- .ivsize = CTR_RFC3686_IV_SIZE,
+- .maxauthsize = SHA512_DIGEST_SIZE,
++ .name = "ctr(aes)",
++ .driver_name = "ctr-aes-caam",
++ .blocksize = AES_BLOCK_SIZE,
++ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
++ .template_ablkcipher = {
++ .setkey = ablkcipher_setkey,
++ .encrypt = ablkcipher_encrypt,
++ .decrypt = ablkcipher_decrypt,
++ .geniv = "eseqiv",
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+- .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+- OP_ALG_AAI_HMAC_PRECOMP,
+- .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+- },
+- {
+- .name = "rfc4106(gcm(aes))",
+- .driver_name = "rfc4106-gcm-aes-caam",
+- .blocksize = 1,
+- .type = CRYPTO_ALG_TYPE_AEAD,
+- .template_aead = {
+- .setkey = rfc4106_setkey,
+- .setauthsize = rfc4106_setauthsize,
+- .encrypt = aead_encrypt,
+- .decrypt = aead_decrypt,
+- .givencrypt = aead_givencrypt,
+- .geniv = "<built-in>",
+- .ivsize = 8,
+- .maxauthsize = AES_BLOCK_SIZE,
+- },
+- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+- },
+- {
+- .name = "rfc4543(gcm(aes))",
+- .driver_name = "rfc4543-gcm-aes-caam",
+- .blocksize = 1,
+- .type = CRYPTO_ALG_TYPE_AEAD,
+- .template_aead = {
+- .setkey = rfc4543_setkey,
+- .setauthsize = rfc4543_setauthsize,
+- .encrypt = aead_encrypt,
+- .decrypt = aead_decrypt,
+- .givencrypt = aead_givencrypt,
+- .geniv = "<built-in>",
+- .ivsize = 8,
+- .maxauthsize = AES_BLOCK_SIZE,
+- },
+- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+- },
+- /* Galois Counter Mode */
+- {
+- .name = "gcm(aes)",
+- .driver_name = "gcm-aes-caam",
+- .blocksize = 1,
+- .type = CRYPTO_ALG_TYPE_AEAD,
+- .template_aead = {
+- .setkey = gcm_setkey,
+- .setauthsize = gcm_setauthsize,
+- .encrypt = aead_encrypt,
+- .decrypt = aead_decrypt,
+- .givencrypt = NULL,
+- .geniv = "<built-in>",
+- .ivsize = 12,
+- .maxauthsize = AES_BLOCK_SIZE,
+- },
+- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+- /* ablkcipher descriptor */
+ {
+ .name = "cbc(aes)",
+ .driver_name = "cbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
++ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+- .givencrypt = ablkcipher_givencrypt,
+- .geniv = "<built-in>",
++ .geniv = "eseqiv",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+@@ -4054,16 +2129,31 @@
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ {
++ .name = "ecb(des3_ede)",
++ .driver_name = "ecb-des3-caam",
++ .blocksize = DES3_EDE_BLOCK_SIZE,
++ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
++ .template_ablkcipher = {
++ .setkey = ablkcipher_setkey,
++ .encrypt = ablkcipher_encrypt,
++ .decrypt = ablkcipher_decrypt,
++ .geniv = "eseqiv",
++ .min_keysize = DES3_EDE_KEY_SIZE,
++ .max_keysize = DES3_EDE_KEY_SIZE,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ },
++ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB,
++ },
++ {
+ .name = "cbc(des3_ede)",
+ .driver_name = "cbc-3des-caam",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
++ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+- .givencrypt = ablkcipher_givencrypt,
+- .geniv = "<built-in>",
++ .geniv = "eseqiv",
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+@@ -4074,58 +2164,23 @@
+ .name = "cbc(des)",
+ .driver_name = "cbc-des-caam",
+ .blocksize = DES_BLOCK_SIZE,
+- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
++ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+- .givencrypt = ablkcipher_givencrypt,
+- .geniv = "<built-in>",
++ .geniv = "eseqiv",
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+- },
+- {
+- .name = "ctr(aes)",
+- .driver_name = "ctr-aes-caam",
+- .blocksize = 1,
+- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+- .template_ablkcipher = {
+- .setkey = ablkcipher_setkey,
+- .encrypt = ablkcipher_encrypt,
+- .decrypt = ablkcipher_decrypt,
+- .geniv = "chainiv",
+- .min_keysize = AES_MIN_KEY_SIZE,
+- .max_keysize = AES_MAX_KEY_SIZE,
+- .ivsize = AES_BLOCK_SIZE,
+- },
+- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+- },
+- {
+- .name = "rfc3686(ctr(aes))",
+- .driver_name = "rfc3686-ctr-aes-caam",
+- .blocksize = 1,
+- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
+- .template_ablkcipher = {
+- .setkey = ablkcipher_setkey,
+- .encrypt = ablkcipher_encrypt,
+- .decrypt = ablkcipher_decrypt,
+- .givencrypt = ablkcipher_givencrypt,
+- .geniv = "<built-in>",
+- .min_keysize = AES_MIN_KEY_SIZE +
+- CTR_RFC3686_NONCE_SIZE,
+- .max_keysize = AES_MAX_KEY_SIZE +
+- CTR_RFC3686_NONCE_SIZE,
+- .ivsize = CTR_RFC3686_IV_SIZE,
+- },
+- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ }
+ };
+
+ struct caam_crypto_alg {
+ struct list_head entry;
++ struct device *ctrldev;
+ int class1_alg_type;
+ int class2_alg_type;
+ int alg_op;
+@@ -4138,12 +2193,14 @@
+ struct caam_crypto_alg *caam_alg =
+ container_of(alg, struct caam_crypto_alg, crypto_alg);
+ struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
++ struct caam_drv_private *priv = dev_get_drvdata(caam_alg->ctrldev);
++ int tgt_jr = atomic_inc_return(&priv->tfm_count);
+
+- ctx->jrdev = caam_jr_alloc();
+- if (IS_ERR(ctx->jrdev)) {
+- pr_err("Job Ring Device allocation for transform failed\n");
+- return PTR_ERR(ctx->jrdev);
+- }
++ /*
++ * distribute tfms across job rings to ensure in-order
++ * crypto request processing per tfm
++ */
++ ctx->jrdev = priv->algapi_jr[(tgt_jr / 2) % priv->num_jrs_for_algapi];
+
+ /* copy descriptor header template value */
+ ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
+@@ -4170,31 +2227,57 @@
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
+ desc_bytes(ctx->sh_desc_givenc),
+ DMA_TO_DEVICE);
+- if (ctx->key_dma &&
+- !dma_mapping_error(ctx->jrdev, ctx->key_dma))
+- dma_unmap_single(ctx->jrdev, ctx->key_dma,
+- ctx->enckeylen + ctx->split_key_pad_len,
+- DMA_TO_DEVICE);
+-
+- caam_jr_free(ctx->jrdev);
+ }
+
+ static void __exit caam_algapi_exit(void)
+ {
+
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++ struct device *ctrldev;
++ struct caam_drv_private *priv;
+ struct caam_crypto_alg *t_alg, *n;
++ int i, err;
++
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev) {
++ of_node_put(dev_node);
++ return;
++ }
+
+- if (!alg_list.next)
++ ctrldev = &pdev->dev;
++ priv = dev_get_drvdata(ctrldev);
++
++ if (!priv->alg_list.next) {
++ of_node_put(dev_node);
+ return;
++ }
+
+- list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
++ list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
+ crypto_unregister_alg(&t_alg->crypto_alg);
+ list_del(&t_alg->entry);
+ kfree(t_alg);
+ }
++
++ for (i = 0; i < priv->total_jobrs; i++) {
++ err = caam_jr_deregister(priv->algapi_jr[i]);
++ if (err < 0)
++ break;
++ }
++ kfree(priv->algapi_jr);
++
++ of_node_put(dev_node);
+ }
+
+-static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
++static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
++ struct caam_alg_template
+ *template)
+ {
+ struct caam_crypto_alg *t_alg;
+@@ -4202,7 +2285,7 @@
+
+ t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
+ if (!t_alg) {
+- pr_err("failed to allocate t_alg\n");
++ dev_err(ctrldev, "failed to allocate t_alg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+@@ -4218,13 +2301,13 @@
+ alg->cra_blocksize = template->blocksize;
+ alg->cra_alignmask = 0;
+ alg->cra_ctxsize = sizeof(struct caam_ctx);
+- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
+- template->type;
++ alg->cra_flags = CRYPTO_ALG_ASYNC | template->type;
++
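++	/* this flag only exists on newer kernels; set it when available */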
++#ifdef CRYPTO_ALG_KERN_DRIVER_ONLY
++ alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
++#endif
++
+ switch (template->type) {
+- case CRYPTO_ALG_TYPE_GIVCIPHER:
+- alg->cra_type = &crypto_givcipher_type;
+- alg->cra_ablkcipher = template->template_ablkcipher;
+- break;
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ alg->cra_type = &crypto_ablkcipher_type;
+ alg->cra_ablkcipher = template->template_ablkcipher;
+@@ -4238,6 +2321,7 @@
+ t_alg->class1_alg_type = template->class1_alg_type;
+ t_alg->class2_alg_type = template->class2_alg_type;
+ t_alg->alg_op = template->alg_op;
++ t_alg->ctrldev = ctrldev;
+
+ return t_alg;
+ }
+@@ -4246,9 +2330,11 @@
+ {
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+- struct device *ctrldev;
+- void *priv;
+- int i = 0, err = 0;
++ struct device *ctrldev, **jrdev;
++ struct caam_drv_private *priv;
++ int i = 0, err = 0, md_limit = 0;
++ int des_inst, aes_inst, md_inst;
++ u64 cha_inst;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+@@ -4265,42 +2351,117 @@
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+- of_node_put(dev_node);
+
+- /*
+- * If priv is NULL, it's probably because the caam driver wasn't
+- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+- */
+- if (!priv)
+- return -ENODEV;
++ INIT_LIST_HEAD(&priv->alg_list);
++
++ jrdev = kmalloc(sizeof(*jrdev) * priv->total_jobrs, GFP_ATOMIC);
++ if (!jrdev) {
++ of_node_put(dev_node);
++ return -ENOMEM;
++ }
+
++ for (i = 0; i < priv->total_jobrs; i++) {
++ err = caam_jr_register(ctrldev, &jrdev[i]);
++ if (err < 0)
++ break;
++ }
++ if (err < 0 && i == 0) {
++ dev_err(ctrldev, "algapi error in job ring registration: %d\n",
++ err);
++ of_node_put(dev_node);
++ kfree(jrdev);
++ return err;
++ }
+
+- INIT_LIST_HEAD(&alg_list);
++ priv->num_jrs_for_algapi = i;
++ priv->algapi_jr = jrdev;
++ atomic_set(&priv->tfm_count, -1);
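++	/* start at -1 so the first atomic_inc_return() maps to job ring 0 */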
++
++ /*
++ * register crypto algorithms the device supports
++ * first, detect presence of DES, AES, and MD blocks. If MD present,
++ * determine limit of supported digest size
++ */
++ cha_inst = rd_reg64(&priv->ctrl->perfmon.cha_num);
++ des_inst = (cha_inst & CHA_ID_DES_MASK) >> CHA_ID_DES_SHIFT;
++ aes_inst = (cha_inst & CHA_ID_AES_MASK) >> CHA_ID_AES_SHIFT;
++ md_inst = (cha_inst & CHA_ID_MD_MASK) >> CHA_ID_MD_SHIFT;
++ if (md_inst) {
++ md_limit = SHA512_DIGEST_SIZE;
++ if ((rd_reg64(&priv->ctrl->perfmon.cha_id) & CHA_ID_MD_MASK)
++ == CHA_ID_MD_LP256) /* LP256 limits digest size */
++ md_limit = SHA256_DIGEST_SIZE;
++ }
+
+- /* register crypto algorithms the device supports */
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+- /* TODO: check if h/w supports alg */
+ struct caam_crypto_alg *t_alg;
++ bool done = false;
++
++authencesn:
++ /*
++	 * All registrable algs in this module require a block cipher.
++	 * All AEAD algs also require a message digest, so check those
++	 * for instantiation and supported digest size.
++ */
++ if (driver_algs[i].type == CRYPTO_ALG_TYPE_AEAD) {
++ /* If no MD instantiated, or MD too small, skip */
++ if ((!md_inst) ||
++ (driver_algs[i].template_aead.maxauthsize >
++ md_limit))
++ continue;
++ }
++ /* If DES alg, and CHA not instantiated, skip */
++ if ((driver_algs[i].class1_alg_type & OP_ALG_ALGSEL_3DES) ||
++ (driver_algs[i].class1_alg_type & OP_ALG_ALGSEL_DES))
++ if (!des_inst)
++ continue;
++ /* If AES alg, and CHA not instantiated, skip */
++ if (driver_algs[i].class1_alg_type & OP_ALG_ALGSEL_AES)
++ if (!aes_inst)
++ continue;
+
+- t_alg = caam_alg_alloc(&driver_algs[i]);
++ t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+- pr_warn("%s alg allocation failed\n",
+- driver_algs[i].driver_name);
++ dev_warn(ctrldev, "%s alg allocation failed\n",
++ driver_algs[i].driver_name);
+ continue;
+ }
+
+ err = crypto_register_alg(&t_alg->crypto_alg);
+ if (err) {
+- pr_warn("%s alg registration failed\n",
++ dev_warn(ctrldev, "%s alg registration failed\n",
+ t_alg->crypto_alg.cra_driver_name);
+ kfree(t_alg);
+- } else
+- list_add_tail(&t_alg->entry, &alg_list);
++ } else {
++ list_add_tail(&t_alg->entry, &priv->alg_list);
++ dev_info(ctrldev, "%s\n",
++ t_alg->crypto_alg.cra_driver_name);
++
++ if (driver_algs[i].type == CRYPTO_ALG_TYPE_AEAD &&
++ !memcmp(driver_algs[i].name, "authenc", 7) &&
++ !done) {
++ char *name;
++
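++				/*
++				 * rewrite "authenc(...)" in place into
++				 * "authencesn(...)", then loop back once to
++				 * register the ESN variant as well
++				 */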
++ name = driver_algs[i].name;
++ memmove(name + 10, name + 7, strlen(name) - 7);
++ memcpy(name + 7, "esn", 3);
++
++ name = driver_algs[i].driver_name;
++ memmove(name + 10, name + 7, strlen(name) - 7);
++ memcpy(name + 7, "esn", 3);
++
++ done = true;
++ goto authencesn;
++ }
++ }
+ }
+- if (!list_empty(&alg_list))
+- pr_info("caam algorithms registered in /proc/crypto\n");
+
++ if (!list_empty(&priv->alg_list))
++ dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",
++ (char *)of_get_property(dev_node, "compatible", NULL));
++
++ of_node_put(dev_node);
+ return err;
+ }
+
+diff -Nur linux-4.1.3/drivers/crypto/caam/caamhash.c linux-xbian-imx6/drivers/crypto/caam/caamhash.c
+--- linux-4.1.3/drivers/crypto/caam/caamhash.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/crypto/caam/caamhash.c 2015-07-27 23:13:04.209961631 +0200
+@@ -1,7 +1,7 @@
+ /*
+ * caam - Freescale FSL CAAM support for ahash functions of crypto API
+ *
+- * Copyright 2011 Freescale Semiconductor, Inc.
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
+ *
+ * Based on caamalg.c crypto API driver.
+ *
+@@ -62,6 +62,7 @@
+ #include "error.h"
+ #include "sg_sw_sec4.h"
+ #include "key_gen.h"
++#include <linux/string.h>
+
+ #define CAAM_CRA_PRIORITY 3000
+
+@@ -72,6 +73,8 @@
+ #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
+
+ /* length of descriptors text */
++#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
++
+ #define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
+ #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
+ #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+@@ -89,14 +92,13 @@
+
+ #ifdef DEBUG
+ /* for print_hex_dumps with line references */
++#define xstr(s) str(s)
++#define str(s) #s
+ #define debug(format, arg...) printk(format, arg)
+ #else
+ #define debug(format, arg...)
+ #endif
+
+-
+-static struct list_head hash_list;
+-
+ /* ahash per-session context */
+ struct caam_hash_ctx {
+ struct device *jrdev;
+@@ -115,6 +117,7 @@
+ u8 key[CAAM_MAX_HASH_KEY_SIZE];
+ dma_addr_t key_dma;
+ int ctx_len;
++ unsigned int key_len;
+ unsigned int split_key_len;
+ unsigned int split_key_pad_len;
+ };
+@@ -137,20 +140,13 @@
+ /* Common job descriptor seq in/out ptr routines */
+
+ /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
+-static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
+- struct caam_hash_state *state,
+- int ctx_len)
++static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
++ struct caam_hash_state *state,
++ int ctx_len)
+ {
+ state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
+ ctx_len, DMA_FROM_DEVICE);
+- if (dma_mapping_error(jrdev, state->ctx_dma)) {
+- dev_err(jrdev, "unable to map ctx\n");
+- return -ENOMEM;
+- }
+-
+ append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
+-
+- return 0;
+ }
+
+ /* Map req->result, and append seq_out_ptr command that points to it */
+@@ -173,6 +169,7 @@
+ dma_addr_t buf_dma;
+
+ buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
++ dma_sync_single_for_device(jrdev, buf_dma, buflen, DMA_TO_DEVICE);
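++	/* ensure the CPU-written buffer is visible to CAAM before the DMA */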
+ dma_to_sec4_sg_one(sec4_sg, buf_dma, buflen, 0);
+
+ return buf_dma;
+@@ -208,19 +205,17 @@
+ }
+
+ /* Map state->caam_ctx, and add it to link table */
+-static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
+- struct caam_hash_state *state, int ctx_len,
+- struct sec4_sg_entry *sec4_sg, u32 flag)
++static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
++ struct caam_hash_state *state,
++ int ctx_len,
++ struct sec4_sg_entry *sec4_sg,
++ u32 flag)
+ {
+ state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
+- if (dma_mapping_error(jrdev, state->ctx_dma)) {
+- dev_err(jrdev, "unable to map ctx\n");
+- return -ENOMEM;
+- }
+-
++ if ((flag == DMA_TO_DEVICE) || (flag == DMA_BIDIRECTIONAL))
++ dma_sync_single_for_device(jrdev, state->ctx_dma, ctx_len,
++ flag);
+ dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
+-
+- return 0;
+ }
+
+ /* Common shared descriptor commands */
+@@ -231,6 +226,13 @@
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ }
+
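++/* Load the raw XCBC key, as immediate data, into the class 1 key register */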
++static inline void append_key_axcbc(u32 *desc, struct caam_hash_ctx *ctx)
++{
++ append_key_as_imm(desc, ctx->key, ctx->key_len,
++ ctx->key_len, CLASS_1 |
++ KEY_DEST_CLASS_REG);
++}
++
+ /* Append key if it has been set */
+ static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
+ {
+@@ -252,6 +254,25 @@
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+ }
+
++static inline void init_sh_desc_key_axcbc(u32 *desc, struct caam_hash_ctx *ctx)
++{
++ u32 *key_jump_cmd;
++
++ init_sh_desc(desc, HDR_SHARE_SERIAL);
++
++ if (ctx->key_len) {
++ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++
++ append_key_axcbc(desc, ctx);
++
++ set_jump_tgt_here(desc, key_jump_cmd);
++ }
++
++ /* Propagate errors from shared to job descriptor */
++	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
++}
++
+ /*
+ * For ahash read data from seqin following state->caam_ctx,
+ * and write resulting class2 context to seqout, which may be state->caam_ctx
+@@ -271,6 +292,20 @@
+ LDST_SRCDST_BYTE_CONTEXT);
+ }
+
++static inline void axcbc_append_load_str(u32 *desc, int digestsize)
++{
++ /* Calculate remaining bytes to read */
++ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
++
++ /* Read remaining bytes */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_LAST1 |
++ FIFOLD_TYPE_MSG | KEY_VLF);
++
++ /* Store class1 context bytes */
++ append_seq_store(desc, digestsize, LDST_CLASS_1_CCB |
++ LDST_SRCDST_BYTE_CONTEXT);
++}
++
+ /*
+ * For ahash update, final and finup, import context, read and write to seqout
+ */
+@@ -293,6 +328,27 @@
+ ahash_append_load_str(desc, digestsize);
+ }
+
++/*
++ * For ahash update, final and finup, import context, read and write to seqout
++ */
++static inline void axcbc_ctx_data_to_out(u32 *desc, u32 op, u32 state,
++ int digestsize,
++ struct caam_hash_ctx *ctx)
++{
++ init_sh_desc_key_axcbc(desc, ctx);
++
++ /* Import context from software */
++ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
++ LDST_CLASS_1_CCB | ctx->ctx_len);
++
++ /* Class 1 operation */
++ append_operation(desc, op | state | OP_ALG_ENCRYPT);
++
++ /*
++ * Load from buf and/or src and write to req->result or state->context
++ */
++	axcbc_append_load_str(desc, digestsize);
++}
++
+ /* For ahash firsts and digest, read and write to seqout */
+ static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
+ int digestsize, struct caam_hash_ctx *ctx)
+@@ -308,6 +364,21 @@
+ ahash_append_load_str(desc, digestsize);
+ }
+
++/* For ahash firsts and digest, read and write to seqout */
++static inline void axcbc_data_to_out(u32 *desc, u32 op, u32 state,
++ int digestsize, struct caam_hash_ctx *ctx)
++{
++ init_sh_desc_key_axcbc(desc, ctx);
++
++ /* Class 1 operation */
++ append_operation(desc, op | state | OP_ALG_ENCRYPT);
++
++ /*
++ * Load from buf and/or src and write to req->result or state->context
++ */
++ axcbc_append_load_str(desc, digestsize);
++}
++
+ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
+ {
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+@@ -342,8 +413,7 @@
+ return -ENOMEM;
+ }
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR,
+- "ahash update shdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+ #endif
+
+@@ -361,10 +431,11 @@
+ return -ENOMEM;
+ }
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR,
+- "ahash update first shdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+ #endif
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+ /* ahash_final shared descriptor */
+ desc = ctx->sh_desc_fin;
+@@ -379,10 +450,12 @@
+ return -ENOMEM;
+ }
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+ #endif
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+ /* ahash_finup shared descriptor */
+ desc = ctx->sh_desc_finup;
+@@ -397,10 +470,12 @@
+ return -ENOMEM;
+ }
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "ahash finup shdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+ #endif
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_finup_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+ /* ahash_digest shared descriptor */
+ desc = ctx->sh_desc_digest;
+@@ -416,15 +491,134 @@
+ return -ENOMEM;
+ }
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR,
+- "ahash digest shdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+ #endif
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
+
+ return 0;
+ }
+
++static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
++{
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ int digestsize = crypto_ahash_digestsize(ahash);
++ struct device *jrdev = ctx->jrdev;
++ u32 have_key = 0;
++ u32 *desc;
++
++ /* ahash_update shared descriptor */
++ desc = ctx->sh_desc_update;
++
++ init_sh_desc(desc, HDR_SHARE_SERIAL);
++
++ /* Import context from software */
++ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
++ LDST_CLASS_1_CCB | ctx->ctx_len);
++
++ /* Class 1 operation */
++ append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
++ OP_ALG_ENCRYPT);
++
++ /* Load data and write to result or context */
++ axcbc_append_load_str(desc, ctx->ctx_len);
++
++ ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
++ dev_err(jrdev, "unable to map shared descriptor\n");
++ return -ENOMEM;
++ }
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "ahash update shdesc@"xstr(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++
++ /* ahash_update_first shared descriptor */
++ desc = ctx->sh_desc_update_first;
++
++ axcbc_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
++ ctx->ctx_len, ctx);
++
++ ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
++ desc_bytes(desc),
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
++ dev_err(jrdev, "unable to map shared descriptor\n");
++ return -ENOMEM;
++ }
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "ahash update first shdesc@"xstr(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
++
++ /* ahash_final shared descriptor */
++ desc = ctx->sh_desc_fin;
++
++ axcbc_ctx_data_to_out(desc, have_key | ctx->alg_type,
++ OP_ALG_AS_FINALIZE, digestsize, ctx);
++
++ ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
++ dev_err(jrdev, "unable to map shared descriptor\n");
++ return -ENOMEM;
++ }
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "ahash final shdesc@"xstr(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc,
++ desc_bytes(desc), 1);
++#endif
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
++
++ /* ahash_finup shared descriptor */
++ desc = ctx->sh_desc_finup;
++
++ axcbc_ctx_data_to_out(desc, have_key | ctx->alg_type,
++ OP_ALG_AS_FINALIZE, digestsize, ctx);
++
++ ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) {
++ dev_err(jrdev, "unable to map shared descriptor\n");
++ return -ENOMEM;
++ }
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "ahash finup shdesc@"xstr(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc,
++ desc_bytes(desc), 1);
++#endif
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_finup_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
++
++ /* ahash_digest shared descriptor */
++ desc = ctx->sh_desc_digest;
++
++ axcbc_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
++ digestsize, ctx);
++
++ ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
++ desc_bytes(desc),
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
++ dev_err(jrdev, "unable to map shared descriptor\n");
++ return -ENOMEM;
++ }
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "ahash digest shdesc@"xstr(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc,
++ desc_bytes(desc), 1);
++#endif
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
++ desc_bytes(desc), DMA_TO_DEVICE);
++
++ return 0;
++}
++
+ static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+ u32 keylen)
+ {
+@@ -458,6 +652,8 @@
+ kfree(desc);
+ return -ENOMEM;
+ }
++ dma_sync_single_for_device(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
++
+ dst_dma = dma_map_single(jrdev, (void *)key_out, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, dst_dma)) {
+@@ -478,9 +674,9 @@
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "key_in@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "key_in@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
+- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+ #endif
+
+@@ -493,17 +689,17 @@
+ wait_for_completion_interruptible(&result.completion);
+ ret = result.err;
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR,
+- "digested key@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "digested key@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in,
+ digestsize, 1);
+ #endif
+ }
+	dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
++	dma_sync_single_for_cpu(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
+	dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
+
+	*keylen = digestsize;
+
+ kfree(desc);
+
+ return ret;
+@@ -545,7 +741,7 @@
+ #ifdef DEBUG
+ printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
+ ctx->split_key_len, ctx->split_key_pad_len);
+- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+ #endif
+
+@@ -557,11 +753,14 @@
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+- ret = -ENOMEM;
+- goto map_err;
++ return -ENOMEM;
+ }
++
++ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->split_key_pad_len,
++ DMA_TO_DEVICE);
++
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->split_key_pad_len, 1);
+ #endif
+@@ -572,7 +771,6 @@
+ DMA_TO_DEVICE);
+ }
+
+-map_err:
+ kfree(hashed_key);
+ return ret;
+ badkey:
+@@ -581,6 +779,25 @@
+ return -EINVAL;
+ }
+
++static int axcbc_setkey(struct crypto_ahash *ahash,
++ const u8 *key, unsigned int keylen)
++{
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ int ret = 0;
++
++ ctx->key_len = keylen;
++ memcpy(ctx->key, key, keylen);
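++	/* the raw key is inlined into the shared descriptors built below */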
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
++ ctx->key_len, 1);
++#endif
++
++ ret = axcbc_set_sh_desc(ahash);
++
++ return ret;
++}
++
+ /*
+ * ahash_edesc - s/w-extended ahash descriptor
+ * @dst_dma: physical mapped address of req->result
+@@ -608,8 +825,11 @@
+ if (edesc->src_nents)
+ dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
+ DMA_TO_DEVICE, edesc->chained);
+- if (edesc->dst_dma)
++ if (edesc->dst_dma) {
++ dma_sync_single_for_cpu(dev, edesc->dst_dma, dst_len,
++ DMA_FROM_DEVICE);
+ dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
++ }
+
+ if (edesc->sec4_sg_bytes)
+ dma_unmap_single(dev, edesc->sec4_sg_dma,
+@@ -624,8 +844,12 @@
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+- if (state->ctx_dma)
++ if (state->ctx_dma) {
++ if ((flag == DMA_FROM_DEVICE) || (flag == DMA_BIDIRECTIONAL))
++ dma_sync_single_for_cpu(dev, state->ctx_dma,
++ ctx->ctx_len, flag);
+ dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
++ }
+ ahash_unmap(dev, edesc, req, dst_len);
+ }
+
+@@ -645,18 +869,21 @@
+
+ edesc = (struct ahash_edesc *)((char *)desc -
+ offsetof(struct ahash_edesc, hw_desc));
+- if (err)
+- caam_jr_strstatus(jrdev, err);
++ if (err) {
++ char tmp[CAAM_ERROR_STR_MAX];
++
++ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
++ }
+
+ ahash_unmap(jrdev, edesc, req, digestsize);
+ kfree(edesc);
+
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+- print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+ #endif
+@@ -680,18 +907,21 @@
+
+ edesc = (struct ahash_edesc *)((char *)desc -
+ offsetof(struct ahash_edesc, hw_desc));
+- if (err)
+- caam_jr_strstatus(jrdev, err);
++ if (err) {
++ char tmp[CAAM_ERROR_STR_MAX];
++
++ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
++ }
+
+ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ kfree(edesc);
+
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+- print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+ #endif
+@@ -715,18 +945,21 @@
+
+ edesc = (struct ahash_edesc *)((char *)desc -
+ offsetof(struct ahash_edesc, hw_desc));
+- if (err)
+- caam_jr_strstatus(jrdev, err);
++ if (err) {
++ char tmp[CAAM_ERROR_STR_MAX];
+
+- ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
++ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
++ }
++
++ ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ kfree(edesc);
+
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+- print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+ #endif
+@@ -750,18 +983,21 @@
+
+ edesc = (struct ahash_edesc *)((char *)desc -
+ offsetof(struct ahash_edesc, hw_desc));
+- if (err)
+- caam_jr_strstatus(jrdev, err);
++ if (err) {
++ char tmp[CAAM_ERROR_STR_MAX];
++
++ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
++ }
+
+- ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
++ ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ kfree(edesc);
+
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "ctx@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+- print_hex_dump(KERN_ERR, "result@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "result@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+ #endif
+@@ -807,7 +1043,7 @@
+ * allocate space for base edesc and hw desc commands,
+ * link tables
+ */
+- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
++ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev,
+@@ -820,11 +1056,12 @@
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
++ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
++ sec4_sg_bytes,
++ DMA_TO_DEVICE);
+
+- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+- edesc->sec4_sg, DMA_BIDIRECTIONAL);
+- if (ret)
+- return ret;
++ ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
++ edesc->sec4_sg, DMA_BIDIRECTIONAL);
+
+ state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
+ edesc->sec4_sg + 1,
+@@ -851,21 +1088,16 @@
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
+ HDR_REVERSE);
+
+- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+- sec4_sg_bytes,
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+- dev_err(jrdev, "unable to map S/G table\n");
+- return -ENOMEM;
+- }
+-
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
+ to_hash, LDST_SGF);
+
+ append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0);
+
++ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma,
++ sec4_sg_bytes, DMA_TO_DEVICE);
++
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+ #endif
+@@ -885,9 +1117,9 @@
+ *next_buflen = last_buflen;
+ }
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+- print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+ *next_buflen, 1);
+ #endif
+@@ -918,7 +1150,7 @@
+ sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
++ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+@@ -932,37 +1164,29 @@
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
++ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
++ sec4_sg_bytes, DMA_TO_DEVICE);
+ edesc->src_nents = 0;
+
+- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+- edesc->sec4_sg, DMA_TO_DEVICE);
+- if (ret)
+- return ret;
++ ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
++ DMA_TO_DEVICE);
+
+ state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
+ buf, state->buf_dma, buflen,
+ last_buflen);
+ (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
+
+- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+- sec4_sg_bytes, DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+- dev_err(jrdev, "unable to map S/G table\n");
+- return -ENOMEM;
+- }
+-
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
+ LDST_SGF);
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+- dev_err(jrdev, "unable to map dst\n");
+- return -ENOMEM;
+- }
++
++ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
++ DMA_TO_DEVICE);
+
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+ #endif
+
+@@ -1005,7 +1229,7 @@
+ sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
++ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+@@ -1021,11 +1245,11 @@
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
++ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
++ sec4_sg_bytes, DMA_TO_DEVICE);
+
+- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+- edesc->sec4_sg, DMA_TO_DEVICE);
+- if (ret)
+- return ret;
++ ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
++ DMA_TO_DEVICE);
+
+ state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
+ buf, state->buf_dma, buflen,
+@@ -1034,25 +1258,17 @@
+ src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
+ sec4_sg_src_index, chained);
+
+- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+- sec4_sg_bytes, DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+- dev_err(jrdev, "unable to map S/G table\n");
+- return -ENOMEM;
+- }
+-
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
+ buflen + req->nbytes, LDST_SGF);
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+- dev_err(jrdev, "unable to map dst\n");
+- return -ENOMEM;
+- }
++
++ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
++ DMA_TO_DEVICE);
+
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+ #endif
+
+@@ -1091,7 +1307,7 @@
+ sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+- edesc = kmalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
++ edesc = kzalloc(sizeof(struct ahash_edesc) + sec4_sg_bytes +
+ DESC_JOB_IO_LEN, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+@@ -1099,6 +1315,8 @@
+ }
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
++ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
++ sec4_sg_bytes, DMA_TO_DEVICE);
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->src_nents = src_nents;
+ edesc->chained = chained;
+@@ -1109,12 +1327,6 @@
+
+ if (src_nents) {
+ sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+- sec4_sg_bytes, DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+- dev_err(jrdev, "unable to map S/G table\n");
+- return -ENOMEM;
+- }
+ src_dma = edesc->sec4_sg_dma;
+ options = LDST_SGF;
+ } else {
+@@ -1123,15 +1335,14 @@
+ }
+ append_seq_in_ptr(desc, src_dma, req->nbytes, options);
+
++ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma,
++ edesc->sec4_sg_bytes, DMA_TO_DEVICE);
++
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+- dev_err(jrdev, "unable to map dst\n");
+- return -ENOMEM;
+- }
+
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+ #endif
+
+@@ -1165,7 +1376,7 @@
+ int sh_len;
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
++ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN,
+ GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+@@ -1178,23 +1389,17 @@
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, state->buf_dma)) {
+- dev_err(jrdev, "unable to map src\n");
+- return -ENOMEM;
+- }
+
+ append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+- dev_err(jrdev, "unable to map dst\n");
+- return -ENOMEM;
+- }
+ edesc->src_nents = 0;
+
++ dma_sync_single_for_device(jrdev, state->buf_dma, buflen,
++ DMA_TO_DEVICE);
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+ #endif
+
+@@ -1245,7 +1450,7 @@
+ * allocate space for base edesc and hw desc commands,
+ * link tables
+ */
+- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
++ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev,
+@@ -1258,7 +1463,9 @@
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+- edesc->dst_dma = 0;
++ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
++ sec4_sg_bytes,
++ DMA_TO_DEVICE);
+
+ state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
+ buf, *buflen);
+@@ -1276,22 +1483,14 @@
+ init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
+ HDR_REVERSE);
+
+- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+- sec4_sg_bytes,
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+- dev_err(jrdev, "unable to map S/G table\n");
+- return -ENOMEM;
+- }
+-
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
+
+- ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+- if (ret)
+- return ret;
++ map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+
++ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma,
++ sec4_sg_bytes, DMA_TO_DEVICE);
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+ #endif
+@@ -1314,9 +1513,9 @@
+ *next_buflen = 0;
+ }
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "buf@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "buf@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+- print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+ *next_buflen, 1);
+ #endif
+@@ -1352,7 +1551,7 @@
+ sizeof(struct sec4_sg_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
++ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+@@ -1368,6 +1567,8 @@
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
++ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
++ sec4_sg_bytes, DMA_TO_DEVICE);
+
+ state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
+ state->buf_dma, buflen,
+@@ -1376,25 +1577,17 @@
+ src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
+ chained);
+
+- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+- sec4_sg_bytes, DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+- dev_err(jrdev, "unable to map S/G table\n");
+- return -ENOMEM;
+- }
+-
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
+ req->nbytes, LDST_SGF);
+
+ edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
+ digestsize);
+- if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+- dev_err(jrdev, "unable to map dst\n");
+- return -ENOMEM;
+- }
++
++ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma, sec4_sg_bytes,
++ DMA_TO_DEVICE);
+
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+ #endif
+
+@@ -1447,7 +1640,7 @@
+ * allocate space for base edesc and hw desc commands,
+ * link tables
+ */
+- edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
++ edesc = kzalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+ sec4_sg_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev,
+@@ -1460,19 +1653,13 @@
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+ edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
+ DESC_JOB_IO_LEN;
+- edesc->dst_dma = 0;
++ edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
++ sec4_sg_bytes,
++ DMA_TO_DEVICE);
+
+ if (src_nents) {
+ sg_to_sec4_sg_last(req->src, src_nents,
+ edesc->sec4_sg, 0);
+- edesc->sec4_sg_dma = dma_map_single(jrdev,
+- edesc->sec4_sg,
+- sec4_sg_bytes,
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+- dev_err(jrdev, "unable to map S/G table\n");
+- return -ENOMEM;
+- }
+ src_dma = edesc->sec4_sg_dma;
+ options = LDST_SGF;
+ } else {
+@@ -1491,12 +1678,12 @@
+
+ append_seq_in_ptr(desc, src_dma, to_hash, options);
+
+- ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+- if (ret)
+- return ret;
++ map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+
++ dma_sync_single_for_device(jrdev, edesc->sec4_sg_dma,
++ sec4_sg_bytes, DMA_TO_DEVICE);
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+ #endif
+@@ -1521,7 +1708,7 @@
+ req->nbytes, 0);
+ }
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "next buf@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
+ *next_buflen, 1);
+ #endif
+@@ -1735,10 +1922,33 @@
+ .alg_type = OP_ALG_ALGSEL_MD5,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
++ {
++ .name = "xcbc(aes)",
++ .driver_name = "xcbc-aes-caam",
++ .hmac_name = "xcbc(aes)",
++ .hmac_driver_name = "xcbc-aes-caam",
++ .blocksize = XCBC_MAC_BLOCK_WORDS * 4,
++ .template_ahash = {
++ .init = ahash_init,
++ .update = ahash_update,
++ .final = ahash_final,
++ .finup = ahash_finup,
++ .digest = ahash_digest,
++ .export = ahash_export,
++ .import = ahash_import,
++ .setkey = axcbc_setkey,
++ .halg = {
++ .digestsize = XCBC_MAC_DIGEST_SIZE,
++ },
++ },
++ .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
++ .alg_op = OP_ALG_ALGSEL_AES,
++ },
+ };
+
+ struct caam_hash_alg {
+ struct list_head entry;
++ struct device *ctrldev;
+ int alg_type;
+ int alg_op;
+ struct ahash_alg ahash_alg;
+@@ -1755,6 +1965,7 @@
+ struct caam_hash_alg *caam_hash =
+ container_of(alg, struct caam_hash_alg, ahash_alg);
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
++ struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
+ /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
+ HASH_MSG_LEN + SHA1_DIGEST_SIZE,
+@@ -1762,17 +1973,15 @@
+ HASH_MSG_LEN + SHA256_DIGEST_SIZE,
+ HASH_MSG_LEN + 64,
+ HASH_MSG_LEN + SHA512_DIGEST_SIZE };
++ int tgt_jr = atomic_inc_return(&priv->tfm_count);
+ int ret = 0;
+
+ /*
+- * Get a Job ring from Job Ring driver to ensure in-order
++ * distribute tfms across job rings to ensure in-order
+ * crypto request processing per tfm
+ */
+- ctx->jrdev = caam_jr_alloc();
+- if (IS_ERR(ctx->jrdev)) {
+- pr_err("Job Ring Device allocation for transform failed\n");
+- return PTR_ERR(ctx->jrdev);
+- }
++ ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs];
++
+ /* copy descriptor header template value */
+ ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
+ ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
+@@ -1788,6 +1997,39 @@
+ return ret;
+ }
+
++static int caam_axcbc_cra_init(struct crypto_tfm *tfm)
++{
++ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
++ struct crypto_alg *base = tfm->__crt_alg;
++ struct hash_alg_common *halg =
++ container_of(base, struct hash_alg_common, base);
++ struct ahash_alg *alg =
++ container_of(halg, struct ahash_alg, halg);
++ struct caam_hash_alg *caam_hash =
++ container_of(alg, struct caam_hash_alg, ahash_alg);
++ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
++ struct caam_drv_private *priv = dev_get_drvdata(caam_hash->ctrldev);
++ int tgt_jr = atomic_inc_return(&priv->tfm_count);
++ int ret = 0;
++
++ /*
++ * distribute tfms across job rings to ensure in-order
++ * crypto request processing per tfm
++ */
++ ctx->jrdev = priv->jrdev[tgt_jr % priv->total_jobrs];
++
++ /* copy descriptor header template value */
++ ctx->alg_type = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
++ ctx->alg_op = OP_TYPE_CLASS1_ALG | caam_hash->alg_op;
++
++ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
++ sizeof(struct caam_hash_state));
++
++ ret = axcbc_set_sh_desc(ahash);
++
++ return ret;
++}
++
+ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
+ {
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+@@ -1815,35 +2057,57 @@
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma,
+ desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE);
+-
+- caam_jr_free(ctx->jrdev);
+ }
+
+ static void __exit caam_algapi_hash_exit(void)
+ {
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++ struct device *ctrldev;
++ struct caam_drv_private *priv;
+ struct caam_hash_alg *t_alg, *n;
+
+- if (!hash_list.next)
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev) {
++ of_node_put(dev_node);
+ return;
++ }
+
+- list_for_each_entry_safe(t_alg, n, &hash_list, entry) {
++ ctrldev = &pdev->dev;
++ priv = dev_get_drvdata(ctrldev);
++
++ if (!priv->hash_list.next) {
++ of_node_put(dev_node);
++ return;
++ }
++
++ list_for_each_entry_safe(t_alg, n, &priv->hash_list, entry) {
+ crypto_unregister_ahash(&t_alg->ahash_alg);
+ list_del(&t_alg->entry);
+ kfree(t_alg);
+ }
++
++ of_node_put(dev_node);
+ }
+
+ static struct caam_hash_alg *
+-caam_hash_alloc(struct caam_hash_template *template,
++caam_hash_alloc(struct device *ctrldev, struct caam_hash_template *template,
+ bool keyed)
+ {
+ struct caam_hash_alg *t_alg;
+ struct ahash_alg *halg;
+ struct crypto_alg *alg;
+
+- t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_KERNEL);
++ t_alg = kzalloc(sizeof(struct caam_hash_alg), GFP_ATOMIC);
+ if (!t_alg) {
+- pr_err("failed to allocate t_alg\n");
++ dev_err(ctrldev, "failed to allocate t_alg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+@@ -1863,7 +2127,11 @@
+ template->driver_name);
+ }
+ alg->cra_module = THIS_MODULE;
+- alg->cra_init = caam_hash_cra_init;
++
++	if (strstr(alg->cra_name, "xcbc"))
++ alg->cra_init = caam_axcbc_cra_init;
++ else
++ alg->cra_init = caam_hash_cra_init;
+ alg->cra_exit = caam_hash_cra_exit;
+ alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
+ alg->cra_priority = CAAM_CRA_PRIORITY;
+@@ -1874,6 +2142,7 @@
+
+ t_alg->alg_type = template->alg_type;
+ t_alg->alg_op = template->alg_op;
++ t_alg->ctrldev = ctrldev;
+
+ return t_alg;
+ }
+@@ -1883,8 +2152,9 @@
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+ struct device *ctrldev;
+- void *priv;
+- int i = 0, err = 0;
++ struct caam_drv_private *priv;
++ int i = 0, err = 0, md_limit = 0, md_inst;
++ u64 cha_inst;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+@@ -1898,60 +2168,68 @@
+ of_node_put(dev_node);
+ return -ENODEV;
+ }
+-
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+- of_node_put(dev_node);
+
+- /*
+- * If priv is NULL, it's probably because the caam driver wasn't
+- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+- */
+- if (!priv)
+- return -ENODEV;
++ INIT_LIST_HEAD(&priv->hash_list);
+
+- INIT_LIST_HEAD(&hash_list);
++ atomic_set(&priv->tfm_count, -1);
++
++ /* register algorithms the device supports */
++ cha_inst = rd_reg64(&priv->ctrl->perfmon.cha_num);
++ md_inst = (cha_inst & CHA_ID_MD_MASK) >> CHA_ID_MD_SHIFT;
++ if (md_inst) {
++ md_limit = SHA512_DIGEST_SIZE;
++ if ((rd_reg64(&priv->ctrl->perfmon.cha_id) & CHA_ID_MD_MASK)
++ == CHA_ID_MD_LP256) /* LP256 limits digest size */
++ md_limit = SHA256_DIGEST_SIZE;
++ }
+
+- /* register crypto algorithms the device supports */
+ for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
+- /* TODO: check if h/w supports alg */
+ struct caam_hash_alg *t_alg;
+
++ /* If no MD instantiated, or MD too small, skip */
++ if ((!md_inst) ||
++ (driver_hash[i].template_ahash.halg.digestsize >
++ md_limit))
++ continue;
++
+ /* register hmac version */
+- t_alg = caam_hash_alloc(&driver_hash[i], true);
++ t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], true);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+- pr_warn("%s alg allocation failed\n",
+- driver_hash[i].driver_name);
++ dev_warn(ctrldev, "%s alg allocation failed\n",
++ driver_hash[i].driver_name);
+ continue;
+ }
+
+ err = crypto_register_ahash(&t_alg->ahash_alg);
+ if (err) {
+- pr_warn("%s alg registration failed\n",
++ dev_warn(ctrldev, "%s alg registration failed\n",
+ t_alg->ahash_alg.halg.base.cra_driver_name);
+ kfree(t_alg);
+ } else
+- list_add_tail(&t_alg->entry, &hash_list);
++ list_add_tail(&t_alg->entry, &priv->hash_list);
+
+ /* register unkeyed version */
+- t_alg = caam_hash_alloc(&driver_hash[i], false);
++ t_alg = caam_hash_alloc(ctrldev, &driver_hash[i], false);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+- pr_warn("%s alg allocation failed\n",
+- driver_hash[i].driver_name);
++ dev_warn(ctrldev, "%s alg allocation failed\n",
++ driver_hash[i].driver_name);
+ continue;
+ }
+
+ err = crypto_register_ahash(&t_alg->ahash_alg);
+ if (err) {
+- pr_warn("%s alg registration failed\n",
++ dev_warn(ctrldev, "%s alg registration failed\n",
+ t_alg->ahash_alg.halg.base.cra_driver_name);
+ kfree(t_alg);
+ } else
+- list_add_tail(&t_alg->entry, &hash_list);
++ list_add_tail(&t_alg->entry, &priv->hash_list);
+ }
+
++ of_node_put(dev_node);
+ return err;
+ }
+
+diff -Nur linux-4.1.3/drivers/crypto/caam/caam_keyblob.c linux-xbian-imx6/drivers/crypto/caam/caam_keyblob.c
+--- linux-4.1.3/drivers/crypto/caam/caam_keyblob.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/crypto/caam/caam_keyblob.c 2015-07-27 23:13:04.205975852 +0200
+@@ -0,0 +1,687 @@
++/*
++ * Key blob driver based on CAAM hardware
++ *
++ * Copyright (C) 2015 Freescale Semiconductor, Inc.
++ */
++
++#include <linux/of_irq.h>
++#include <linux/of_address.h>
++
++#include "compat.h"
++#include "regs.h"
++#include "jr.h"
++#include "desc.h"
++#include "intern.h"
++#include "sm.h"
++#include "caam_keyblob.h"
++
++#define INITIAL_DESCSZ 16	/* size of tmp buffer for descriptor construction */
++
++/**
++ * struct kb_device - the metadata of the caam key blob device node
++ * @misc_dev: the actual misc device
++ * @jr_dev: the CAAM job ring device held on behalf of this node
++ */
++struct kb_device {
++ struct miscdevice misc_dev;
++ struct device *jr_dev;
++};
++
++/*
++ * Pseudo-synchronous ring access functions for carrying out key
++ * encapsulation and decapsulation
++ */
++
++struct sm_key_job_result {
++ int error;
++ struct completion completion;
++};
++
++
++static struct kb_device *kb_dev;
++
++static struct kb_device *kb_device_create(void);
++static int kb_device_destroy(struct kb_device *kb_dev);
++static int kb_open(struct inode *inode, struct file *file);
++static int kb_release(struct inode *inode, struct file *file);
++static void sm_key_job_done(struct device *dev, u32 *desc,
++ u32 err, void *context);
++static int gen_mem_encap(struct device *jr_dev, void __user *secretbuf,
++ int keylen, void __user *kmodbuf, void __user *outbuf);
++static int gen_mem_decap(struct device *jr_dev, void __user *keyblobbuf,
++ int bloblen, void __user *kmodbuf, void __user *outbuf);
++static long kb_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
++static int caam_keyblob_probe(struct platform_device *pdev);
++static int caam_keyblob_remove(struct platform_device *pdev);
++
++static int kb_open(struct inode *inode, struct file *file)
++{
++ struct miscdevice *miscdev = file->private_data;
++ struct kb_device *dev = container_of(miscdev, struct kb_device, misc_dev);
++ struct device *jr_dev;
++
++ if (!dev->jr_dev) {
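++	/* only one open may hold the device's job ring at a time */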
++ jr_dev = caam_jr_alloc();
++ if (IS_ERR(jr_dev)) {
++ pr_err("Job Ring Device allocation for transform failed\n");
++ return -ENOMEM;
++ }
++ pr_info("Allocate a job ring device\n");
++ dev->jr_dev = jr_dev;
++	} else {
++		pr_err("Already created a job ring device\n");
++		return -EPERM;
++	}
++
++ return 0;
++}
++
++static int kb_release(struct inode *inode, struct file *file)
++{
++ struct miscdevice *miscdev = file->private_data;
++ struct kb_device *dev = container_of(miscdev, struct kb_device, misc_dev);
++
++ if (dev && dev->jr_dev) {
++ caam_jr_free(dev->jr_dev);
++ pr_info("Free a job ring device\n");
++ dev->jr_dev = NULL;
++ }
++ return 0;
++}
++
++static void sm_key_job_done(struct device *dev, u32 *desc,
++ u32 err, void *context)
++{
++ struct sm_key_job_result *res = context;
++
++ res->error = err; /* save off the error for postprocessing */
++ complete(&res->completion); /* mark us complete */
++}
++
++/*
++ * Construct a blob encapsulation job descriptor
++ *
++ * This function dynamically constructs a blob encapsulation job descriptor
++ * from the following arguments:
++ *
++ * - desc pointer to a pointer to the descriptor generated by this
++ * function. Caller will be responsible to kfree() this
++ * descriptor after execution.
++ * - keymod Physical pointer to a key modifier, which must reside in a
++ * contiguous piece of memory. Modifier will be assumed to be
++ * 8 bytes long for a blob of type SM_SECMEM, or 16 bytes long
++ * for a blob of type SM_GENMEM (see blobtype argument).
++ * - secretbuf Physical pointer to a secret, normally a black or red key,
++ * possibly residing within an accessible secure memory page,
++ * of the secret to be encapsulated to an output blob.
++ * - outbuf Physical pointer to the destination buffer to receive the
++ * encapsulated output. This buffer will need to be 48 bytes
++ * larger than the input because of the added encapsulation data.
++ * The generated descriptor will account for the increase in size,
++ * but the caller must also account for this increase in the
++ * buffer allocator.
++ * - secretsz Size of input secret, in bytes. This is limited to 65536
++ * less the size of blob overhead, since the length embeds into
++ * DECO pointer in/out instructions.
++ * - keycolor Determines if the source data is covered (black key) or
++ * plaintext (red key). RED_KEY or BLACK_KEY are defined in
++ * for this purpose.
++ * - blobtype Determine if encapsulated blob should be a secure memory
++ * blob (SM_SECMEM), with partition data embedded with key
++ * material, or a general memory blob (SM_GENMEM).
++ * - auth If BLACK_KEY source is covered via AES-CCM, specify
++ * KEY_COVER_CCM, else uses AES-ECB (KEY_COVER_ECB).
++ *
++ * Upon completion, desc points to a buffer containing a CAAM job
++ * descriptor which encapsulates data into an externally-storable blob
++ * suitable for use across power cycles.
++ *
++ * This is an example of a black key encapsulation job into a general memory
++ * blob. Notice the 16-byte key modifier in the LOAD instruction. Also note
++ * that the output is 48 bytes longer than the input:
++ *
++ * [00] B0800008 jobhdr: stidx=0 len=8
++ * [01] 14400010 ld: ccb2-key len=16 offs=0
++ * [02] 08144891 ptr->@0x08144891
++ * [03] F800003A seqoutptr: len=58
++ * [04] 01000000 out_ptr->@0x01000000
++ * [05] F000000A seqinptr: len=10
++ * [06] 09745090 in_ptr->@0x09745090
++ * [07] 870D0004 operation: encap blob reg=memory, black, format=normal
++ *
++ * This is an example of a red key encapsulation job for storing a red key
++ * into a secure memory blob. Note the 8 byte modifier on the 12 byte offset
++ * in the LOAD instruction; this accounts for blob permission storage:
++ *
++ * [00] B0800008 jobhdr: stidx=0 len=8
++ * [01] 14400C08 ld: ccb2-key len=8 offs=12
++ * [02] 087D0784 ptr->@0x087d0784
++ * [03] F8000050 seqoutptr: len=80
++ * [04] 09251BB2 out_ptr->@0x09251bb2
++ * [05] F0000020 seqinptr: len=32
++ * [06] 40000F31 in_ptr->@0x40000f31
++ * [07] 870D0008 operation: encap blob reg=memory, red, sec_mem,
++ * format=normal
++ *
++ * Note: this function only generates 32-bit pointers at present, and should
++ * be refactored using a scheme that allows both 32 and 64 bit addressing
++ */
++
++static int blob_encap_jobdesc(u32 **desc, dma_addr_t keymod,
++ void *secretbuf, dma_addr_t outbuf,
++ u16 secretsz, u8 keycolor, u8 blobtype, u8 auth)
++{
++ u32 *tdesc, tmpdesc[INITIAL_DESCSZ];
++ u16 dsize, idx;
++
++ memset(tmpdesc, 0, INITIAL_DESCSZ * sizeof(u32));
++ idx = 1;
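++	/*
++	 * word 0 is reserved for the job header; it is written last,
++	 * once the final descriptor length is known
++	 */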
++
++ /*
++ * Key modifier works differently for secure/general memory blobs
++ * This accounts for the permission/protection data encapsulated
++ * within the blob if a secure memory blob is requested
++ */
++ if (blobtype == SM_SECMEM)
++ tmpdesc[idx++] = CMD_LOAD | LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_KEY |
++ ((12 << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK)
++ | (8 & LDST_LEN_MASK);
++ else /* is general memory blob */
++ tmpdesc[idx++] = CMD_LOAD | LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_KEY | (16 & LDST_LEN_MASK);
++
++ tmpdesc[idx++] = (u32)keymod;
++
++ /*
++ * Encapsulation output must include space for blob key encryption
++ * key and MAC tag
++ */
++ tmpdesc[idx++] = CMD_SEQ_OUT_PTR | (secretsz + BLOB_OVERHEAD);
++ tmpdesc[idx++] = (u32)outbuf;
++
++ /* Input data, should be somewhere in secure memory */
++ tmpdesc[idx++] = CMD_SEQ_IN_PTR | secretsz;
++ tmpdesc[idx++] = (u32)secretbuf;
++
++ /* Set blob encap, then color */
++ tmpdesc[idx] = CMD_OPERATION | OP_TYPE_ENCAP_PROTOCOL | OP_PCLID_BLOB;
++
++ if (blobtype == SM_SECMEM)
++ tmpdesc[idx] |= OP_PCL_BLOB_PTXT_SECMEM;
++
++ if (auth == KEY_COVER_CCM)
++ tmpdesc[idx] |= OP_PCL_BLOB_EKT;
++
++ if (keycolor == BLACK_KEY)
++ tmpdesc[idx] |= OP_PCL_BLOB_BLACK;
++
++ idx++;
++ tmpdesc[0] = CMD_DESC_HDR | HDR_ONE | (idx & HDR_DESCLEN_MASK);
++ dsize = idx * sizeof(u32);
++
++ tdesc = kmalloc(dsize, GFP_KERNEL | GFP_DMA);
++ if (tdesc == NULL)
++ return 0;
++
++ memcpy(tdesc, tmpdesc, dsize);
++ *desc = tdesc;
++ return dsize;
++}
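++
++/*
++ * Worked example (illustrative, tying the code above to the first dump in
++ * the comment): the general-memory encap descriptor finishes with idx == 8,
++ * so the header word is CMD_DESC_HDR | HDR_ONE | 8, i.e. the 0xB0800008
++ * "jobhdr: stidx=0 len=8" line, and the returned dsize is
++ * 8 * sizeof(u32) == 32 bytes.
++ */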
++
++/*
++ * Construct a blob decapsulation job descriptor
++ *
++ * This function dynamically constructs a blob decapsulation job descriptor
++ * from the following arguments:
++ *
++ * - desc pointer to a pointer to the descriptor generated by this
++ * function. Caller will be responsible to kfree() this
++ * descriptor after execution.
++ * - keymod Physical pointer to a key modifier, which must reside in a
++ * contiguous piece of memory. Modifier will be assumed to be
++ * 8 bytes long for a blob of type SM_SECMEM, or 16 bytes long
++ * for a blob of type SM_GENMEM (see blobtype argument).
++ * - blobbuf Physical pointer (into external memory) of the blob to
++ * be decapsulated. Blob must reside in a contiguous memory
++ * segment.
++ * - outbuf Physical pointer of the decapsulated output, possibly into
++ * a location within a secure memory page. Must be contiguous.
++ * - secretsz Size of encapsulated secret in bytes (not the size of the
++ * input blob).
++ * - keycolor Determines if decapsulated content is encrypted (BLACK_KEY)
++ * or left as plaintext (RED_KEY).
++ * - blobtype	Determines whether the blob to be decapsulated is a secure
++ *		memory blob (SM_SECMEM), with partition data embedded with
++ *		the key material, or a general memory blob (SM_GENMEM).
++ * - auth	For a BLACK_KEY decapsulation path, specify KEY_COVER_CCM
++ *		if the key is covered via AES-CCM, else KEY_COVER_ECB for
++ *		AES-ECB.
++ *
++ * Upon completion, desc points to a buffer containing a CAAM job descriptor
++ * that decapsulates a key blob from external memory into a black (encrypted)
++ * key or red (plaintext) content.
++ *
++ * This is an example of a black key decapsulation job from a general memory
++ * blob. Notice the 16-byte key modifier in the LOAD instruction.
++ *
++ * [00] B0800008 jobhdr: stidx=0 len=8
++ * [01] 14400010 ld: ccb2-key len=16 offs=0
++ * [02] 08A63B7F ptr->@0x08a63b7f
++ * [03] F8000010 seqoutptr: len=16
++ * [04] 01000000 out_ptr->@0x01000000
++ * [05] F000003A seqinptr: len=58
++ * [06] 01000010 in_ptr->@0x01000010
++ * [07] 860D0004 operation: decap blob reg=memory, black, format=normal
++ *
++ * This is an example of a red key decapsulation job for restoring a red key
++ * from a secure memory blob. Note the 8 byte modifier on the 12 byte offset
++ * in the LOAD instruction:
++ *
++ * [00] B0800008 jobhdr: stidx=0 len=8
++ * [01] 14400C08 ld: ccb2-key len=8 offs=12
++ * [02] 01000000 ptr->@0x01000000
++ * [03] F8000020 seqoutptr: len=32
++ * [04] 400000E6 out_ptr->@0x400000e6
++ * [05] F0000050 seqinptr: len=80
++ * [06] 08F0C0EA in_ptr->@0x08f0c0ea
++ * [07] 860D0008 operation: decap blob reg=memory, red, sec_mem,
++ * format=normal
++ *
++ * Note: this function only generates 32-bit pointers at present, and should
++ * be refactored using a scheme that allows both 32 and 64 bit addressing
++ */
++
++static int blob_decap_jobdesc(u32 **desc, dma_addr_t keymod, dma_addr_t blobbuf,
++ u8 *outbuf, u16 secretsz, u8 keycolor,
++ u8 blobtype, u8 auth)
++{
++ u32 *tdesc, tmpdesc[INITIAL_DESCSZ];
++ u16 dsize, idx;
++
++ memset(tmpdesc, 0, INITIAL_DESCSZ * sizeof(u32));
++ idx = 1;
++
++ /* Load key modifier */
++ if (blobtype == SM_SECMEM)
++ tmpdesc[idx++] = CMD_LOAD | LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_KEY |
++ ((12 << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK)
++ | (8 & LDST_LEN_MASK);
++ else /* is general memory blob */
++ tmpdesc[idx++] = CMD_LOAD | LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_KEY | (16 & LDST_LEN_MASK);
++
++ tmpdesc[idx++] = (u32)keymod;
++
++	/* Input length covers the BKEK + MAC tag on top of the secret */
++ tmpdesc[idx++] = CMD_SEQ_IN_PTR | (secretsz + BLOB_OVERHEAD);
++ tmpdesc[idx++] = (u32)blobbuf;
++ tmpdesc[idx++] = CMD_SEQ_OUT_PTR | secretsz;
++ tmpdesc[idx++] = (u32)outbuf;
++
++	/* Decapsulate the blob into the (black or red) output secret */
++ tmpdesc[idx] = CMD_OPERATION | OP_TYPE_DECAP_PROTOCOL | OP_PCLID_BLOB;
++
++ if (blobtype == SM_SECMEM)
++ tmpdesc[idx] |= OP_PCL_BLOB_PTXT_SECMEM;
++
++ if (auth == KEY_COVER_CCM)
++ tmpdesc[idx] |= OP_PCL_BLOB_EKT;
++
++ if (keycolor == BLACK_KEY)
++ tmpdesc[idx] |= OP_PCL_BLOB_BLACK;
++
++ idx++;
++ tmpdesc[0] = CMD_DESC_HDR | HDR_ONE | (idx & HDR_DESCLEN_MASK);
++ dsize = idx * sizeof(u32);
++
++ tdesc = kmalloc(dsize, GFP_KERNEL | GFP_DMA);
++ if (tdesc == NULL)
++ return 0;
++
++ memcpy(tdesc, tmpdesc, dsize);
++ *desc = tdesc;
++ return dsize;
++}
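++
++/*
++ * Worked sizes (illustrative, from the secure-memory dump above): the
++ * 80-byte input blob (seqinptr len=80) yields a 32-byte secret
++ * (seqoutptr len=32); the 48-byte difference is BLOB_OVERHEAD, the blob
++ * key encryption key plus the MAC tag.
++ */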
++
++
++
++static int gen_mem_encap(struct device *jr_dev, void __user *secretbuf,
++ int keylen, void __user *kmodbuf, void __user *outbuf)
++{
++ int retval = 0;
++ u32 dsize;
++	u32 *encapdesc = NULL;
++	dma_addr_t secret_dma = 0, keymod_dma = 0, outbuf_dma = 0;
++	u8 *lsecret = NULL, *lkeymod = NULL, *loutbuf = NULL;
++ struct sm_key_job_result testres;
++
++	/* Build/map/flush the secret */
++ lsecret = kmalloc(keylen, GFP_KERNEL | GFP_DMA);
++ if (!lsecret) {
++ dev_err(jr_dev, "%s: can't alloc for key\n", __func__);
++ retval = -ENOMEM;
++ goto out;
++ }
++ if (copy_from_user(lsecret, secretbuf, keylen)) {
++ dev_err(jr_dev, "%s: can't Copy for key\n", __func__);
++ retval = -EFAULT;
++ goto out;
++ }
++ secret_dma = dma_map_single(jr_dev, lsecret, keylen,
++ DMA_TO_DEVICE);
++
++ /* Build/map/flush the key modifier */
++ lkeymod = kmalloc(GENMEM_KEYMOD_LEN, GFP_KERNEL | GFP_DMA);
++ if (!lkeymod) {
++ dev_err(jr_dev, "%s: can't alloc for keymod\n", __func__);
++ retval = -ENOMEM;
++ goto out;
++ }
++ if (copy_from_user(lkeymod, kmodbuf, GENMEM_KEYMOD_LEN)) {
++ dev_err(jr_dev, "%s: can't Copy for keymod\n", __func__);
++ retval = -EFAULT;
++ goto out;
++ }
++ keymod_dma = dma_map_single(jr_dev, lkeymod, GENMEM_KEYMOD_LEN,
++ DMA_TO_DEVICE);
++
++ loutbuf = kmalloc(keylen + BLOB_OVERHEAD, GFP_KERNEL | GFP_DMA);
++	if (!loutbuf) {
++ dev_err(jr_dev, "%s: can't alloc for output\n", __func__);
++ retval = -ENOMEM;
++ goto out;
++ }
++ outbuf_dma = dma_map_single(jr_dev, loutbuf, keylen + BLOB_OVERHEAD,
++ DMA_FROM_DEVICE);
++ dsize = blob_encap_jobdesc(&encapdesc, keymod_dma, (void *)secret_dma, outbuf_dma,
++ keylen, RED_KEY, SM_GENMEM, KEY_COVER_ECB);
++ if (!dsize) {
++ dev_err(jr_dev, "can't alloc an encapsulation descriptor\n");
++ retval = -ENOMEM;
++ goto out;
++ }
++ init_completion(&testres.completion);
++
++ retval = caam_jr_enqueue(jr_dev, encapdesc, sm_key_job_done,
++ &testres);
++ if (!retval) {
++ wait_for_completion_interruptible(&testres.completion);
++ dev_info(jr_dev, "job ring return %d\n", testres.error);
++ if (!testres.error) {
++ dma_sync_single_for_cpu(jr_dev, outbuf_dma, keylen + BLOB_OVERHEAD,
++ DMA_FROM_DEVICE);
++
++ if (copy_to_user(outbuf, loutbuf, keylen + BLOB_OVERHEAD)) {
++ retval = -EFAULT;
++ dev_err(jr_dev, "can't copy for output\n");
++ goto out;
++ }
++ }
++ retval = testres.error;
++ }
++
++out:
++ if (outbuf_dma)
++ dma_unmap_single(jr_dev, outbuf_dma, keylen + BLOB_OVERHEAD,
++ DMA_FROM_DEVICE);
++ if (keymod_dma)
++ dma_unmap_single(jr_dev, keymod_dma, GENMEM_KEYMOD_LEN, DMA_TO_DEVICE);
++ if (secret_dma)
++ dma_unmap_single(jr_dev, secret_dma, keylen, DMA_TO_DEVICE);
++ kfree(encapdesc);
++ kfree(lkeymod);
++ kfree(lsecret);
++ kfree(loutbuf);
++
++ return retval;
++}
++
++static int gen_mem_decap(struct device *jr_dev, void __user *keyblobbuf,
++ int bloblen, void __user *kmodbuf, void __user *outbuf)
++{
++ int retval = 0;
++ int keylen = bloblen - BLOB_OVERHEAD;
++ u32 dsize;
++ dma_addr_t keyblob_dma = 0, keymod_dma = 0, outbuf_dma = 0;
++	u8 *lkeyblob = NULL, *lkeymod = NULL, *loutbuf = NULL;
++	struct sm_key_job_result testres;
++	u32 *decapdesc = NULL;
++
++	/* Build/map/flush the key blob */
++ lkeyblob = kmalloc(bloblen, GFP_KERNEL | GFP_DMA);
++ if (!lkeyblob) {
++ dev_err(jr_dev, "%s: can't alloc for keylob\n", __func__);
++ retval = -ENOMEM;
++ goto out;
++ }
++ if (copy_from_user(lkeyblob, keyblobbuf, bloblen)) {
++ dev_err(jr_dev, "%s: can't Copy for keyblob\n", __func__);
++ retval = -EFAULT;
++ goto out;
++ }
++ keyblob_dma = dma_map_single(jr_dev, lkeyblob, bloblen,
++ DMA_TO_DEVICE);
++
++ /* Build/map/flush the key modifier */
++ lkeymod = kmalloc(GENMEM_KEYMOD_LEN, GFP_KERNEL | GFP_DMA);
++ if (!lkeymod) {
++ dev_err(jr_dev, "%s: can't alloc for keymod\n", __func__);
++ retval = -ENOMEM;
++ goto out;
++ }
++ if (copy_from_user(lkeymod, kmodbuf, GENMEM_KEYMOD_LEN)) {
++ dev_err(jr_dev, "%s: can't Copy for keymod\n", __func__);
++ retval = -EFAULT;
++ goto out;
++ }
++ keymod_dma = dma_map_single(jr_dev, lkeymod, GENMEM_KEYMOD_LEN,
++ DMA_TO_DEVICE);
++
++ loutbuf = kmalloc(keylen, GFP_KERNEL | GFP_DMA);
++ if (!loutbuf) {
++ dev_err(jr_dev, "%s: can't alloc for outbuf\n", __func__);
++ retval = -ENOMEM;
++ goto out;
++ }
++ outbuf_dma = dma_map_single(jr_dev, loutbuf, keylen,
++ DMA_FROM_DEVICE);
++
++	/* Build the decapsulation job descriptor */
++ dsize = blob_decap_jobdesc(&decapdesc, keymod_dma, keyblob_dma, (u8 *)outbuf_dma,
++ keylen, RED_KEY, SM_GENMEM, KEY_COVER_ECB);
++ if (!dsize) {
++ dev_err(jr_dev, "can't alloc a decapsulation descriptor\n");
++ retval = -ENOMEM;
++ goto out;
++ }
++
++ init_completion(&testres.completion);
++
++ retval = caam_jr_enqueue(jr_dev, decapdesc, sm_key_job_done,
++ &testres);
++ if (!retval) {
++ wait_for_completion_interruptible(&testres.completion);
++ dev_info(jr_dev, "job ring return %d\n", testres.error);
++ if (!testres.error) {
++ dma_sync_single_for_cpu(jr_dev, outbuf_dma, keylen,
++ DMA_FROM_DEVICE);
++
++ if (copy_to_user(outbuf, loutbuf, keylen)) {
++ retval = -EFAULT;
++ goto out;
++ }
++ }
++ retval = testres.error;
++ }
++
++out:
++ if (outbuf_dma)
++ dma_unmap_single(jr_dev, outbuf_dma, keylen,
++ DMA_FROM_DEVICE);
++ if (keymod_dma)
++ dma_unmap_single(jr_dev, keymod_dma, GENMEM_KEYMOD_LEN,
++ DMA_TO_DEVICE);
++ if (keyblob_dma)
++ dma_unmap_single(jr_dev, keyblob_dma, bloblen,
++ DMA_TO_DEVICE);
++ kfree(decapdesc);
++ kfree(lkeymod);
++ kfree(lkeyblob);
++ kfree(loutbuf);
++
++ return retval;
++}
++
++
++static long kb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ int retval = 0;
++ struct caam_kb_data kb_data;
++ struct miscdevice *miscdev = file->private_data;
++ struct kb_device *dev = container_of(miscdev, struct kb_device, misc_dev);
++
++	if (copy_from_user(&kb_data, (void __user *)arg, sizeof(kb_data))) {
++ retval = -EFAULT;
++ goto err;
++ }
++
++ if (!kb_data.rawkey || !kb_data.keyblob ||
++ (kb_data.rawkey_len + BLOB_OVERHEAD != kb_data.keyblob_len) ||
++ (kb_data.keymod_len != GENMEM_KEYMOD_LEN)) {
++ retval = -EINVAL;
++ goto err;
++ }
++
++ printk(KERN_INFO"%s:rawkey_len %d, keyblob_len %d\n",
++ __func__, kb_data.rawkey_len, kb_data.keyblob_len);
++
++ switch (cmd) {
++ case CAAM_KB_ENCRYPT:
++ {
++ retval = gen_mem_encap(dev->jr_dev, kb_data.rawkey, kb_data.rawkey_len,
++ kb_data.keymod, kb_data.keyblob);
++ break;
++ }
++ case CAAM_KB_DECRYPT:
++ {
++ retval = gen_mem_decap(dev->jr_dev, kb_data.keyblob, kb_data.keyblob_len,
++ kb_data.keymod, kb_data.rawkey);
++ break;
++ }
++ default:
++ return -ENOTTY;
++ }
++
++err:
++ return retval;
++}
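++
++/*
++ * Size contract enforced above, worked through (assuming the 48-byte
++ * BLOB_OVERHEAD described in the encapsulation comments): wrapping a
++ * 16-byte raw key needs keyblob_len == 16 + 48 == 64 and a 16-byte
++ * (GENMEM_KEYMOD_LEN) key modifier; anything else fails with -EINVAL
++ * before a job is enqueued.
++ */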
++
++static const struct file_operations kb_fops = {
++ .owner = THIS_MODULE,
++ .open = kb_open,
++ .release = kb_release,
++ .unlocked_ioctl = kb_ioctl,
++};
++
++static struct kb_device *kb_device_create(void)
++{
++ struct kb_device *idev;
++ int ret;
++
++ idev = kzalloc(sizeof(struct kb_device), GFP_KERNEL);
++ if (!idev)
++ return ERR_PTR(-ENOMEM);
++
++ idev->misc_dev.minor = MISC_DYNAMIC_MINOR;
++ idev->misc_dev.name = "caam_kb";
++ idev->misc_dev.fops = &kb_fops;
++ idev->misc_dev.parent = NULL;
++ ret = misc_register(&idev->misc_dev);
++	if (ret) {
++		pr_err("caam_kb: failed to register misc device.\n");
++		kfree(idev);
++		return ERR_PTR(ret);
++	}
++
++ return idev;
++}
++
++static int kb_device_destroy(struct kb_device *kb_dev)
++{
++ if ((kb_dev) && (kb_dev->jr_dev)) {
++ caam_jr_free(kb_dev->jr_dev);
++ kb_dev->jr_dev = NULL;
++ }
++
++ if (kb_dev)
++ misc_deregister(&kb_dev->misc_dev);
++
++ return 0;
++}
++/*
++ * Probe key blob device
++ */
++static int caam_keyblob_probe(struct platform_device *pdev)
++{
++ int err;
++
++ dev_dbg(&pdev->dev, "%s enter\n", __func__);
++ kb_dev = kb_device_create();
++ if (IS_ERR_OR_NULL(kb_dev)) {
++ err = PTR_ERR(kb_dev);
++ goto err;
++ }
++ return 0;
++err:
++ return err;
++}
++
++/*
++ * Remove key blob device
++ */
++static int caam_keyblob_remove(struct platform_device *pdev)
++{
++ kb_device_destroy(kb_dev);
++ return 0;
++}
++
++static const struct of_device_id caam_keyblob_match[] = {
++ {
++ .compatible = "fsl,sec-v4.0-keyblob",
++ },
++ {
++ .compatible = "fsl,sec4.0-keyblob",
++ },
++ {},
++};
++
++MODULE_DEVICE_TABLE(of, caam_keyblob_match);
++
++static struct platform_driver caam_keyblob_driver = {
++ .driver = {
++ .name = "caam_keyblob",
++ .owner = THIS_MODULE,
++ .of_match_table = caam_keyblob_match,
++ },
++ .probe = caam_keyblob_probe,
++ .remove = caam_keyblob_remove,
++};
++
++static int __init keyblob_driver_init(void)
++{
++ return platform_driver_register(&caam_keyblob_driver);
++}
++
++static void __exit keyblob_driver_exit(void)
++{
++ platform_driver_unregister(&caam_keyblob_driver);
++}
++
++module_init(keyblob_driver_init);
++module_exit(keyblob_driver_exit);
++
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("FSL CAAM Secure Memory / Keystore");
++MODULE_AUTHOR("Freescale Semiconductor - NMSG/MAD");
+diff -Nur linux-4.1.3/drivers/crypto/caam/caam_keyblob.h linux-xbian-imx6/drivers/crypto/caam/caam_keyblob.h
+--- linux-4.1.3/drivers/crypto/caam/caam_keyblob.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/crypto/caam/caam_keyblob.h 2015-07-27 23:13:04.205975852 +0200
+@@ -0,0 +1,45 @@
++/*
++ * CAAM public-level include definitions for the key blob
++ *
++ * Copyright (C) 2015 Freescale Semiconductor, Inc.
++ */
++
++#ifndef CAAM_KEYBLOB_H
++#define CAAM_KEYBLOB_H
++
++
++#include <linux/ioctl.h>
++#include <linux/types.h>
++
++struct caam_kb_data {
++ char *rawkey;
++ size_t rawkey_len;
++ char *keyblob;
++ size_t keyblob_len;
++ char *keymod;
++ size_t keymod_len;
++};
++
++
++#define CAAM_KB_MAGIC 'I'
++
++/**
++ * DOC: CAAM_KB_ENCRYPT - generate a key blob from raw key
++ *
++ * Takes an caam_kb_data struct and returns it with the key blob
++ */
++#define CAAM_KB_ENCRYPT _IOWR(CAAM_KB_MAGIC, 0, \
++ struct caam_kb_data)
++
++/**
++ * DOC: CAAM_KB_DECRYPT - get keys from a key blob
++ *
++ * Takes an caam_kb_data struct and returns it with the raw key.
++ */
++#define CAAM_KB_DECRYPT _IOWR(CAAM_KB_MAGIC, 1, struct caam_kb_data)
++
++#ifndef GENMEM_KEYMOD_LEN
++#define GENMEM_KEYMOD_LEN 16
++#endif
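++
++/*
++ * Minimal user-space sketch (illustrative only; the buffer names and the
++ * 16-byte key size are assumptions, while the 48-byte blob overhead and
++ * the /dev/caam_kb misc node come from the driver itself):
++ *
++ *	struct caam_kb_data kb = { 0 };
++ *	char key[16], blob[16 + 48];
++ *	char mod[GENMEM_KEYMOD_LEN] = "0123456789abcde";
++ *	int fd = open("/dev/caam_kb", O_RDWR);
++ *
++ *	kb.rawkey = key;
++ *	kb.rawkey_len = sizeof(key);
++ *	kb.keyblob = blob;
++ *	kb.keyblob_len = sizeof(blob);	(rawkey_len + 48)
++ *	kb.keymod = mod;
++ *	kb.keymod_len = GENMEM_KEYMOD_LEN;
++ *	if (ioctl(fd, CAAM_KB_ENCRYPT, &kb) == 0)
++ *		(blob now holds the wrapped key; CAAM_KB_DECRYPT reverses it)
++ */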
++
++#endif /* CAAM_KEYBLOB_H */
+diff -Nur linux-4.1.3/drivers/crypto/caam/caamrng.c linux-xbian-imx6/drivers/crypto/caam/caamrng.c
+--- linux-4.1.3/drivers/crypto/caam/caamrng.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/crypto/caam/caamrng.c 2015-07-27 23:13:04.209961631 +0200
+@@ -1,7 +1,7 @@
+ /*
+ * caam - Freescale FSL CAAM support for hw_random
+ *
+- * Copyright 2011 Freescale Semiconductor, Inc.
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
+ *
+ * Based on caamalg.c crypto API driver.
+ *
+@@ -80,9 +80,12 @@
+
+ static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
+ {
+- if (bd->addr)
++ if (bd->addr) {
++ dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE,
++ DMA_FROM_DEVICE);
+ dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE,
+ DMA_FROM_DEVICE);
++ }
+ }
+
+ static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx)
+@@ -103,11 +106,18 @@
+ bd = (struct buf_data *)((char *)desc -
+ offsetof(struct buf_data, hw_desc));
+
+- if (err)
+- caam_jr_strstatus(jrdev, err);
++ if (err) {
++ char tmp[CAAM_ERROR_STR_MAX];
++
++ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
++ }
+
+ atomic_set(&bd->empty, BUF_NOT_EMPTY);
+ complete(&bd->filled);
++
++ /* Buffer refilled, invalidate cache */
++ dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);
++
+ #ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
+ DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
+@@ -185,7 +195,7 @@
+ max - copied_idx, false);
+ }
+
+-static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
++static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
+ {
+ struct device *jrdev = ctx->jrdev;
+ u32 *desc = ctx->sh_desc;
+@@ -203,18 +213,16 @@
+
+ ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
+ DMA_TO_DEVICE);
+- if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) {
+- dev_err(jrdev, "unable to map shared descriptor\n");
+- return -ENOMEM;
+- }
++ dma_sync_single_for_device(jrdev, ctx->sh_desc_dma, desc_bytes(desc),
++ DMA_TO_DEVICE);
++
+ #ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+ desc, desc_bytes(desc), 1);
+ #endif
+- return 0;
+ }
+
+-static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
++static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
+ {
+ struct device *jrdev = ctx->jrdev;
+ struct buf_data *bd = &ctx->bufs[buf_id];
+@@ -225,17 +233,12 @@
+ HDR_REVERSE);
+
+ bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
+- if (dma_mapping_error(jrdev, bd->addr)) {
+- dev_err(jrdev, "unable to map dst\n");
+- return -ENOMEM;
+- }
+
+ append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
+ #ifdef DEBUG
+ print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
+ desc, desc_bytes(desc), 1);
+ #endif
+- return 0;
+ }
+
+ static void caam_cleanup(struct hwrng *rng)
+@@ -252,44 +255,67 @@
+ rng_unmap_ctx(rng_ctx);
+ }
+
+-static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
++#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST
++static inline void test_len(struct hwrng *rng, size_t len, bool wait)
+ {
+- struct buf_data *bd = &ctx->bufs[buf_id];
+- int err;
++ u8 *buf;
++ int real_len;
+
+- err = rng_create_job_desc(ctx, buf_id);
+- if (err)
+- return err;
++	buf = kzalloc(sizeof(u8) * len, GFP_KERNEL);
++	if (!buf)
++		return;
++	real_len = rng->read(rng, buf, len, wait);
++	if (real_len == 0 && wait)
++		pr_err("WAITING FAILED\n");
++	pr_info("wanted %zu bytes, got %d\n", len, real_len);
++ print_hex_dump(KERN_INFO, "random bytes@: ", DUMP_PREFIX_ADDRESS,
++ 16, 4, buf, real_len, 1);
++ kfree(buf);
++}
+
+- atomic_set(&bd->empty, BUF_EMPTY);
+- submit_job(ctx, buf_id == ctx->current_buf);
+- wait_for_completion(&bd->filled);
++static inline void test_mode_once(struct hwrng *rng, bool wait)
++{
++#define TEST_CHUNK (RN_BUF_SIZE / 4)
+
+- return 0;
++ test_len(rng, TEST_CHUNK, wait);
++ test_len(rng, RN_BUF_SIZE * 2, wait);
++ test_len(rng, RN_BUF_SIZE * 2 - TEST_CHUNK, wait);
+ }
+
+-static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
++static inline void test_mode(struct hwrng *rng, bool wait)
+ {
+- int err;
+-
+- ctx->jrdev = jrdev;
++#define TEST_PASS 1
++ int i;
+
+- err = rng_create_sh_desc(ctx);
+- if (err)
+- return err;
++ for (i = 0; i < TEST_PASS; i++)
++ test_mode_once(rng, wait);
++}
+
+- ctx->current_buf = 0;
+- ctx->cur_buf_idx = 0;
++static void self_test(struct hwrng *rng)
++{
++ pr_info("testing without waiting\n");
++ test_mode(rng, false);
++ pr_info("testing with waiting\n");
++ test_mode(rng, true);
++}
++#endif
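++
++/*
++ * The three lengths exercised by test_mode_once() above (RN_BUF_SIZE/4,
++ * RN_BUF_SIZE*2 and RN_BUF_SIZE*2 - RN_BUF_SIZE/4) appear chosen to cover
++ * a partial read of one buffer, a read spanning both buffers, and a read
++ * ending mid-buffer, so both the refill path and the copied_idx
++ * bookkeeping are hit.
++ */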
+
+- err = caam_init_buf(ctx, 0);
+- if (err)
+- return err;
++static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
++{
++ struct buf_data *bd = &ctx->bufs[buf_id];
+
+- err = caam_init_buf(ctx, 1);
+- if (err)
+- return err;
++ rng_create_job_desc(ctx, buf_id);
++ atomic_set(&bd->empty, BUF_EMPTY);
++ submit_job(ctx, buf_id == ctx->current_buf);
++ wait_for_completion(&bd->filled);
++}
+
+- return 0;
++static void caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
++{
++ ctx->jrdev = jrdev;
++ rng_create_sh_desc(ctx);
++ ctx->current_buf = 0;
++ ctx->cur_buf_idx = 0;
++ caam_init_buf(ctx, 0);
++ caam_init_buf(ctx, 1);
+ }
+
+ static struct hwrng caam_rng = {
+@@ -300,19 +326,15 @@
+
+ static void __exit caam_rng_exit(void)
+ {
+- caam_jr_free(rng_ctx->jrdev);
+ hwrng_unregister(&caam_rng);
+- kfree(rng_ctx);
+ }
+
+ static int __init caam_rng_init(void)
+ {
+- struct device *dev;
+ struct device_node *dev_node;
+ struct platform_device *pdev;
+ struct device *ctrldev;
+- void *priv;
+- int err;
++ struct caam_drv_private *priv;
+
+ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+ if (!dev_node) {
+@@ -322,35 +344,26 @@
+ }
+
+ pdev = of_find_device_by_node(dev_node);
+- if (!pdev) {
+- of_node_put(dev_node);
++ if (!pdev)
+ return -ENODEV;
+- }
+
+ ctrldev = &pdev->dev;
+ priv = dev_get_drvdata(ctrldev);
+ of_node_put(dev_node);
+
+- /*
+- * If priv is NULL, it's probably because the caam driver wasn't
+- * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+- */
+- if (!priv)
++ /* Check RNG present in hardware before registration */
++ if (!(rd_reg64(&priv->ctrl->perfmon.cha_num) & CHA_ID_RNG_MASK))
+ return -ENODEV;
+
+- dev = caam_jr_alloc();
+- if (IS_ERR(dev)) {
+- pr_err("Job Ring Device allocation for transform failed\n");
+- return PTR_ERR(dev);
+- }
+- rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
+- if (!rng_ctx)
+- return -ENOMEM;
+- err = caam_init_rng(rng_ctx, dev);
+- if (err)
+- return err;
++	rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_KERNEL | GFP_DMA);
++	if (!rng_ctx)
++		return -ENOMEM;
++
++	caam_init_rng(rng_ctx, priv->jrdev[0]);
++
++#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST
++ self_test(&caam_rng);
++#endif
+
+- dev_info(dev, "registering rng-caam\n");
++ dev_info(priv->jrdev[0], "registering rng-caam\n");
+ return hwrng_register(&caam_rng);
+ }
+
+diff -Nur linux-4.1.3/drivers/crypto/caam/compat.h linux-xbian-imx6/drivers/crypto/caam/compat.h
+--- linux-4.1.3/drivers/crypto/caam/compat.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/crypto/caam/compat.h 2015-07-27 23:13:04.209961631 +0200
+@@ -14,6 +14,8 @@
+ #include <linux/hash.h>
+ #include <linux/hw_random.h>
+ #include <linux/of_platform.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/io.h>
+ #include <linux/spinlock.h>
+@@ -23,12 +25,15 @@
+ #include <linux/types.h>
+ #include <linux/debugfs.h>
+ #include <linux/circ_buf.h>
++#include <crypto/scatterwalk.h>
++
++#ifdef CONFIG_ARM /* needs the clock control subsystem */
++#include <linux/clk.h>
++#endif
+ #include <net/xfrm.h>
+
+ #include <crypto/algapi.h>
+-#include <crypto/null.h>
+ #include <crypto/aes.h>
+-#include <crypto/ctr.h>
+ #include <crypto/des.h>
+ #include <crypto/sha.h>
+ #include <crypto/md5.h>
+diff -Nur linux-4.1.3/drivers/crypto/caam/ctrl.c linux-xbian-imx6/drivers/crypto/caam/ctrl.c
+--- linux-4.1.3/drivers/crypto/caam/ctrl.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/crypto/caam/ctrl.c 2015-07-27 23:13:04.209961631 +0200
+@@ -1,405 +1,260 @@
+-/* * CAAM control-plane driver backend
++/*
++ * CAAM control-plane driver backend
+ * Controller-level driver, kernel property detection, initialization
+ *
+- * Copyright 2008-2012 Freescale Semiconductor, Inc.
++ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
+ */
+
+-#include <linux/device.h>
+-#include <linux/of_address.h>
+-#include <linux/of_irq.h>
+-
+ #include "compat.h"
+ #include "regs.h"
+ #include "intern.h"
+ #include "jr.h"
+ #include "desc_constr.h"
+ #include "error.h"
++#include "ctrl.h"
++#include "sm.h"
++#include <linux/device.h>
+
+-/*
+- * Descriptor to instantiate RNG State Handle 0 in normal mode and
+- * load the JDKEK, TDKEK and TDSK registers
+- */
+-static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
+-{
+- u32 *jump_cmd, op_flags;
+-
+- init_job_desc(desc, 0);
+-
+- op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+- (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
+-
+- /* INIT RNG in non-test mode */
+- append_operation(desc, op_flags);
+-
+- if (!handle && do_sk) {
+- /*
+- * For SH0, Secure Keys must be generated as well
+- */
++/* Used to capture the array of job rings */
++struct device **caam_jr_dev;
+
+- /* wait for done */
+- jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
+- set_jump_tgt_here(desc, jump_cmd);
++static int caam_remove(struct platform_device *pdev)
++{
++ struct device *ctrldev;
++ struct caam_drv_private *ctrlpriv;
++ struct caam_drv_private_jr *jrpriv;
++ struct caam_full __iomem *topregs;
++ int ring, ret = 0;
+
+- /*
+- * load 1 to clear written reg:
+- * resets the done interrrupt and returns the RNG to idle.
+- */
+- append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
++ ctrldev = &pdev->dev;
++ ctrlpriv = dev_get_drvdata(ctrldev);
++ topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+
+- /* Initialize State Handle */
+- append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+- OP_ALG_AAI_RNG4_SK);
++ /* shut down JobRs */
++ for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
++ ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
++ jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
++ irq_dispose_mapping(jrpriv->irq);
+ }
+
+- append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
+-}
++ /* Shut down debug views */
++#ifdef CONFIG_DEBUG_FS
++ debugfs_remove_recursive(ctrlpriv->dfs_root);
++#endif
+
+-/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
+-static void build_deinstantiation_desc(u32 *desc, int handle)
+-{
+- init_job_desc(desc, 0);
++ /* Unmap controller region */
++ iounmap(&topregs->ctrl);
+
+- /* Uninstantiate State Handle 0 */
+- append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+- (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);
++#ifdef CONFIG_ARM
++ /* shut clocks off before finalizing shutdown */
++ clk_disable(ctrlpriv->caam_ipg);
++ clk_disable(ctrlpriv->caam_mem);
++ clk_disable(ctrlpriv->caam_aclk);
++#endif
++
++ kfree(ctrlpriv->jrdev);
++ kfree(ctrlpriv);
+
+- append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
++ return ret;
+ }
+
+ /*
+- * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
+- * the software (no JR/QI used).
+- * @ctrldev - pointer to device
+- * @status - descriptor status, after being run
+- *
+- * Return: - 0 if no error occurred
+- * - -ENODEV if the DECO couldn't be acquired
+- * - -EAGAIN if an error occurred while executing the descriptor
++ * Descriptor to instantiate RNG State Handle 0 in normal mode and
++ * load the JDKEK, TDKEK and TDSK registers
+ */
+-static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
+- u32 *status)
++static void build_instantiation_desc(u32 *desc)
+ {
+- struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+- struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
+- struct caam_deco __iomem *deco = ctrlpriv->deco;
+- unsigned int timeout = 100000;
+- u32 deco_dbg_reg, flags;
+- int i;
+-
++ u32 *jump_cmd;
+
+- if (ctrlpriv->virt_en == 1) {
+- setbits32(&ctrl->deco_rsr, DECORSR_JR0);
+-
+- while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
+- --timeout)
+- cpu_relax();
+-
+- timeout = 100000;
+- }
+-
+- setbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
+-
+- while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
+- --timeout)
+- cpu_relax();
++ init_job_desc(desc, 0);
+
+- if (!timeout) {
+- dev_err(ctrldev, "failed to acquire DECO 0\n");
+- clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
+- return -ENODEV;
+- }
++ /* INIT RNG in non-test mode */
++ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
++ OP_ALG_AS_INIT);
+
+- for (i = 0; i < desc_len(desc); i++)
+- wr_reg32(&deco->descbuf[i], *(desc + i));
++ /* wait for done */
++ jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
++ set_jump_tgt_here(desc, jump_cmd);
+
+- flags = DECO_JQCR_WHL;
+ /*
+- * If the descriptor length is longer than 4 words, then the
+- * FOUR bit in JRCTRL register must be set.
++ * load 1 to clear written reg:
++ * resets the done interrupt and returns the RNG to idle.
+ */
+- if (desc_len(desc) >= 4)
+- flags |= DECO_JQCR_FOUR;
++ append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
+
+- /* Instruct the DECO to execute it */
+- wr_reg32(&deco->jr_ctl_hi, flags);
+-
+- timeout = 10000000;
+- do {
+- deco_dbg_reg = rd_reg32(&deco->desc_dbg);
+- /*
+- * If an error occured in the descriptor, then
+- * the DECO status field will be set to 0x0D
+- */
+- if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
+- DESC_DBG_DECO_STAT_HOST_ERR)
+- break;
+- cpu_relax();
+- } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
+-
+- *status = rd_reg32(&deco->op_status_hi) &
+- DECO_OP_STATUS_HI_ERR_MASK;
+-
+- if (ctrlpriv->virt_en == 1)
+- clrbits32(&ctrl->deco_rsr, DECORSR_JR0);
+-
+- /* Mark the DECO as free */
+- clrbits32(&ctrl->deco_rq, DECORR_RQD0ENABLE);
+-
+- if (!timeout)
+- return -EAGAIN;
+-
+- return 0;
+ }
+
+-/*
+- * instantiate_rng - builds and executes a descriptor on DECO0,
+- * which initializes the RNG block.
+- * @ctrldev - pointer to device
+- * @state_handle_mask - bitmask containing the instantiation status
+- * for the RNG4 state handles which exist in
+- * the RNG4 block: 1 if it's been instantiated
+- * by an external entry, 0 otherwise.
+- * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK;
+- * Caution: this can be done only once; if the keys need to be
+- * regenerated, a POR is required
+- *
+- * Return: - 0 if no error occurred
+- * - -ENOMEM if there isn't enough memory to allocate the descriptor
+- * - -ENODEV if DECO0 couldn't be acquired
+- * - -EAGAIN if an error occurred when executing the descriptor
+- * f.i. there was a RNG hardware error due to not "good enough"
+- * entropy being aquired.
+- */
+-static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
+- int gen_sk)
++static void generate_secure_keys_desc(u32 *desc)
+ {
+- struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+- struct caam_ctrl __iomem *ctrl;
+- u32 *desc, status, rdsta_val;
+- int ret = 0, sh_idx;
+-
+- ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+- desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
+- if (!desc)
+- return -ENOMEM;
++ /* generate secure keys (non-test) */
++ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
++ OP_ALG_RNG4_SK);
++}
+
+- for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+- /*
+- * If the corresponding bit is set, this state handle
+- * was initialized by somebody else, so it's left alone.
+- */
+- if ((1 << sh_idx) & state_handle_mask)
+- continue;
++struct instantiate_result {
++ struct completion completion;
++ int err;
++};
+
+- /* Create the descriptor for instantiating RNG State Handle */
+- build_instantiation_desc(desc, sh_idx, gen_sk);
++static void rng4_init_done(struct device *dev, u32 *desc, u32 err,
++ void *context)
++{
++ struct instantiate_result *instantiation = context;
+
+- /* Try to run it through DECO0 */
+- ret = run_descriptor_deco0(ctrldev, desc, &status);
++ if (err) {
++ char tmp[CAAM_ERROR_STR_MAX];
+
+- /*
+- * If ret is not 0, or descriptor status is not 0, then
+- * something went wrong. No need to try the next state
+- * handle (if available), bail out here.
+- * Also, if for some reason, the State Handle didn't get
+- * instantiated although the descriptor has finished
+- * without any error (HW optimizations for later
+- * CAAM eras), then try again.
+- */
+- rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
+- if (status || !(rdsta_val & (1 << sh_idx)))
+- ret = -EAGAIN;
+- if (ret)
+- break;
+- dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
+- /* Clear the contents before recreating the descriptor */
+- memset(desc, 0x00, CAAM_CMD_SZ * 7);
++ dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+- kfree(desc);
+-
+- return ret;
++ instantiation->err = err;
++ complete(&instantiation->completion);
+ }
+
+-/*
+- * deinstantiate_rng - builds and executes a descriptor on DECO0,
+- * which deinitializes the RNG block.
+- * @ctrldev - pointer to device
+- * @state_handle_mask - bitmask containing the instantiation status
+- * for the RNG4 state handles which exist in
+- * the RNG4 block: 1 if it's been instantiated
+- *
+- * Return: - 0 if no error occurred
+- * - -ENOMEM if there isn't enough memory to allocate the descriptor
+- * - -ENODEV if DECO0 couldn't be acquired
+- * - -EAGAIN if an error occurred when executing the descriptor
+- */
+-static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
++static int instantiate_rng(struct device *jrdev, u32 keys_generated)
+ {
+- u32 *desc, status;
+- int sh_idx, ret = 0;
++ struct instantiate_result instantiation;
+
+- desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
+- if (!desc)
++ dma_addr_t desc_dma;
++ u32 *desc;
++ int ret;
++
++ desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA);
++ if (!desc) {
++ dev_err(jrdev, "cannot allocate RNG init descriptor memory\n");
+ return -ENOMEM;
+-
+- for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+- /*
+- * If the corresponding bit is set, then it means the state
+- * handle was initialized by us, and thus it needs to be
+- * deintialized as well
+- */
+- if ((1 << sh_idx) & state_handle_mask) {
+- /*
+- * Create the descriptor for deinstantating this state
+- * handle
+- */
+- build_deinstantiation_desc(desc, sh_idx);
+-
+- /* Try to run it through DECO0 */
+- ret = run_descriptor_deco0(ctrldev, desc, &status);
+-
+- if (ret || status) {
+- dev_err(ctrldev,
+- "Failed to deinstantiate RNG4 SH%d\n",
+- sh_idx);
+- break;
+- }
+- dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
+- }
+ }
+
+- kfree(desc);
+-
+- return ret;
+-}
++ build_instantiation_desc(desc);
+
+-static int caam_remove(struct platform_device *pdev)
+-{
+- struct device *ctrldev;
+- struct caam_drv_private *ctrlpriv;
+- struct caam_ctrl __iomem *ctrl;
+- int ring, ret = 0;
+-
+- ctrldev = &pdev->dev;
+- ctrlpriv = dev_get_drvdata(ctrldev);
+- ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+-
+- /* Remove platform devices for JobRs */
+- for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
+- if (ctrlpriv->jrpdev[ring])
+- of_device_unregister(ctrlpriv->jrpdev[ring]);
++ /* If keys have not been generated, add op code to generate key. */
++ if (!keys_generated)
++ generate_secure_keys_desc(desc);
++
++ desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE);
++ dma_sync_single_for_device(jrdev, desc_dma, desc_bytes(desc),
++ DMA_TO_DEVICE);
++ init_completion(&instantiation.completion);
++ ret = caam_jr_enqueue(jrdev, desc, rng4_init_done, &instantiation);
++ if (!ret) {
++ wait_for_completion_interruptible(&instantiation.completion);
++ ret = instantiation.err;
++ if (ret)
++ dev_err(jrdev, "unable to instantiate RNG\n");
+ }
+
+- /* De-initialize RNG state handles initialized by this driver. */
+- if (ctrlpriv->rng4_sh_init)
+- deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
+-
+- /* Shut down debug views */
+-#ifdef CONFIG_DEBUG_FS
+- debugfs_remove_recursive(ctrlpriv->dfs_root);
+-#endif
++ dma_unmap_single(jrdev, desc_dma, desc_bytes(desc), DMA_TO_DEVICE);
+
+- /* Unmap controller region */
+- iounmap(&ctrl);
++ kfree(desc);
+
+ return ret;
+ }
+
+ /*
+- * kick_trng - sets the various parameters for enabling the initialization
+- * of the RNG4 block in CAAM
+- * @pdev - pointer to the platform device
+- * @ent_delay - Defines the length (in system clocks) of each entropy sample.
++ * By default, the TRNG runs for 200 clocks per sample;
++ * 1600 clocks per sample generates better entropy.
+ */
+-static void kick_trng(struct platform_device *pdev, int ent_delay)
++static void kick_trng(struct platform_device *pdev)
+ {
+ struct device *ctrldev = &pdev->dev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+- struct caam_ctrl __iomem *ctrl;
++ struct caam_full __iomem *topregs;
+ struct rng4tst __iomem *r4tst;
+ u32 val;
+
+- ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
+- r4tst = &ctrl->r4tst[0];
++ topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
++ r4tst = &topregs->ctrl.r4tst[0];
+
++ val = rd_reg32(&r4tst->rtmctl);
+ /* put RNG4 into program mode */
+ setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+-
+- /*
+- * Performance-wise, it does not make sense to
+- * set the delay to a value that is lower
+- * than the last one that worked (i.e. the state handles
+- * were instantiated properly. Thus, instead of wasting
+- * time trying to set the values controlling the sample
+- * frequency, the function simply returns.
+- */
+- val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
+- >> RTSDCTL_ENT_DLY_SHIFT;
+- if (ent_delay <= val) {
+- /* put RNG4 into run mode */
+- clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+- return;
+- }
+-
++ /* Set clocks per sample to the default, and divider to zero */
+ val = rd_reg32(&r4tst->rtsdctl);
+ val = (val & ~RTSDCTL_ENT_DLY_MASK) |
+- (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
++ (RNG4_ENT_CLOCKS_SAMPLE << RTSDCTL_ENT_DLY_SHIFT);
+ wr_reg32(&r4tst->rtsdctl, val);
+- /* min. freq. count, equal to 1/4 of the entropy sample length */
+- wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
+- /* disable maximum frequency count */
+- wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
+- /* read the control register */
+- val = rd_reg32(&r4tst->rtmctl);
+- /*
+- * select raw sampling in both entropy shifter
+- * and statistical checker
+- */
+- setbits32(&val, RTMCTL_SAMP_MODE_RAW_ES_SC);
++ /* min. freq. count */
++ wr_reg32(&r4tst->rtfrqmin, RNG4_ENT_CLOCKS_SAMPLE / 4);
++ /* max. freq. count */
++ wr_reg32(&r4tst->rtfrqmax, RNG4_ENT_CLOCKS_SAMPLE * 8);
+ /* put RNG4 into run mode */
+- clrbits32(&val, RTMCTL_PRGM);
+- /* write back the control register */
+- wr_reg32(&r4tst->rtmctl, val);
++ clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+ }
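++
++/*
++ * Worked numbers for the programming above (a sketch, assuming
++ * RNG4_ENT_CLOCKS_SAMPLE is the 1600 clocks per sample the comment
++ * describes): entropy delay = 1600, rtfrqmin = 1600 / 4 = 400,
++ * rtfrqmax = 1600 * 8 = 12800.
++ */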
+
+ /**
+ * caam_get_era() - Return the ERA of the SEC on SoC, based
+- * on "sec-era" propery in the DTS. This property is updated by u-boot.
++ * on the SEC_VID register.
++ * Returns the ERA number (1..6) or -ENOTSUPP if the ERA is unknown.
++ * @caam_id - the value of the SEC_VID register
+ **/
+-int caam_get_era(void)
++int caam_get_era(u64 caam_id)
+ {
+- struct device_node *caam_node;
+- for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
+- const uint32_t *prop = (uint32_t *)of_get_property(caam_node,
+- "fsl,sec-era",
+- NULL);
+- return prop ? *prop : -ENOTSUPP;
+- }
++ struct sec_vid *sec_vid = (struct sec_vid *)&caam_id;
++ static const struct {
++ u16 ip_id;
++ u8 maj_rev;
++ u8 era;
++ } caam_eras[] = {
++ {0x0A10, 1, 1},
++ {0x0A10, 2, 2},
++ {0x0A12, 1, 3},
++ {0x0A14, 1, 3},
++ {0x0A14, 2, 4},
++ {0x0A16, 1, 4},
++ {0x0A11, 1, 4},
++ {0x0A10, 3, 4},
++ {0x0A18, 1, 4},
++ {0x0A11, 2, 5},
++ {0x0A12, 2, 5},
++ {0x0A13, 1, 5},
++ {0x0A1C, 1, 5},
++ {0x0A12, 4, 6},
++ {0x0A13, 2, 6},
++ {0x0A16, 2, 6},
++ {0x0A18, 2, 6},
++ {0x0A1A, 1, 6},
++ {0x0A1C, 2, 6},
++ {0x0A17, 1, 6}
++ };
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(caam_eras); i++)
++ if (caam_eras[i].ip_id == sec_vid->ip_id &&
++ caam_eras[i].maj_rev == sec_vid->maj_rev)
++ return caam_eras[i].era;
+
+ return -ENOTSUPP;
+ }
+ EXPORT_SYMBOL(caam_get_era);
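++
++/*
++ * Worked example (illustrative): a SEC reporting ip_id 0x0A16 with
++ * maj_rev 1 matches the {0x0A16, 1, 4} row above and is era 4; a pair
++ * absent from the table falls through to -ENOTSUPP.
++ */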
+
++/*
++ * Return a job ring device. This is available so outside
++ * entities can gain direct access to the job ring. For now,
++ * this function returns the first job ring (at index 0).
++ */
++struct device *caam_get_jrdev(void)
++{
++ return caam_jr_dev[0];
++}
++EXPORT_SYMBOL(caam_get_jrdev);
++
++
+ /* Probe routine for CAAM top (controller) level */
+ static int caam_probe(struct platform_device *pdev)
+ {
+- int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
++ int ret, ring, rspec;
+ u64 caam_id;
+ struct device *dev;
+ struct device_node *nprop, *np;
+ struct caam_ctrl __iomem *ctrl;
++ struct caam_full __iomem *topregs;
++ struct snvs_full __iomem *snvsregs;
+ struct caam_drv_private *ctrlpriv;
+ #ifdef CONFIG_DEBUG_FS
+ struct caam_perfmon *perfmon;
+ #endif
+- u32 scfgr, comp_params;
+- u32 cha_vid_ls;
+- int pg_size;
+- int BLOCK_OFFSET = 0;
+
+- ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
+- GFP_KERNEL);
++ ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
+ if (!ctrlpriv)
+ return -ENOMEM;
+
+@@ -415,71 +270,128 @@
+ dev_err(dev, "caam: of_iomap() failed\n");
+ return -ENOMEM;
+ }
+- /* Finding the page size for using the CTPR_MS register */
+- comp_params = rd_reg32(&ctrl->perfmon.comp_parms_ms);
+- pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
+-
+- /* Allocating the BLOCK_OFFSET based on the supported page size on
+- * the platform
+- */
+- if (pg_size == 0)
+- BLOCK_OFFSET = PG_SIZE_4K;
+- else
+- BLOCK_OFFSET = PG_SIZE_64K;
+-
+ ctrlpriv->ctrl = (struct caam_ctrl __force *)ctrl;
+- ctrlpriv->assure = (struct caam_assurance __force *)
+- ((uint8_t *)ctrl +
+- BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
+- );
+- ctrlpriv->deco = (struct caam_deco __force *)
+- ((uint8_t *)ctrl +
+- BLOCK_OFFSET * DECO_BLOCK_NUMBER
+- );
++
++ /* topregs used to derive pointers to CAAM sub-blocks only */
++ topregs = (struct caam_full __iomem *)ctrl;
+
+ /* Get the IRQ of the controller (for security violations only) */
+- ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
++ ctrlpriv->secvio_irq = of_irq_to_resource(nprop, 0, NULL);
++
++ /* Get SNVS register Page */
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-caam-snvs");
++
++ if (!np)
++ return -ENODEV;
++
++ snvsregs = of_iomap(np, 0);
++ ctrlpriv->snvs = snvsregs;
++ /* Get CAAM-SM node and of_iomap() and save */
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-caam-sm");
++
++ if (!np)
++ return -ENODEV;
++
++ ctrlpriv->sm_base = of_iomap(np, 0);
++ ctrlpriv->sm_size = 0x3fff;
++
++/*
++ * ARM targets tend to have clock control subsystems that can
++ * enable/disable clocking to our device. Turn clocking on to proceed
++ */
++#ifdef CONFIG_ARM
++ ctrlpriv->caam_ipg = devm_clk_get(&ctrlpriv->pdev->dev, "caam_ipg");
++ if (IS_ERR(ctrlpriv->caam_ipg)) {
++ ret = PTR_ERR(ctrlpriv->caam_ipg);
++ dev_err(&ctrlpriv->pdev->dev,
++ "can't identify CAAM ipg clk: %d\n", ret);
++ return -ENODEV;
++ }
++ ctrlpriv->caam_mem = devm_clk_get(&ctrlpriv->pdev->dev, "caam_mem");
++ if (IS_ERR(ctrlpriv->caam_mem)) {
++ ret = PTR_ERR(ctrlpriv->caam_mem);
++ dev_err(&ctrlpriv->pdev->dev,
++ "can't identify CAAM secure mem clk: %d\n", ret);
++ return -ENODEV;
++ }
++ ctrlpriv->caam_aclk = devm_clk_get(&ctrlpriv->pdev->dev, "caam_aclk");
++ if (IS_ERR(ctrlpriv->caam_aclk)) {
++ ret = PTR_ERR(ctrlpriv->caam_aclk);
++ dev_err(&ctrlpriv->pdev->dev,
++ "can't identify CAAM aclk clk: %d\n", ret);
++ return -ENODEV;
++ }
++
++ ret = clk_prepare(ctrlpriv->caam_ipg);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "can't prepare CAAM ipg clock: %d\n", ret);
++ return -ENODEV;
++ }
++ ret = clk_prepare(ctrlpriv->caam_mem);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "can't prepare CAAM secure mem clock: %d\n", ret);
++ return -ENODEV;
++ }
++ ret = clk_prepare(ctrlpriv->caam_aclk);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "can't prepare CAAM aclk clock: %d\n", ret);
++ return -ENODEV;
++ }
++
++ ret = clk_enable(ctrlpriv->caam_ipg);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "can't enable CAAM ipg clock: %d\n", ret);
++ return -ENODEV;
++ }
++ ret = clk_enable(ctrlpriv->caam_mem);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "can't enable CAAM secure mem clock: %d\n", ret);
++ return -ENODEV;
++ }
++ ret = clk_enable(ctrlpriv->caam_aclk);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "can't enable CAAM aclk clock: %d\n", ret);
++ return -ENODEV;
++ }
++
++ pr_debug("%s caam_ipg clock:%d\n", __func__,
++ (int)clk_get_rate(ctrlpriv->caam_ipg));
++ pr_debug("%s caam_mem clock:%d\n", __func__,
++ (int)clk_get_rate(ctrlpriv->caam_mem));
++ pr_debug("%s caam_aclk clock:%d\n", __func__,
++ (int)clk_get_rate(ctrlpriv->caam_aclk));
++#endif
+
+ /*
+ * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
+ * long pointers in master configuration register
+ */
+- setbits32(&ctrl->mcr, MCFGR_WDENABLE |
++ setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
+ (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
+
++#ifdef CONFIG_ARCH_MX6
+ /*
+- * Read the Compile Time paramters and SCFGR to determine
+- * if Virtualization is enabled for this platform
++ * ERRATA: mx6 devices have an issue wherein AXI bus transactions
++ * may not occur in the correct order. This isn't a problem running
++ * single descriptors, but can be if running multiple concurrent
++ * descriptors. Reworking the driver to throttle to single requests
++ * is impractical, thus the workaround is to limit the AXI pipeline
++	 * to a depth of 1 (from its default of 4) to preclude this situation
++ * from occurring.
+ */
+- scfgr = rd_reg32(&ctrl->scfgr);
+-
+- ctrlpriv->virt_en = 0;
+- if (comp_params & CTPR_MS_VIRT_EN_INCL) {
+- /* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
+- * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
+- */
+- if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
+- (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
+- (scfgr & SCFGR_VIRT_EN)))
+- ctrlpriv->virt_en = 1;
+- } else {
+- /* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
+- if (comp_params & CTPR_MS_VIRT_EN_POR)
+- ctrlpriv->virt_en = 1;
+- }
+-
+- if (ctrlpriv->virt_en == 1)
+- setbits32(&ctrl->jrstart, JRSTART_JR0_START |
+- JRSTART_JR1_START | JRSTART_JR2_START |
+- JRSTART_JR3_START);
++ wr_reg32(&topregs->ctrl.mcr,
++ (rd_reg32(&topregs->ctrl.mcr) & ~(MCFGR_AXIPIPE_MASK)) |
++ ((1 << MCFGR_AXIPIPE_SHIFT) & MCFGR_AXIPIPE_MASK));
++#endif
+
++ /* Set DMA masks according to platform ranging */
+ if (sizeof(dma_addr_t) == sizeof(u64))
+- if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
+- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
++ if (of_device_is_compatible(nprop, "fsl,sec-v4.0"))
++ dma_set_mask(dev, DMA_BIT_MASK(40));
+ else
+- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
++ dma_set_mask(dev, DMA_BIT_MASK(36));
+ else
+- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
++ dma_set_mask(dev, DMA_BIT_MASK(32));
+
+ /*
+ * Detect and enable JobRs
+@@ -487,51 +399,65 @@
+ * for all, then go probe each one.
+ */
+ rspec = 0;
+- for_each_available_child_of_node(nprop, np)
+- if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+- of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
++ for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
++ rspec++;
++ if (!rspec) {
++ /* for backward compatible with device trees */
++ for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring")
+ rspec++;
++ }
+
+- ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev,
+- sizeof(struct platform_device *) * rspec,
+- GFP_KERNEL);
+- if (ctrlpriv->jrpdev == NULL) {
+- iounmap(&ctrl);
++ ctrlpriv->jrdev = kzalloc(sizeof(struct device *) * rspec, GFP_KERNEL);
++ if (ctrlpriv->jrdev == NULL) {
++ iounmap(&topregs->ctrl);
+ return -ENOMEM;
+ }
+
+ ring = 0;
+ ctrlpriv->total_jobrs = 0;
+- for_each_available_child_of_node(nprop, np)
+- if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+- of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
+- ctrlpriv->jrpdev[ring] =
+- of_platform_device_create(np, NULL, dev);
+- if (!ctrlpriv->jrpdev[ring]) {
+- pr_warn("JR%d Platform device creation error\n",
+- ring);
+- continue;
++ for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
++ ret = caam_jr_probe(pdev, np, ring);
++ if (ret < 0) {
++ /*
++ * Job ring not found, error out. At some
++ * point, we should enhance job ring handling
++ * to allow for non-consecutive job rings to
++ * be found.
++ */
++ pr_err("fsl,sec-v4.0-job-ring not found ");
++ pr_err("(ring %d)\n", ring);
++ return ret;
++ }
++ ctrlpriv->total_jobrs++;
++ ring++;
++ }
++
++ if (!ring) {
++ for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
++ ret = caam_jr_probe(pdev, np, ring);
++ if (ret < 0) {
++ /*
++ * Job ring not found, error out. At some
++ * point, we should enhance job ring handling
++ * to allow for non-consecutive job rings to
++ * be found.
++ */
++ pr_err("fsl,sec4.0-job-ring not found ");
++ pr_err("(ring %d)\n", ring);
++ return ret;
+ }
+- ctrlpriv->jr[ring] = (struct caam_job_ring __force *)
+- ((uint8_t *)ctrl +
+- (ring + JR_BLOCK_NUMBER) *
+- BLOCK_OFFSET
+- );
+ ctrlpriv->total_jobrs++;
+ ring++;
++ }
+ }
+
+ /* Check to see if QI present. If so, enable */
+- ctrlpriv->qi_present =
+- !!(rd_reg32(&ctrl->perfmon.comp_parms_ms) &
+- CTPR_MS_QI_MASK);
++ ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
++ CTPR_QI_MASK);
+ if (ctrlpriv->qi_present) {
+- ctrlpriv->qi = (struct caam_queue_if __force *)
+- ((uint8_t *)ctrl +
+- BLOCK_OFFSET * QI_BLOCK_NUMBER
+- );
++ ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
+ /* This is all that's required to physically enable QI */
+- wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);
++ wr_reg32(&topregs->qi.qi_control_lo, QICTL_DQEN);
+ }
+
+ /* If no QI and no rings specified, quit and go home */
+@@ -541,81 +467,53 @@
+ return -ENOMEM;
+ }
+
+- cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
+-
+ /*
+- * If SEC has RNG version >= 4 and RNG state handle has not been
+- * already instantiated, do RNG instantiation
++ * RNG4 based SECs (v5+ | >= i.MX6) need special initialization prior
++ * to executing any descriptors. If there's a problem with init,
++ * remove other subsystems and return; internal padding functions
++ * cannot run without an RNG. This procedure assumes a single RNG4
++ * instance.
+ */
+- if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
+- ctrlpriv->rng4_sh_init =
+- rd_reg32(&ctrl->r4tst[0].rdsta);
++ if ((rd_reg64(&topregs->ctrl.perfmon.cha_id) & CHA_ID_RNG_MASK)
++ == CHA_ID_RNG_4) {
++ struct rng4tst __iomem *r4tst;
++ u32 rdsta, rng_if, rng_skvn;
++
+ /*
+- * If the secure keys (TDKEK, JDKEK, TDSK), were already
+- * generated, signal this to the function that is instantiating
+- * the state handles. An error would occur if RNG4 attempts
+- * to regenerate these keys before the next POR.
++ * Check to see if the RNG has already been instantiated.
++ * If either the state 0 or 1 instantiated flags are set,
++ * then don't continue on and try to instantiate the RNG
++ * again.
+ */
+- gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
+- ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
+- do {
+- int inst_handles =
+- rd_reg32(&ctrl->r4tst[0].rdsta) &
+- RDSTA_IFMASK;
+- /*
+- * If either SH were instantiated by somebody else
+- * (e.g. u-boot) then it is assumed that the entropy
+- * parameters are properly set and thus the function
+- * setting these (kick_trng(...)) is skipped.
+- * Also, if a handle was instantiated, do not change
+- * the TRNG parameters.
+- */
+- if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+- dev_info(dev,
+- "Entropy delay = %u\n",
+- ent_delay);
+- kick_trng(pdev, ent_delay);
+- ent_delay += 400;
++ r4tst = &topregs->ctrl.r4tst[0];
++ rdsta = rd_reg32(&r4tst->rdsta); /* Read RDSTA register */
++
++ /* Check IF bit for non-deterministic instantiation */
++ rng_if = rdsta & RDSTA_IF;
++
++ /* Check SKVN bit for non-deterministic key generation */
++ rng_skvn = rdsta & RDSTA_SKVN;
++ if (!rng_if) {
++ kick_trng(pdev);
++ ret = instantiate_rng(ctrlpriv->jrdev[0], rng_skvn);
++ if (ret) {
++ caam_remove(pdev);
++ return -ENODEV;
+ }
+- /*
+- * if instantiate_rng(...) fails, the loop will rerun
+- * and the kick_trng(...) function will modfiy the
+- * upper and lower limits of the entropy sampling
+- * interval, leading to a sucessful initialization of
+- * the RNG.
+- */
+- ret = instantiate_rng(dev, inst_handles,
+- gen_sk);
+- if (ret == -EAGAIN)
+- /*
+- * if here, the loop will rerun,
+- * so don't hog the CPU
+- */
+- cpu_relax();
+- } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
+- if (ret) {
+- dev_err(dev, "failed to instantiate RNG");
+- caam_remove(pdev);
+- return ret;
++ ctrlpriv->rng_inst++;
+ }
+- /*
+- * Set handles init'ed by this module as the complement of the
+- * already initialized ones
+- */
+- ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
+-
+- /* Enable RDB bit so that RNG works faster */
+- setbits32(&ctrl->scfgr, SCFGR_RDBENABLE);
+ }
+
+ /* NOTE: RTIC detection ought to go here, around Si time */
+
+- caam_id = (u64)rd_reg32(&ctrl->perfmon.caam_id_ms) << 32 |
+- (u64)rd_reg32(&ctrl->perfmon.caam_id_ls);
++ /* Initialize queue allocator lock */
++ spin_lock_init(&ctrlpriv->jr_alloc_lock);
++
++ caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id);
+
+ /* Report "alive" for developer to see */
+ dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
+- caam_get_era());
++ caam_get_era(caam_id));
+ dev_info(dev, "job rings = %d, qi = %d\n",
+ ctrlpriv->total_jobrs, ctrlpriv->qi_present);
+
+@@ -627,7 +525,7 @@
+ */
+ perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
+
+- ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
++ ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
+ ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
+
+ /* Controller-level - performance monitor counters */
+@@ -716,6 +614,7 @@
+ static struct platform_driver caam_driver = {
+ .driver = {
+ .name = "caam",
++ .owner = THIS_MODULE,
+ .of_match_table = caam_match,
+ },
+ .probe = caam_probe,
+diff -Nur linux-4.1.3/drivers/crypto/caam/ctrl.h linux-xbian-imx6/drivers/crypto/caam/ctrl.h
+--- linux-4.1.3/drivers/crypto/caam/ctrl.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/crypto/caam/ctrl.h 2015-07-27 23:13:04.209961631 +0200
+@@ -8,6 +8,6 @@
+ #define CTRL_H
+
+ /* Prototypes for backend-level services exposed to APIs */
+-int caam_get_era(void);
++int caam_get_era(u64 caam_id);
+
+ #endif /* CTRL_H */
+diff -Nur linux-4.1.3/drivers/crypto/caam/desc_constr.h linux-xbian-imx6/drivers/crypto/caam/desc_constr.h
+--- linux-4.1.3/drivers/crypto/caam/desc_constr.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/crypto/caam/desc_constr.h 2015-07-27 23:13:04.209961631 +0200
+@@ -10,7 +10,6 @@
+ #define CAAM_CMD_SZ sizeof(u32)
+ #define CAAM_PTR_SZ sizeof(dma_addr_t)
+ #define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
+-#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
+
+ #ifdef DEBUG
+ #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
+@@ -111,26 +110,6 @@
+ (*desc)++;
+ }
+
+-#define append_u32 append_cmd
+-
+-static inline void append_u64(u32 *desc, u64 data)
+-{
+- u32 *offset = desc_end(desc);
+-
+- *offset = upper_32_bits(data);
+- *(++offset) = lower_32_bits(data);
+-
+- (*desc) += 2;
+-}
+-
+-/* Write command without affecting header, and return pointer to next word */
+-static inline u32 *write_cmd(u32 *desc, u32 command)
+-{
+- *desc = command;
+-
+- return desc + 1;
+-}
+-
+ static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len,
+ u32 command)
+ {
+@@ -143,8 +122,7 @@
+ unsigned int len, u32 command)
+ {
+ append_cmd(desc, command);
+- if (!(command & (SQIN_RTO | SQIN_PRE)))
+- append_ptr(desc, ptr);
++ append_ptr(desc, ptr);
+ append_cmd(desc, len);
+ }
+
+@@ -155,29 +133,21 @@
+ append_data(desc, data, len);
+ }
+
+-#define APPEND_CMD_RET(cmd, op) \
+-static inline u32 *append_##cmd(u32 *desc, u32 options) \
+-{ \
+- u32 *cmd = desc_end(desc); \
+- PRINT_POS; \
+- append_cmd(desc, CMD_##op | options); \
+- return cmd; \
++static inline u32 *append_jump(u32 *desc, u32 options)
++{
++ u32 *cmd = desc_end(desc);
++
++ PRINT_POS;
++ append_cmd(desc, CMD_JUMP | options);
++
++ return cmd;
+ }
+-APPEND_CMD_RET(jump, JUMP)
+-APPEND_CMD_RET(move, MOVE)
+
+ static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
+ {
+ *jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc));
+ }
+
+-static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
+-{
+- *move_cmd &= ~MOVE_OFFSET_MASK;
+- *move_cmd = *move_cmd | ((desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) &
+- MOVE_OFFSET_MASK);
+-}
+-
+ #define APPEND_CMD(cmd, op) \
+ static inline void append_##cmd(u32 *desc, u32 options) \
+ { \
+@@ -185,6 +155,7 @@
+ append_cmd(desc, CMD_##op | options); \
+ }
+ APPEND_CMD(operation, OPERATION)
++APPEND_CMD(move, MOVE)
+
+ #define APPEND_CMD_LEN(cmd, op) \
+ static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
+@@ -192,8 +163,6 @@
+ PRINT_POS; \
+ append_cmd(desc, CMD_##op | len | options); \
+ }
+-
+-APPEND_CMD_LEN(seq_load, SEQ_LOAD)
+ APPEND_CMD_LEN(seq_store, SEQ_STORE)
+ APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
+ APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
+@@ -207,36 +176,17 @@
+ }
+ APPEND_CMD_PTR(key, KEY)
+ APPEND_CMD_PTR(load, LOAD)
++APPEND_CMD_PTR(store, STORE)
+ APPEND_CMD_PTR(fifo_load, FIFO_LOAD)
+ APPEND_CMD_PTR(fifo_store, FIFO_STORE)
+
+-static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len,
+- u32 options)
+-{
+- u32 cmd_src;
+-
+- cmd_src = options & LDST_SRCDST_MASK;
+-
+- append_cmd(desc, CMD_STORE | options | len);
+-
+- /* The following options do not require pointer */
+- if (!(cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED ||
+- cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB ||
+- cmd_src == LDST_SRCDST_WORD_DESCBUF_JOB_WE ||
+- cmd_src == LDST_SRCDST_WORD_DESCBUF_SHARED_WE))
+- append_ptr(desc, ptr);
+-}
+-
+ #define APPEND_SEQ_PTR_INTLEN(cmd, op) \
+ static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
+ unsigned int len, \
+ u32 options) \
+ { \
+ PRINT_POS; \
+- if (options & (SQIN_RTO | SQIN_PRE)) \
+- append_cmd(desc, CMD_SEQ_##op##_PTR | len | options); \
+- else \
+- append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \
++ append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \
+ }
+ APPEND_SEQ_PTR_INTLEN(in, IN)
+ APPEND_SEQ_PTR_INTLEN(out, OUT)
+@@ -309,7 +259,7 @@
+ */
+ #define APPEND_MATH(op, desc, dest, src_0, src_1, len) \
+ append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \
+- MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32)len);
++ MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32) (len & MATH_LEN_MASK));
+
+ #define append_math_add(desc, dest, src0, src1, len) \
+ APPEND_MATH(ADD, desc, dest, src0, src1, len)
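The APPEND_MATH change masks the caller-supplied length with MATH_LEN_MASK before OR-ing it into the command word, so an oversized len can no longer spill into adjacent bit fields. A small sketch of the failure mode being guarded against (field widths here are illustrative, not the real MATH layout):

    #include <stdint.h>

    #define LEN_MASK 0xffu          /* illustrative 8-bit length field */
    #define FUN_ADD  (1u << 8)      /* illustrative bit just above it */

    static uint32_t build_cmd(uint32_t len)
    {
            /* Unmasked, len == 0x1ff would also set FUN_ADD; the mask
             * confines the length to its own field. */
            return FUN_ADD | (len & LEN_MASK);
    }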
+@@ -329,15 +279,13 @@
+ APPEND_MATH(LSHIFT, desc, dest, src0, src1, len)
+ #define append_math_rshift(desc, dest, src0, src1, len) \
+ APPEND_MATH(RSHIFT, desc, dest, src0, src1, len)
+-#define append_math_ldshift(desc, dest, src0, src1, len) \
+- APPEND_MATH(SHLD, desc, dest, src0, src1, len)
+
+ /* Exactly one source is IMM. Data is passed in as u32 value */
+ #define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \
+ do { \
+ APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \
+ append_cmd(desc, data); \
+-} while (0)
++} while (0);
+
+ #define append_math_add_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data)
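Note the trailing semicolon this hunk restores after } while (0). Upstream removed it because a statement-like macro that supplies its own semicolon breaks as the body of an if/else; a compact illustration:

    void use(int);

    #define EMIT_GOOD(x) do { use(x); } while (0)   /* upstream form */
    #define EMIT_BAD(x)  do { use(x); } while (0);  /* form restored here */

    void f(int c, int x)
    {
            if (c)
                    EMIT_GOOD(x);   /* expands to one statement */
            else
                    use(0);
            /* EMIT_BAD(x); in the same spot leaves a stray empty
             * statement, and the following 'else' fails to compile. */
    }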
+@@ -357,34 +305,3 @@
+ APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data)
+ #define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data)
+-
+-/* Exactly one source is IMM. Data is passed in as u64 value */
+-#define APPEND_MATH_IMM_u64(op, desc, dest, src_0, src_1, data) \
+-do { \
+- u32 upper = (data >> 16) >> 16; \
+- APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ * 2 | \
+- (upper ? 0 : MATH_IFB)); \
+- if (upper) \
+- append_u64(desc, data); \
+- else \
+- append_u32(desc, data); \
+-} while (0)
+-
+-#define append_math_add_imm_u64(desc, dest, src0, src1, data) \
+- APPEND_MATH_IMM_u64(ADD, desc, dest, src0, src1, data)
+-#define append_math_sub_imm_u64(desc, dest, src0, src1, data) \
+- APPEND_MATH_IMM_u64(SUB, desc, dest, src0, src1, data)
+-#define append_math_add_c_imm_u64(desc, dest, src0, src1, data) \
+- APPEND_MATH_IMM_u64(ADDC, desc, dest, src0, src1, data)
+-#define append_math_sub_b_imm_u64(desc, dest, src0, src1, data) \
+- APPEND_MATH_IMM_u64(SUBB, desc, dest, src0, src1, data)
+-#define append_math_and_imm_u64(desc, dest, src0, src1, data) \
+- APPEND_MATH_IMM_u64(AND, desc, dest, src0, src1, data)
+-#define append_math_or_imm_u64(desc, dest, src0, src1, data) \
+- APPEND_MATH_IMM_u64(OR, desc, dest, src0, src1, data)
+-#define append_math_xor_imm_u64(desc, dest, src0, src1, data) \
+- APPEND_MATH_IMM_u64(XOR, desc, dest, src0, src1, data)
+-#define append_math_lshift_imm_u64(desc, dest, src0, src1, data) \
+- APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
+-#define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
+- APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
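One detail in the u64-immediate block removed above is worth retaining: the upper half was extracted as (data >> 16) >> 16 rather than data >> 32, since a shift by 32 is undefined if the macro is ever instantiated with a 32-bit argument. The idiom in isolation:

    #include <stdint.h>

    /* Two 16-bit shifts stay defined even for a 32-bit operand, where
     * '>> 32' would be undefined behaviour. */
    #define UPPER_32(data) (((data) >> 16) >> 16)

    static int fits_in_32(uint64_t data)
    {
            /* The removed code used this test to choose between a
             * one-word (MATH_IFB) and two-word immediate. */
            return UPPER_32(data) == 0;
    }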
+diff -Nur linux-4.1.3/drivers/crypto/caam/desc.h linux-xbian-imx6/drivers/crypto/caam/desc.h
+--- linux-4.1.3/drivers/crypto/caam/desc.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/crypto/caam/desc.h 2015-07-27 23:13:04.209961631 +0200
+@@ -2,19 +2,35 @@
+ * CAAM descriptor composition header
+ * Definitions to support CAAM descriptor instruction generation
+ *
+- * Copyright 2008-2011 Freescale Semiconductor, Inc.
++ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
+ */
+
+ #ifndef DESC_H
+ #define DESC_H
+
++/*
++ * 16-byte hardware scatter/gather table
++ * An 8-byte table exists in the hardware spec, but has never been
++ * implemented to date. The 8/16 option is selected at RTL-compile-time.
++ * and this selection is visible in the Compile Time Parameters Register
++ */
++
++#define SEC4_SG_LEN_EXT 0x80000000 /* Entry points to table */
++#define SEC4_SG_LEN_FIN 0x40000000 /* Last ent in table */
++#define SEC4_SG_BPID_MASK 0x000000ff
++#define SEC4_SG_BPID_SHIFT 16
++#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
++#define SEC4_SG_OFFS_MASK 0x00001fff
++
+ struct sec4_sg_entry {
++#ifdef CONFIG_64BIT
+ u64 ptr;
+-#define SEC4_SG_LEN_FIN 0x40000000
+-#define SEC4_SG_LEN_EXT 0x80000000
++#else
++ u32 reserved;
++ u32 ptr;
++#endif
+ u32 len;
+- u8 reserved;
+- u8 buf_pool_id;
++ u16 buf_pool_id;
+ u16 offset;
+ };
+
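With the entry re-laid-out above (an explicit reserved word ahead of a 32-bit pointer on non-64-bit builds, and a 16-bit buffer-pool ID), filling a table entry is a matter of masking each field into place. A hedged sketch assuming the 32-bit layout and the masks this hunk defines:

    #include <stdint.h>

    #define SG_LEN_FIN   0x40000000u    /* last entry in table */
    #define SG_LEN_MASK  0x3fffffffu
    #define SG_OFFS_MASK 0x00001fffu

    struct sg_entry {                   /* 32-bit layout from the hunk */
            uint32_t reserved;
            uint32_t ptr;
            uint32_t len;
            uint16_t buf_pool_id;
            uint16_t offset;
    };

    static void fill_last_entry(struct sg_entry *e, uint32_t dma,
                                uint32_t len, uint16_t offs)
    {
            e->reserved = 0;
            e->ptr = dma;
            e->len = (len & SG_LEN_MASK) | SG_LEN_FIN;  /* final entry */
            e->buf_pool_id = 0;
            e->offset = offs & SG_OFFS_MASK;
    }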
+@@ -231,12 +247,7 @@
+ #define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
+ #define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT)
+ #define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT)
+-#define LDST_SRCDST_WORD_CLASS_CTX (0x20 << LDST_SRCDST_SHIFT)
+ #define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT)
+-#define LDST_SRCDST_WORD_DESCBUF_JOB (0x41 << LDST_SRCDST_SHIFT)
+-#define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT)
+-#define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT)
+-#define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
+ #define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
+
+ /* Offset in source/destination */
+@@ -370,7 +382,6 @@
+ #define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
+ #define FIFOLD_TYPE_LASTBOTH (0x06 << FIFOLD_TYPE_SHIFT)
+ #define FIFOLD_TYPE_LASTBOTHFL (0x07 << FIFOLD_TYPE_SHIFT)
+-#define FIFOLD_TYPE_NOINFOFIFO (0x0F << FIFOLD_TYPE_SHIFT)
+
+ #define FIFOLDST_LEN_MASK 0xffff
+ #define FIFOLDST_EXT_LEN_MASK 0xffffffff
+@@ -1092,6 +1103,23 @@
+ #define OP_PCL_PKPROT_ECC 0x0002
+ #define OP_PCL_PKPROT_F2M 0x0001
+
++/* Blob protocol protinfo bits */
++#define OP_PCL_BLOB_TK 0x0200
++#define OP_PCL_BLOB_EKT 0x0100
++
++#define OP_PCL_BLOB_K2KR_MEM 0x0000
++#define OP_PCL_BLOB_K2KR_C1KR 0x0010
++#define OP_PCL_BLOB_K2KR_C2KR 0x0030
++#define OP_PCL_BLOB_K2KR_AFHAS 0x0050
++#define OP_PCL_BLOB_K2KR_C2KR_SPLIT 0x0070
++
++#define OP_PCL_BLOB_PTXT_SECMEM 0x0008
++#define OP_PCL_BLOB_BLACK 0x0004
++
++#define OP_PCL_BLOB_FMT_NORMAL 0x0000
++#define OP_PCL_BLOB_FMT_MSTR 0x0002
++#define OP_PCL_BLOB_FMT_TEST 0x0003
++
+ /* For non-protocol/alg-only op commands */
+ #define OP_ALG_TYPE_SHIFT 24
+ #define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
+@@ -1154,15 +1182,8 @@
+
+ /* randomizer AAI set */
+ #define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT)
+-#define OP_ALG_AAI_RNG_NZB (0x10 << OP_ALG_AAI_SHIFT)
+-#define OP_ALG_AAI_RNG_OBP (0x20 << OP_ALG_AAI_SHIFT)
+-
+-/* RNG4 AAI set */
+-#define OP_ALG_AAI_RNG4_SH_0 (0x00 << OP_ALG_AAI_SHIFT)
+-#define OP_ALG_AAI_RNG4_SH_1 (0x01 << OP_ALG_AAI_SHIFT)
+-#define OP_ALG_AAI_RNG4_PS (0x40 << OP_ALG_AAI_SHIFT)
+-#define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
+-#define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
++#define OP_ALG_AAI_RNG_NOZERO (0x10 << OP_ALG_AAI_SHIFT)
++#define OP_ALG_AAI_RNG_ODD (0x20 << OP_ALG_AAI_SHIFT)
+
+ /* hmac/smac AAI set */
+ #define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
+@@ -1184,6 +1205,12 @@
+ #define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT)
+ #define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT)
+
++/* RNG4 set */
++#define OP_ALG_RNG4_SHIFT 4
++#define OP_ALG_RNG4_MASK (0x1f3 << OP_ALG_RNG4_SHIFT)
++
++#define OP_ALG_RNG4_SK (0x100 << OP_ALG_RNG4_SHIFT)
++
+ #define OP_ALG_AS_SHIFT 2
+ #define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT)
+ #define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT)
+@@ -1300,10 +1327,10 @@
+ #define SQOUT_SGF 0x01000000
+
+ /* Appends to a previous pointer */
+-#define SQOUT_PRE SQIN_PRE
++#define SQOUT_PRE 0x00800000
+
+ /* Restore sequence with pointer/length */
+-#define SQOUT_RTO SQIN_RTO
++#define SQOUT_RTO 0x00200000
+
+ /* Use extended length following pointer */
+ #define SQOUT_EXT 0x00400000
+@@ -1365,7 +1392,6 @@
+ #define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT)
+ #define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT)
+ #define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT)
+-#define MOVE_DEST_INFIFO_NOINFO (0x0a << MOVE_DEST_SHIFT)
+ #define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT)
+ #define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT)
+ #define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT)
+@@ -1418,7 +1444,6 @@
+ #define MATH_SRC0_REG2 (0x02 << MATH_SRC0_SHIFT)
+ #define MATH_SRC0_REG3 (0x03 << MATH_SRC0_SHIFT)
+ #define MATH_SRC0_IMM (0x04 << MATH_SRC0_SHIFT)
+-#define MATH_SRC0_DPOVRD (0x07 << MATH_SRC0_SHIFT)
+ #define MATH_SRC0_SEQINLEN (0x08 << MATH_SRC0_SHIFT)
+ #define MATH_SRC0_SEQOUTLEN (0x09 << MATH_SRC0_SHIFT)
+ #define MATH_SRC0_VARSEQINLEN (0x0a << MATH_SRC0_SHIFT)
+@@ -1433,7 +1458,6 @@
+ #define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
+ #define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
+ #define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
+-#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC0_SHIFT)
+ #define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
+ #define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
+ #define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
+@@ -1609,13 +1633,28 @@
+ #define NFIFOENTRY_PLEN_SHIFT 0
+ #define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT)
+
+-/* Append Load Immediate Command */
+-#define FD_CMD_APPEND_LOAD_IMMEDIATE 0x80000000
++/*
++ * PDB internal definitions
++ */
++
++/* IPSec ESP CBC Encap/Decap Options */
++#define PDBOPTS_ESPCBC_ARSNONE 0x00 /* no antireplay window */
++#define PDBOPTS_ESPCBC_ARS32 0x40 /* 32-entry antireplay window */
++#define PDBOPTS_ESPCBC_ARS64 0xc0 /* 64-entry antireplay window */
++#define PDBOPTS_ESPCBC_IVSRC 0x20 /* IV comes from internal random gen */
++#define PDBOPTS_ESPCBC_ESN 0x10 /* extended sequence included */
++#define PDBOPTS_ESPCBC_OUTFMT 0x08 /* output only decapsulation (decap) */
++#define PDBOPTS_ESPCBC_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */
++#define PDBOPTS_ESPCBC_INCIPHDR 0x04 /* Prepend IP header to output frame */
++#define PDBOPTS_ESPCBC_IPVSN 0x02 /* process IPv6 header */
++#define PDBOPTS_ESPCBC_TUNNEL 0x01 /* tunnel mode next-header byte */
++
++#define ARC4_BLOCK_SIZE 1
++#define ARC4_MAX_KEY_SIZE 256
++#define ARC4_MIN_KEY_SIZE 1
+
+-/* Set SEQ LIODN equal to the Non-SEQ LIODN for the job */
+-#define FD_CMD_SET_SEQ_LIODN_EQUAL_NONSEQ_LIODN 0x40000000
++#define XCBC_MAC_DIGEST_SIZE 16
++#define XCBC_MAC_BLOCK_WORDS 16
+
+-/* Frame Descriptor Command for Replacement Job Descriptor */
+-#define FD_CMD_REPLACE_JOB_DESC 0x20000000
+
+ #endif /* DESC_H */
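The new blob protinfo bits compose by OR-ing one choice from each group into the protocol operation word. A minimal sketch using the values defined above; which combinations the hardware accepts is not spelled out here, so the BLACK-plus-EKT pairing below is an illustrative assumption:

    #include <stdint.h>

    #define BLOB_TK         0x0200
    #define BLOB_EKT        0x0100
    #define BLOB_K2KR_MEM   0x0000
    #define BLOB_BLACK      0x0004
    #define BLOB_FMT_NORMAL 0x0000

    static uint16_t blob_protinfo(int black, int trusted)
    {
            uint16_t pi = BLOB_K2KR_MEM | BLOB_FMT_NORMAL;

            if (black)
                    pi |= BLOB_BLACK | BLOB_EKT;  /* assumed pairing */
            if (trusted)
                    pi |= BLOB_TK;
            return pi;
    }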
+diff -Nur linux-4.1.3/drivers/crypto/caam/error.c linux-xbian-imx6/drivers/crypto/caam/error.c
+--- linux-4.1.3/drivers/crypto/caam/error.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/crypto/caam/error.c 2015-07-27 23:13:04.213947410 +0200
+@@ -11,243 +11,264 @@
+ #include "jr.h"
+ #include "error.h"
+
+-static const struct {
+- u8 value;
+- const char *error_text;
+-} desc_error_list[] = {
+- { 0x00, "No error." },
+- { 0x01, "SGT Length Error. The descriptor is trying to read more data than is contained in the SGT table." },
+- { 0x02, "SGT Null Entry Error." },
+- { 0x03, "Job Ring Control Error. There is a bad value in the Job Ring Control register." },
+- { 0x04, "Invalid Descriptor Command. The Descriptor Command field is invalid." },
+- { 0x05, "Reserved." },
+- { 0x06, "Invalid KEY Command" },
+- { 0x07, "Invalid LOAD Command" },
+- { 0x08, "Invalid STORE Command" },
+- { 0x09, "Invalid OPERATION Command" },
+- { 0x0A, "Invalid FIFO LOAD Command" },
+- { 0x0B, "Invalid FIFO STORE Command" },
+- { 0x0C, "Invalid MOVE/MOVE_LEN Command" },
+- { 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is invalid because the target is not a Job Header Command, or the jump is from a Trusted Descriptor to a Job Descriptor, or because the target Descriptor contains a Shared Descriptor." },
+- { 0x0E, "Invalid MATH Command" },
+- { 0x0F, "Invalid SIGNATURE Command" },
+- { 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO LOAD, or SEQ FIFO STORE decremented the input or output sequence length below 0. This error may result if a built-in PROTOCOL Command has encountered a malformed PDU." },
+- { 0x11, "Skip data type invalid. The type must be 0xE or 0xF."},
+- { 0x12, "Shared Descriptor Header Error" },
+- { 0x13, "Header Error. Invalid length or parity, or certain other problems." },
+- { 0x14, "Burster Error. Burster has gotten to an illegal state" },
+- { 0x15, "Context Register Length Error. The descriptor is trying to read or write past the end of the Context Register. A SEQ LOAD or SEQ STORE with the VLF bit set was executed with too large a length in the variable length register (VSOL for SEQ STORE or VSIL for SEQ LOAD)." },
+- { 0x16, "DMA Error" },
+- { 0x17, "Reserved." },
+- { 0x1A, "Job failed due to JR reset" },
+- { 0x1B, "Job failed due to Fail Mode" },
+- { 0x1C, "DECO Watchdog timer timeout error" },
+- { 0x1D, "DECO tried to copy a key from another DECO but the other DECO's Key Registers were locked" },
+- { 0x1E, "DECO attempted to copy data from a DECO that had an unmasked Descriptor error" },
+- { 0x1F, "LIODN error. DECO was trying to share from itself or from another DECO but the two Non-SEQ LIODN values didn't match or the 'shared from' DECO's Descriptor required that the SEQ LIODNs be the same and they aren't." },
+- { 0x20, "DECO has completed a reset initiated via the DRR register" },
+- { 0x21, "Nonce error. When using EKT (CCM) key encryption option in the FIFO STORE Command, the Nonce counter reached its maximum value and this encryption mode can no longer be used." },
+- { 0x22, "Meta data is too large (> 511 bytes) for TLS decap (input frame; block ciphers) and IPsec decap (output frame, when doing the next header byte update) and DCRC (output frame)." },
+- { 0x23, "Read Input Frame error" },
+- { 0x24, "JDKEK, TDKEK or TDSK not loaded error" },
+- { 0x80, "DNR (do not run) error" },
+- { 0x81, "undefined protocol command" },
+- { 0x82, "invalid setting in PDB" },
+- { 0x83, "Anti-replay LATE error" },
+- { 0x84, "Anti-replay REPLAY error" },
+- { 0x85, "Sequence number overflow" },
+- { 0x86, "Sigver invalid signature" },
+- { 0x87, "DSA Sign Illegal test descriptor" },
+- { 0x88, "Protocol Format Error - A protocol has seen an error in the format of data received. When running RSA, this means that formatting with random padding was used, and did not follow the form: 0x00, 0x02, 8-to-N bytes of non-zero pad, 0x00, F data." },
+- { 0x89, "Protocol Size Error - A protocol has seen an error in size. When running RSA, pdb size N < (size of F) when no formatting is used; or pdb size N < (F + 11) when formatting is used." },
+- { 0xC1, "Blob Command error: Undefined mode" },
+- { 0xC2, "Blob Command error: Secure Memory Blob mode error" },
+- { 0xC4, "Blob Command error: Black Blob key or input size error" },
+- { 0xC5, "Blob Command error: Invalid key destination" },
+- { 0xC8, "Blob Command error: Trusted/Secure mode error" },
+- { 0xF0, "IPsec TTL or hop limit field either came in as 0, or was decremented to 0" },
+- { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
+-};
+-
+-static const char * const cha_id_list[] = {
+- "",
+- "AES",
+- "DES",
+- "ARC4",
+- "MDHA",
+- "RNG",
+- "SNOW f8",
+- "Kasumi f8/9",
+- "PKHA",
+- "CRCA",
+- "SNOW f9",
+- "ZUCE",
+- "ZUCA",
+-};
+-
+-static const char * const err_id_list[] = {
+- "No error.",
+- "Mode error.",
+- "Data size error.",
+- "Key size error.",
+- "PKHA A memory size error.",
+- "PKHA B memory size error.",
+- "Data arrived out of sequence error.",
+- "PKHA divide-by-zero error.",
+- "PKHA modulus even error.",
+- "DES key parity error.",
+- "ICV check failed.",
+- "Hardware error.",
+- "Unsupported CCM AAD size.",
+- "Class 1 CHA is not reset",
+- "Invalid CHA combination was selected",
+- "Invalid CHA selected.",
+-};
+-
+-static const char * const rng_err_id_list[] = {
+- "",
+- "",
+- "",
+- "Instantiate",
+- "Not instantiated",
+- "Test instantiate",
+- "Prediction resistance",
+- "Prediction resistance and test request",
+- "Uninstantiate",
+- "Secure key generation",
+-};
++#define SPRINTFCAT(str, format, param, max_alloc) \
++{ \
++ char *tmp; \
++ \
++ tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \
++ if (likely(tmp)) { \
++ sprintf(tmp, format, param); \
++ strcat(str, tmp); \
++ kfree(tmp); \
++ } else { \
++ strcat(str, "kmalloc failure in SPRINTFCAT"); \
++ } \
++}
+
+-static void report_ccb_status(struct device *jrdev, const u32 status,
+- const char *error)
++static void report_jump_idx(u32 status, char *outstr)
+ {
+- u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
+- JRSTA_CCBERR_CHAID_SHIFT;
+- u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
+ u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
+ JRSTA_DECOERR_INDEX_SHIFT;
+- char *idx_str;
+- const char *cha_str = "unidentified cha_id value 0x";
+- char cha_err_code[3] = { 0 };
+- const char *err_str = "unidentified err_id value 0x";
+- char err_err_code[3] = { 0 };
+
+ if (status & JRSTA_DECOERR_JUMP)
+- idx_str = "jump tgt desc idx";
++ strcat(outstr, "jump tgt desc idx ");
+ else
+- idx_str = "desc idx";
++ strcat(outstr, "desc idx ");
+
+- if (cha_id < ARRAY_SIZE(cha_id_list))
+- cha_str = cha_id_list[cha_id];
+- else
+- snprintf(cha_err_code, sizeof(cha_err_code), "%02x", cha_id);
++ SPRINTFCAT(outstr, "%d: ", idx, sizeof("255"));
++}
++
++static void report_ccb_status(u32 status, char *outstr)
++{
++ static const char * const cha_id_list[] = {
++ "",
++ "AES",
++ "DES",
++ "ARC4",
++ "MDHA",
++ "RNG",
++ "SNOW f8",
++ "Kasumi f8/9",
++ "PKHA",
++ "CRCA",
++ "SNOW f9",
++ "ZUCE",
++ "ZUCA",
++ };
++ static const char * const err_id_list[] = {
++ "No error.",
++ "Mode error.",
++ "Data size error.",
++ "Key size error.",
++ "PKHA A memory size error.",
++ "PKHA B memory size error.",
++ "Data arrived out of sequence error.",
++ "PKHA divide-by-zero error.",
++ "PKHA modulus even error.",
++ "DES key parity error.",
++ "ICV check failed.",
++ "Hardware error.",
++ "Unsupported CCM AAD size.",
++ "Class 1 CHA is not reset",
++ "Invalid CHA combination was selected",
++ "Invalid CHA selected.",
++ };
++ static const char * const rng_err_id_list[] = {
++ "",
++ "",
++ "",
++ "Instantiate",
++ "Not instantiated",
++ "Test instantiate",
++ "Prediction resistance",
++ "Prediction resistance and test request",
++ "Uninstantiate",
++ "Secure key generation",
++ };
++ u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >>
++ JRSTA_CCBERR_CHAID_SHIFT;
++ u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
++
++ report_jump_idx(status, outstr);
++
++ if (cha_id < ARRAY_SIZE(cha_id_list)) {
++ SPRINTFCAT(outstr, "%s: ", cha_id_list[cha_id],
++ strlen(cha_id_list[cha_id]));
++ } else {
++ SPRINTFCAT(outstr, "unidentified cha_id value 0x%02x: ",
++ cha_id, sizeof("ff"));
++ }
+
+ if ((cha_id << JRSTA_CCBERR_CHAID_SHIFT) == JRSTA_CCBERR_CHAID_RNG &&
+ err_id < ARRAY_SIZE(rng_err_id_list) &&
+ strlen(rng_err_id_list[err_id])) {
+ /* RNG-only error */
+- err_str = rng_err_id_list[err_id];
+- } else if (err_id < ARRAY_SIZE(err_id_list))
+- err_str = err_id_list[err_id];
+- else
+- snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+-
+- /*
+- * CCB ICV check failures are part of normal operation life;
+- * we leave the upper layers to do what they want with them.
+- */
+- if (err_id != JRSTA_CCBERR_ERRID_ICVCHK)
+- dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n",
+- status, error, idx_str, idx,
+- cha_str, cha_err_code,
+- err_str, err_err_code);
++ SPRINTFCAT(outstr, "%s", rng_err_id_list[err_id],
++ strlen(rng_err_id_list[err_id]));
++ } else if (err_id < ARRAY_SIZE(err_id_list)) {
++ SPRINTFCAT(outstr, "%s", err_id_list[err_id],
++ strlen(err_id_list[err_id]));
++ } else {
++ SPRINTFCAT(outstr, "unidentified err_id value 0x%02x",
++ err_id, sizeof("ff"));
++ }
+ }
+
+-static void report_jump_status(struct device *jrdev, const u32 status,
+- const char *error)
++static void report_jump_status(u32 status, char *outstr)
+ {
+- dev_err(jrdev, "%08x: %s: %s() not implemented\n",
+- status, error, __func__);
++ SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
+ }
+
+-static void report_deco_status(struct device *jrdev, const u32 status,
+- const char *error)
++static void report_deco_status(u32 status, char *outstr)
+ {
+- u8 err_id = status & JRSTA_DECOERR_ERROR_MASK;
+- u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >>
+- JRSTA_DECOERR_INDEX_SHIFT;
+- char *idx_str;
+- const char *err_str = "unidentified error value 0x";
+- char err_err_code[3] = { 0 };
++ static const struct {
++ u8 value;
++ char *error_text;
++ } desc_error_list[] = {
++ { 0x00, "No error." },
++ { 0x01, "SGT Length Error. The descriptor is trying to read "
++ "more data than is contained in the SGT table." },
++ { 0x02, "SGT Null Entry Error." },
++ { 0x03, "Job Ring Control Error. There is a bad value in the "
++ "Job Ring Control register." },
++ { 0x04, "Invalid Descriptor Command. The Descriptor Command "
++ "field is invalid." },
++ { 0x05, "Reserved." },
++ { 0x06, "Invalid KEY Command" },
++ { 0x07, "Invalid LOAD Command" },
++ { 0x08, "Invalid STORE Command" },
++ { 0x09, "Invalid OPERATION Command" },
++ { 0x0A, "Invalid FIFO LOAD Command" },
++ { 0x0B, "Invalid FIFO STORE Command" },
++ { 0x0C, "Invalid MOVE/MOVE_LEN Command" },
++ { 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is "
++ "invalid because the target is not a Job Header "
++ "Command, or the jump is from a Trusted Descriptor to "
++ "a Job Descriptor, or because the target Descriptor "
++ "contains a Shared Descriptor." },
++ { 0x0E, "Invalid MATH Command" },
++ { 0x0F, "Invalid SIGNATURE Command" },
++ { 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR "
++ "Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO "
++ "LOAD, or SEQ FIFO STORE decremented the input or "
++ "output sequence length below 0. This error may result "
++ "if a built-in PROTOCOL Command has encountered a "
++ "malformed PDU." },
++ { 0x11, "Skip data type invalid. The type must be 0xE or 0xF."},
++ { 0x12, "Shared Descriptor Header Error" },
++ { 0x13, "Header Error. Invalid length or parity, or certain "
++ "other problems." },
++ { 0x14, "Burster Error. Burster has gotten to an illegal "
++ "state" },
++ { 0x15, "Context Register Length Error. The descriptor is "
++ "trying to read or write past the end of the Context "
++ "Register. A SEQ LOAD or SEQ STORE with the VLF bit "
++ "set was executed with too large a length in the "
++ "variable length register (VSOL for SEQ STORE or VSIL "
++ "for SEQ LOAD)." },
++ { 0x16, "DMA Error" },
++ { 0x17, "Reserved." },
++ { 0x1A, "Job failed due to JR reset" },
++ { 0x1B, "Job failed due to Fail Mode" },
++ { 0x1C, "DECO Watchdog timer timeout error" },
++ { 0x1D, "DECO tried to copy a key from another DECO but the "
++ "other DECO's Key Registers were locked" },
++ { 0x1E, "DECO attempted to copy data from a DECO that had an "
++ "unmasked Descriptor error" },
++ { 0x1F, "LIODN error. DECO was trying to share from itself or "
++ "from another DECO but the two Non-SEQ LIODN values "
++ "didn't match or the 'shared from' DECO's Descriptor "
++ "required that the SEQ LIODNs be the same and they "
++ "aren't." },
++ { 0x20, "DECO has completed a reset initiated via the DRR "
++ "register" },
++ { 0x21, "Nonce error. When using EKT (CCM) key encryption "
++ "option in the FIFO STORE Command, the Nonce counter "
++ "reached its maximum value and this encryption mode "
++ "can no longer be used." },
++ { 0x22, "Meta data is too large (> 511 bytes) for TLS decap "
++ "(input frame; block ciphers) and IPsec decap (output "
++ "frame, when doing the next header byte update) and "
++ "DCRC (output frame)." },
++ { 0x23, "Read Input Frame error" },
++ { 0x24, "JDKEK, TDKEK or TDSK not loaded error" },
++ { 0x80, "DNR (do not run) error" },
++ { 0x81, "undefined protocol command" },
++ { 0x82, "invalid setting in PDB" },
++ { 0x83, "Anti-replay LATE error" },
++ { 0x84, "Anti-replay REPLAY error" },
++ { 0x85, "Sequence number overflow" },
++ { 0x86, "Sigver invalid signature" },
++ { 0x87, "DSA Sign Illegal test descriptor" },
++ { 0x88, "Protocol Format Error - A protocol has seen an error "
++ "in the format of data received. When running RSA, "
++ "this means that formatting with random padding was "
++ "used, and did not follow the form: 0x00, 0x02, 8-to-N "
++ "bytes of non-zero pad, 0x00, F data." },
++ { 0x89, "Protocol Size Error - A protocol has seen an error in "
++ "size. When running RSA, pdb size N < (size of F) when "
++ "no formatting is used; or pdb size N < (F + 11) when "
++ "formatting is used." },
++ { 0xC1, "Blob Command error: Undefined mode" },
++ { 0xC2, "Blob Command error: Secure Memory Blob mode error" },
++ { 0xC4, "Blob Command error: Black Blob key or input size "
++ "error" },
++ { 0xC5, "Blob Command error: Invalid key destination" },
++ { 0xC8, "Blob Command error: Trusted/Secure mode error" },
++ { 0xF0, "IPsec TTL or hop limit field either came in as 0, "
++ "or was decremented to 0" },
++ { 0xF1, "3GPP HFN matches or exceeds the Threshold" },
++ };
++ u8 desc_error = status & JRSTA_DECOERR_ERROR_MASK;
+ int i;
+
+- if (status & JRSTA_DECOERR_JUMP)
+- idx_str = "jump tgt desc idx";
+- else
+- idx_str = "desc idx";
++ report_jump_idx(status, outstr);
+
+ for (i = 0; i < ARRAY_SIZE(desc_error_list); i++)
+- if (desc_error_list[i].value == err_id)
++ if (desc_error_list[i].value == desc_error)
+ break;
+
+- if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text)
+- err_str = desc_error_list[i].error_text;
+- else
+- snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+-
+- dev_err(jrdev, "%08x: %s: %s %d: %s%s\n",
+- status, error, idx_str, idx, err_str, err_err_code);
++ if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text) {
++ SPRINTFCAT(outstr, "%s", desc_error_list[i].error_text,
++ strlen(desc_error_list[i].error_text));
++ } else {
++ SPRINTFCAT(outstr, "unidentified error value 0x%02x",
++ desc_error, sizeof("ff"));
++ }
+ }
+
+-static void report_jr_status(struct device *jrdev, const u32 status,
+- const char *error)
++static void report_jr_status(u32 status, char *outstr)
+ {
+- dev_err(jrdev, "%08x: %s: %s() not implemented\n",
+- status, error, __func__);
++ SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
+ }
+
+-static void report_cond_code_status(struct device *jrdev, const u32 status,
+- const char *error)
++static void report_cond_code_status(u32 status, char *outstr)
+ {
+- dev_err(jrdev, "%08x: %s: %s() not implemented\n",
+- status, error, __func__);
++ SPRINTFCAT(outstr, "%s() not implemented", __func__, sizeof(__func__));
+ }
+
+-void caam_jr_strstatus(struct device *jrdev, u32 status)
++char *caam_jr_strstatus(char *outstr, u32 status)
+ {
+ static const struct stat_src {
+- void (*report_ssed)(struct device *jrdev, const u32 status,
+- const char *error);
+- const char *error;
+- } status_src[16] = {
++ void (*report_ssed)(u32 status, char *outstr);
++ char *error;
++ } status_src[] = {
+ { NULL, "No error" },
+ { NULL, NULL },
+ { report_ccb_status, "CCB" },
+ { report_jump_status, "Jump" },
+ { report_deco_status, "DECO" },
+- { NULL, "Queue Manager Interface" },
++ { NULL, NULL },
+ { report_jr_status, "Job Ring" },
+ { report_cond_code_status, "Condition Code" },
+- { NULL, NULL },
+- { NULL, NULL },
+- { NULL, NULL },
+- { NULL, NULL },
+- { NULL, NULL },
+- { NULL, NULL },
+- { NULL, NULL },
+- { NULL, NULL },
+ };
+ u32 ssrc = status >> JRSTA_SSRC_SHIFT;
+- const char *error = status_src[ssrc].error;
+
+- /*
+- * If there is an error handling function, call it to report the error.
+- * Otherwise print the error source name.
+- */
++ sprintf(outstr, "%s: ", status_src[ssrc].error);
++
+ if (status_src[ssrc].report_ssed)
+- status_src[ssrc].report_ssed(jrdev, status, error);
+- else if (error)
+- dev_err(jrdev, "%d: %s\n", ssrc, error);
+- else
+- dev_err(jrdev, "%d: unknown error source\n", ssrc);
++ status_src[ssrc].report_ssed(status, outstr);
++
++ return outstr;
+ }
+ EXPORT_SYMBOL(caam_jr_strstatus);
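With the reworked signature, callers hand in a CAAM_ERROR_STR_MAX buffer and log the returned string themselves, the pattern the key_gen.c hunk later in this patch adopts. A sketch of the calling convention (uint32_t stands in for the kernel's u32, and fprintf for dev_err):

    #include <stdint.h>
    #include <stdio.h>

    #define CAAM_ERROR_STR_MAX 302      /* from error.h below */

    extern char *caam_jr_strstatus(char *outstr, uint32_t status);

    static void report(uint32_t status)
    {
            char tmp[CAAM_ERROR_STR_MAX];

            if (status)
                    fprintf(stderr, "%08x: %s\n", (unsigned int)status,
                            caam_jr_strstatus(tmp, status));
    }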
+diff -Nur linux-4.1.3/drivers/crypto/caam/error.h linux-xbian-imx6/drivers/crypto/caam/error.h
+--- linux-4.1.3/drivers/crypto/caam/error.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/crypto/caam/error.h 2015-07-27 23:13:04.213947410 +0200
+@@ -7,5 +7,5 @@
+ #ifndef CAAM_ERROR_H
+ #define CAAM_ERROR_H
+ #define CAAM_ERROR_STR_MAX 302
+-void caam_jr_strstatus(struct device *jrdev, u32 status);
++extern char *caam_jr_strstatus(char *outstr, u32 status);
+ #endif /* CAAM_ERROR_H */
+diff -Nur linux-4.1.3/drivers/crypto/caam/intern.h linux-xbian-imx6/drivers/crypto/caam/intern.h
+--- linux-4.1.3/drivers/crypto/caam/intern.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/crypto/caam/intern.h 2015-07-27 23:13:04.213947410 +0200
+@@ -2,13 +2,19 @@
+ * CAAM/SEC 4.x driver backend
+ * Private/internal definitions between modules
+ *
+- * Copyright 2008-2011 Freescale Semiconductor, Inc.
++ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
+ *
+ */
+
+ #ifndef INTERN_H
+ #define INTERN_H
+
++#define JOBR_UNASSIGNED 0
++#define JOBR_ASSIGNED 1
++
++/* Default clock/sample settings for an RNG4 entropy source */
++#define RNG4_ENT_CLOCKS_SAMPLE 1600
++
+ /* Currently comes from Kconfig param as a ^2 (driver-required) */
+ #define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)
+
+@@ -37,15 +43,13 @@
+
+ /* Private sub-storage for a single JobR */
+ struct caam_drv_private_jr {
+- struct list_head list_node; /* Job Ring device list */
+- struct device *dev;
++ struct device *parentdev; /* points back to controller dev */
++ struct platform_device *jr_pdev;/* points to platform device for JR */
+ int ridx;
+ struct caam_job_ring __iomem *rregs; /* JobR's register space */
+ struct tasklet_struct irqtask;
+ int irq; /* One per queue */
+-
+- /* Number of scatterlist crypt transforms active on the JobR */
+- atomic_t tfm_count ____cacheline_aligned;
++ int assign; /* busy/free */
+
+ /* Job ring info */
+ int ringsize; /* Size of rings (assume input = output) */
+@@ -66,15 +70,20 @@
+ struct caam_drv_private {
+
+ struct device *dev;
+- struct platform_device **jrpdev; /* Alloc'ed array per sub-device */
++ struct device *smdev;
++ struct device *secviodev;
++ struct device **jrdev; /* Alloc'ed array per sub-device */
++ spinlock_t jr_alloc_lock;
+ struct platform_device *pdev;
+
+ /* Physical-presence section */
+- struct caam_ctrl __iomem *ctrl; /* controller region */
+- struct caam_deco __iomem *deco; /* DECO/CCB views */
+- struct caam_assurance __iomem *assure;
+- struct caam_queue_if __iomem *qi; /* QI control region */
+- struct caam_job_ring __iomem *jr[4]; /* JobR's register space */
++ struct caam_ctrl *ctrl; /* controller region */
++ struct caam_deco **deco; /* DECO/CCB views */
++ struct caam_assurance *ac;
++ struct caam_queue_if *qi; /* QI control region */
++ struct snvs_full __iomem *snvs; /* SNVS HP+LP register space */
++ dma_addr_t __iomem *sm_base; /* Secure memory storage base */
++ u32 sm_size;
+
+ /*
+ * Detected geometry block. Filled in from device tree if powerpc,
+@@ -83,14 +92,22 @@
+ u8 total_jobrs; /* Total Job Rings in device */
+ u8 qi_present; /* Nonzero if QI present in device */
+ int secvio_irq; /* Security violation interrupt number */
+- int virt_en; /* Virtualization enabled in CAAM */
+-
+-#define RNG4_MAX_HANDLES 2
+- /* RNG4 block */
+- u32 rng4_sh_init; /* This bitmap shows which of the State
+- Handles of the RNG4 block are initialized
+- by this driver */
++ int rng_inst; /* Total instantiated RNGs */
+
++ /* which jr allocated to scatterlist crypto */
++ atomic_t tfm_count ____cacheline_aligned;
++ int num_jrs_for_algapi;
++ struct device **algapi_jr;
++ /* list of registered crypto algorithms (mk generic context handle?) */
++ struct list_head alg_list;
++ /* list of registered hash algorithms (mk generic context handle?) */
++ struct list_head hash_list;
++
++#ifdef CONFIG_ARM
++ struct clk *caam_ipg;
++ struct clk *caam_mem;
++ struct clk *caam_aclk;
++#endif
+ /*
+ * debugfs entries for developer view into driver/device
+ * variables at runtime.
+diff -Nur linux-4.1.3/drivers/crypto/caam/jr.c linux-xbian-imx6/drivers/crypto/caam/jr.c
+--- linux-4.1.3/drivers/crypto/caam/jr.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/crypto/caam/jr.c 2015-07-27 23:13:04.213947410 +0200
+@@ -2,125 +2,15 @@
+ * CAAM/SEC 4.x transport/backend driver
+ * JobR backend functionality
+ *
+- * Copyright 2008-2012 Freescale Semiconductor, Inc.
++ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
+ */
+
+-#include <linux/of_irq.h>
+-#include <linux/of_address.h>
+-
+ #include "compat.h"
+ #include "regs.h"
+ #include "jr.h"
+ #include "desc.h"
+ #include "intern.h"
+
+-struct jr_driver_data {
+- /* List of Physical JobR's with the Driver */
+- struct list_head jr_list;
+- spinlock_t jr_alloc_lock; /* jr_list lock */
+-} ____cacheline_aligned;
+-
+-static struct jr_driver_data driver_data;
+-
+-static int caam_reset_hw_jr(struct device *dev)
+-{
+- struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+- unsigned int timeout = 100000;
+-
+- /*
+- * mask interrupts since we are going to poll
+- * for reset completion status
+- */
+- setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+-
+- /* initiate flush (required prior to reset) */
+- wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+- while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
+- JRINT_ERR_HALT_INPROGRESS) && --timeout)
+- cpu_relax();
+-
+- if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
+- JRINT_ERR_HALT_COMPLETE || timeout == 0) {
+- dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
+- return -EIO;
+- }
+-
+- /* initiate reset */
+- timeout = 100000;
+- wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
+- while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
+- cpu_relax();
+-
+- if (timeout == 0) {
+- dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
+- return -EIO;
+- }
+-
+- /* unmask interrupts */
+- clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
+-
+- return 0;
+-}
+-
+-/*
+- * Shutdown JobR independent of platform property code
+- */
+-int caam_jr_shutdown(struct device *dev)
+-{
+- struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+- dma_addr_t inpbusaddr, outbusaddr;
+- int ret;
+-
+- ret = caam_reset_hw_jr(dev);
+-
+- tasklet_kill(&jrp->irqtask);
+-
+- /* Release interrupt */
+- free_irq(jrp->irq, dev);
+-
+- /* Free rings */
+- inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
+- outbusaddr = rd_reg64(&jrp->rregs->outring_base);
+- dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
+- jrp->inpring, inpbusaddr);
+- dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
+- jrp->outring, outbusaddr);
+- kfree(jrp->entinfo);
+-
+- return ret;
+-}
+-
+-static int caam_jr_remove(struct platform_device *pdev)
+-{
+- int ret;
+- struct device *jrdev;
+- struct caam_drv_private_jr *jrpriv;
+-
+- jrdev = &pdev->dev;
+- jrpriv = dev_get_drvdata(jrdev);
+-
+- /*
+- * Return EBUSY if job ring already allocated.
+- */
+- if (atomic_read(&jrpriv->tfm_count)) {
+- dev_err(jrdev, "Device is busy\n");
+- return -EBUSY;
+- }
+-
+- /* Remove the node from Physical JobR list maintained by driver */
+- spin_lock(&driver_data.jr_alloc_lock);
+- list_del(&jrpriv->list_node);
+- spin_unlock(&driver_data.jr_alloc_lock);
+-
+- /* Release ring */
+- ret = caam_jr_shutdown(jrdev);
+- if (ret)
+- dev_err(jrdev, "Failed to shut down job ring\n");
+- irq_dispose_mapping(jrpriv->irq);
+-
+- return ret;
+-}
+-
+ /* Main per-ring interrupt handler */
+ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
+ {
+@@ -168,6 +58,9 @@
+ void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
+ u32 *userdesc, userstatus;
+ void *userarg;
++ dma_addr_t outbusaddr;
++
++ outbusaddr = rd_reg64(&jrp->rregs->outring_base);
+
+ while (rd_reg32(&jrp->rregs->outring_used)) {
+
+@@ -177,10 +70,15 @@
+
+ sw_idx = tail = jrp->tail;
+ hw_idx = jrp->out_ring_read_index;
++ dma_sync_single_for_cpu(dev, outbusaddr,
++ sizeof(struct jr_outentry) * JOBR_DEPTH,
++ DMA_FROM_DEVICE);
+
+ for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
+ sw_idx = (tail + i) & (JOBR_DEPTH - 1);
+
++ smp_read_barrier_depends();
++
+ if (jrp->outring[hw_idx].desc ==
+ jrp->entinfo[sw_idx].desc_addr_dma)
+ break; /* found */
+@@ -202,6 +100,8 @@
+ userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
+ userstatus = jrp->outring[hw_idx].jrstatus;
+
++ smp_mb();
++
+ /* set done */
+ wr_reg32(&jrp->rregs->outring_rmvd, 1);
+
+@@ -216,6 +116,7 @@
+ if (sw_idx == tail) {
+ do {
+ tail = (tail + 1) & (JOBR_DEPTH - 1);
++ smp_read_barrier_depends();
+ } while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
+ jrp->entinfo[tail].desc_addr_dma == 0);
+
+@@ -233,57 +134,70 @@
+ }
+
+ /**
+- * caam_jr_alloc() - Alloc a job ring for someone to use as needed.
+- *
+- * returns : pointer to the newly allocated physical
+- * JobR dev can be written to if successful.
++ * caam_jr_register() - Alloc a ring for someone to use as needed. Returns
++ * an ordinal of the rings allocated, else returns -ENODEV if no rings
++ * are available.
++ * @ctrldev: points to the controller level dev (parent) that
++ * owns rings available for use.
++ * @dev: points to where a pointer to the newly allocated queue's
++ * dev can be written to if successful.
+ **/
+-struct device *caam_jr_alloc(void)
++int caam_jr_register(struct device *ctrldev, struct device **rdev)
+ {
+- struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
+- struct device *dev = NULL;
+- int min_tfm_cnt = INT_MAX;
+- int tfm_cnt;
+-
+- spin_lock(&driver_data.jr_alloc_lock);
+-
+- if (list_empty(&driver_data.jr_list)) {
+- spin_unlock(&driver_data.jr_alloc_lock);
+- return ERR_PTR(-ENODEV);
+- }
+-
+- list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
+- tfm_cnt = atomic_read(&jrpriv->tfm_count);
+- if (tfm_cnt < min_tfm_cnt) {
+- min_tfm_cnt = tfm_cnt;
+- min_jrpriv = jrpriv;
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
++ struct caam_drv_private_jr *jrpriv = NULL;
++ int ring;
++
++ /* Lock, if free ring - assign, unlock */
++ spin_lock(&ctrlpriv->jr_alloc_lock);
++ for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
++ jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
++ if (jrpriv->assign == JOBR_UNASSIGNED) {
++ jrpriv->assign = JOBR_ASSIGNED;
++ *rdev = ctrlpriv->jrdev[ring];
++ spin_unlock(&ctrlpriv->jr_alloc_lock);
++ return ring;
+ }
+- if (!min_tfm_cnt)
+- break;
+ }
+
+- if (min_jrpriv) {
+- atomic_inc(&min_jrpriv->tfm_count);
+- dev = min_jrpriv->dev;
+- }
+- spin_unlock(&driver_data.jr_alloc_lock);
++ /* If assigned, write dev where caller needs it */
++ spin_unlock(&ctrlpriv->jr_alloc_lock);
++ *rdev = NULL;
+
+- return dev;
++ return -ENODEV;
+ }
+-EXPORT_SYMBOL(caam_jr_alloc);
++EXPORT_SYMBOL(caam_jr_register);
+
+ /**
+- * caam_jr_free() - Free the Job Ring
+- * @rdev - points to the dev that identifies the Job ring to
+- * be released.
++ * caam_jr_deregister() - Deregister an API and release the queue.
++ * Returns 0 if OK, -EBUSY if queue still contains pending entries
++ * or unprocessed results at the time of the call
++ * @dev - points to the dev that identifies the queue to
++ * be released.
+ **/
+-void caam_jr_free(struct device *rdev)
++int caam_jr_deregister(struct device *rdev)
+ {
+ struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
++ struct caam_drv_private *ctrlpriv;
++
++ /* Get the owning controller's private space */
++ ctrlpriv = dev_get_drvdata(jrpriv->parentdev);
++
++ /*
++ * Make sure ring empty before release
++ */
++ if (rd_reg32(&jrpriv->rregs->outring_used) ||
++ (rd_reg32(&jrpriv->rregs->inpring_avail) != JOBR_DEPTH))
++ return -EBUSY;
+
+- atomic_dec(&jrpriv->tfm_count);
++ /* Release ring */
++ spin_lock(&ctrlpriv->jr_alloc_lock);
++ jrpriv->assign = JOBR_UNASSIGNED;
++ spin_unlock(&ctrlpriv->jr_alloc_lock);
++
++ return 0;
+ }
+-EXPORT_SYMBOL(caam_jr_free);
++EXPORT_SYMBOL(caam_jr_deregister);
+
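Under this model a client claims an entire ring up front and releases it explicitly, in place of upstream's least-loaded caam_jr_alloc()/caam_jr_free() pair. A hedged sketch of the client-side flow, with the controller device assumed to be obtained elsewhere:

    #include <stddef.h>

    struct device;

    extern int caam_jr_register(struct device *ctrldev,
                                struct device **rdev);
    extern int caam_jr_deregister(struct device *rdev);

    static struct device *claim_ring(struct device *ctrldev)
    {
            struct device *jrdev;

            if (caam_jr_register(ctrldev, &jrdev) < 0)
                    return NULL;    /* every ring already assigned */
            return jrdev;           /* exclusive until deregistered */
    }

    static void release_ring(struct device *jrdev)
    {
            /* Returns -EBUSY while work is still queued; real callers
             * drain the ring first rather than ignore the result. */
            (void)caam_jr_deregister(jrdev);
    }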
+ /**
+ * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
+@@ -321,7 +235,7 @@
+ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
+ struct caam_jrentry_info *head_entry;
+ int head, tail, desc_size;
+- dma_addr_t desc_dma;
++ dma_addr_t desc_dma, inpbusaddr;
+
+ desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
+ desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
+@@ -330,6 +244,13 @@
+ return -EIO;
+ }
+
++ dma_sync_single_for_device(dev, desc_dma, desc_size, DMA_TO_DEVICE);
++
++ inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
++ dma_sync_single_for_device(dev, inpbusaddr,
++ sizeof(dma_addr_t) * JOBR_DEPTH,
++ DMA_TO_DEVICE);
++
+ spin_lock_bh(&jrp->inplock);
+
+ head = jrp->head;
+@@ -351,12 +272,18 @@
+
+ jrp->inpring[jrp->inp_ring_write_index] = desc_dma;
+
++ dma_sync_single_for_device(dev, inpbusaddr,
++ sizeof(dma_addr_t) * JOBR_DEPTH,
++ DMA_TO_DEVICE);
++
+ smp_wmb();
+
+ jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
+ (JOBR_DEPTH - 1);
+ jrp->head = (head + 1) & (JOBR_DEPTH - 1);
+
++ wmb();
++
+ wr_reg32(&jrp->rregs->inpring_jobadd, 1);
+
+ spin_unlock_bh(&jrp->inplock);
+@@ -365,6 +292,46 @@
+ }
+ EXPORT_SYMBOL(caam_jr_enqueue);
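The index arithmetic in the enqueue path, (head + 1) & (JOBR_DEPTH - 1), is why JOBR_DEPTH is defined as a power of two derived from the Kconfig exponent: the AND then acts as a divide-free modulo. In isolation:

    #define RING_DEPTH (1 << 9)  /* mirrors JOBR_DEPTH's power-of-two form */

    static unsigned int ring_next(unsigned int i)
    {
            /* Equivalent to (i + 1) % RING_DEPTH, but only because
             * RING_DEPTH is a power of two. */
            return (i + 1) & (RING_DEPTH - 1);
    }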
+
++static int caam_reset_hw_jr(struct device *dev)
++{
++ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
++ unsigned int timeout = 100000;
++
++ /*
++ * mask interrupts since we are going to poll
++ * for reset completion status
++ */
++ setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
++
++ /* initiate flush (required prior to reset) */
++ wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
++ while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
++ JRINT_ERR_HALT_INPROGRESS) && --timeout)
++ cpu_relax();
++
++ if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
++ JRINT_ERR_HALT_COMPLETE || timeout == 0) {
++ dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
++ return -EIO;
++ }
++
++ /* initiate reset */
++ timeout = 100000;
++ wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
++ while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
++ cpu_relax();
++
++ if (timeout == 0) {
++ dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
++ return -EIO;
++ }
++
++ /* unmask interrupts */
++ clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
++
++ return 0;
++}
++
+ /*
+ * Init JobR independent of platform property detection
+ */
+@@ -380,32 +347,34 @@
+
+ /* Connect job ring interrupt handler. */
+ error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
+- dev_name(dev), dev);
++ "caam-jobr", dev);
+ if (error) {
+ dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
+ jrp->ridx, jrp->irq);
+- goto out_kill_deq;
++ irq_dispose_mapping(jrp->irq);
++ jrp->irq = 0;
++ return -EINVAL;
+ }
+
+ error = caam_reset_hw_jr(dev);
+ if (error)
+- goto out_free_irq;
++ return error;
+
+- error = -ENOMEM;
+ jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
+ &inpbusaddr, GFP_KERNEL);
+- if (!jrp->inpring)
+- goto out_free_irq;
+
+ jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) *
+ JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
+- if (!jrp->outring)
+- goto out_free_inpring;
+
+ jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
+ GFP_KERNEL);
+- if (!jrp->entinfo)
+- goto out_free_outring;
++
++ if ((jrp->inpring == NULL) || (jrp->outring == NULL) ||
++ (jrp->entinfo == NULL)) {
++ dev_err(dev, "can't allocate job rings for %d\n",
++ jrp->ridx);
++ return -ENOMEM;
++ }
+
+ for (i = 0; i < JOBR_DEPTH; i++)
+ jrp->entinfo[i].desc_addr_dma = !0;
+@@ -431,120 +400,123 @@
+ (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
+ (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
+
++ jrp->assign = JOBR_UNASSIGNED;
+ return 0;
++}
+
+-out_free_outring:
+- dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
+- jrp->outring, outbusaddr);
+-out_free_inpring:
++/*
++ * Shutdown JobR independent of platform property code
++ */
++int caam_jr_shutdown(struct device *dev)
++{
++ struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
++ dma_addr_t inpbusaddr, outbusaddr;
++ int ret;
++
++ ret = caam_reset_hw_jr(dev);
++
++ tasklet_kill(&jrp->irqtask);
++
++ /* Release interrupt */
++ free_irq(jrp->irq, dev);
++
++ /* Free rings */
++ inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
++ outbusaddr = rd_reg64(&jrp->rregs->outring_base);
+ dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
+ jrp->inpring, inpbusaddr);
+- dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx);
+-out_free_irq:
+- free_irq(jrp->irq, dev);
+-out_kill_deq:
+- tasklet_kill(&jrp->irqtask);
+- return error;
+-}
++ dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
++ jrp->outring, outbusaddr);
++ kfree(jrp->entinfo);
++ of_device_unregister(jrp->jr_pdev);
+
++ return ret;
++}
+
+ /*
+- * Probe routine for each detected JobR subsystem.
++ * Probe routine for each detected JobR subsystem. It assumes that
++ * property detection was picked up externally.
+ */
+-static int caam_jr_probe(struct platform_device *pdev)
++int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
++ int ring)
+ {
+- struct device *jrdev;
+- struct device_node *nprop;
+- struct caam_job_ring __iomem *ctrl;
++ struct device *ctrldev, *jrdev;
++ struct platform_device *jr_pdev;
++ struct caam_drv_private *ctrlpriv;
+ struct caam_drv_private_jr *jrpriv;
+- static int total_jobrs;
++ const __be32 *jroffset_addr;
++ u32 jroffset;
+ int error;
+
+- jrdev = &pdev->dev;
+- jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr),
+- GFP_KERNEL);
+- if (!jrpriv)
++ ctrldev = &pdev->dev;
++ ctrlpriv = dev_get_drvdata(ctrldev);
++
++ jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
++ GFP_KERNEL);
++ if (jrpriv == NULL) {
++ dev_err(ctrldev, "can't alloc private mem for job ring %d\n",
++ ring);
+ return -ENOMEM;
++ }
++ jrpriv->parentdev = ctrldev; /* point back to parent */
++ jrpriv->ridx = ring; /* save ring identity relative to detection */
+
+- dev_set_drvdata(jrdev, jrpriv);
++ /*
++ * Derive a pointer to the detected JobRs regs
++ * Driver has already iomapped the entire space, we just
++ * need to add in the offset to this JobR. Don't know if I
++ * like this long-term, but it'll run
++ */
++ jroffset_addr = of_get_property(np, "reg", NULL);
+
+- /* save ring identity relative to detection */
+- jrpriv->ridx = total_jobrs++;
++ if (jroffset_addr == NULL) {
++ kfree(jrpriv);
++ return -EINVAL;
++ }
+
+- nprop = pdev->dev.of_node;
+- /* Get configuration properties from device tree */
+- /* First, get register page */
+- ctrl = of_iomap(nprop, 0);
+- if (!ctrl) {
+- dev_err(jrdev, "of_iomap() failed\n");
+- return -ENOMEM;
++ /*
++ * Fix the endianness of this value read from the device
++ * tree if running on ARM.
++ */
++ jroffset = be32_to_cpup(jroffset_addr);
++
++ jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl
++ + jroffset);
++
++ /* Build a local dev for each detected queue */
++ jr_pdev = of_platform_device_create(np, NULL, ctrldev);
++ if (jr_pdev == NULL) {
++ kfree(jrpriv);
++ return -EINVAL;
+ }
+
+- jrpriv->rregs = (struct caam_job_ring __force *)ctrl;
++ jrpriv->jr_pdev = jr_pdev;
++ jrdev = &jr_pdev->dev;
++ dev_set_drvdata(jrdev, jrpriv);
++ ctrlpriv->jrdev[ring] = jrdev;
+
+ if (sizeof(dma_addr_t) == sizeof(u64))
+- if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
+- dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
++ if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring"))
++ dma_set_mask(jrdev, DMA_BIT_MASK(40));
+ else
+- dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
++ dma_set_mask(jrdev, DMA_BIT_MASK(36));
+ else
+- dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
++ dma_set_mask(jrdev, DMA_BIT_MASK(32));
+
+ /* Identify the interrupt */
+- jrpriv->irq = irq_of_parse_and_map(nprop, 0);
++ jrpriv->irq = of_irq_to_resource(np, 0, NULL);
++ if (jrpriv->irq <= 0) {
++ kfree(jrpriv);
++ return -EINVAL;
++ }
+
+ /* Now do the platform independent part */
+ error = caam_jr_init(jrdev); /* now turn on hardware */
+ if (error) {
+- irq_dispose_mapping(jrpriv->irq);
++ of_device_unregister(jr_pdev);
++ kfree(jrpriv);
+ return error;
+ }
+
+- jrpriv->dev = jrdev;
+- spin_lock(&driver_data.jr_alloc_lock);
+- list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
+- spin_unlock(&driver_data.jr_alloc_lock);
+-
+- atomic_set(&jrpriv->tfm_count, 0);
+-
+- return 0;
+-}
+-
+-static struct of_device_id caam_jr_match[] = {
+- {
+- .compatible = "fsl,sec-v4.0-job-ring",
+- },
+- {
+- .compatible = "fsl,sec4.0-job-ring",
+- },
+- {},
+-};
+-MODULE_DEVICE_TABLE(of, caam_jr_match);
+-
+-static struct platform_driver caam_jr_driver = {
+- .driver = {
+- .name = "caam_jr",
+- .of_match_table = caam_jr_match,
+- },
+- .probe = caam_jr_probe,
+- .remove = caam_jr_remove,
+-};
+-
+-static int __init jr_driver_init(void)
+-{
+- spin_lock_init(&driver_data.jr_alloc_lock);
+- INIT_LIST_HEAD(&driver_data.jr_list);
+- return platform_driver_register(&caam_jr_driver);
+-}
+-
+-static void __exit jr_driver_exit(void)
+-{
+- platform_driver_unregister(&caam_jr_driver);
++ return error;
+ }
+-
+-module_init(jr_driver_init);
+-module_exit(jr_driver_exit);
+-
+-MODULE_LICENSE("GPL");
+-MODULE_DESCRIPTION("FSL CAAM JR request backend");
+-MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
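The probe above locates each ring's registers by reading the node's "reg" property directly and byte-swapping the big-endian cell with be32_to_cpup() before adding it to the iomapped controller base. A compact sketch of that cell decode, standing in for be32_to_cpup() on a little-endian core:

    #include <stdint.h>

    static uint32_t be32_decode(const uint8_t *cell)
    {
            /* Device-tree cells are stored big-endian */
            return ((uint32_t)cell[0] << 24) | ((uint32_t)cell[1] << 16) |
                   ((uint32_t)cell[2] << 8)  |  (uint32_t)cell[3];
    }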
+diff -Nur linux-4.1.3/drivers/crypto/caam/jr.h linux-xbian-imx6/drivers/crypto/caam/jr.h
+--- linux-4.1.3/drivers/crypto/caam/jr.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/crypto/caam/jr.h 2015-07-27 23:13:04.213947410 +0200
+@@ -1,18 +1,22 @@
+ /*
+ * CAAM public-level include definitions for the JobR backend
+ *
+- * Copyright 2008-2011 Freescale Semiconductor, Inc.
++ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
+ */
+
+ #ifndef JR_H
+ #define JR_H
+
+ /* Prototypes for backend-level services exposed to APIs */
+-struct device *caam_jr_alloc(void);
+-void caam_jr_free(struct device *rdev);
++int caam_jr_register(struct device *ctrldev, struct device **rdev);
++int caam_jr_deregister(struct device *rdev);
+ int caam_jr_enqueue(struct device *dev, u32 *desc,
+ void (*cbk)(struct device *dev, u32 *desc, u32 status,
+ void *areq),
+ void *areq);
+
++extern int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
++ int ring);
++extern int caam_jr_shutdown(struct device *dev);
++extern struct device *caam_get_jrdev(void);
+ #endif /* JR_H */
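caam_jr_enqueue() keeps its asynchronous shape: the caller supplies a callback that later receives the descriptor and its status word. A minimal sketch of the callback side, modelled on the split-key completion in key_gen.c below (the result struct is the caller's own invention):

    #include <stdint.h>

    struct device;

    struct job_result {
            uint32_t err;
            int done;               /* stand-in for a completion */
    };

    /* Matches the callback shape caam_jr_enqueue() expects */
    static void job_done(struct device *dev, uint32_t *desc,
                         uint32_t status, void *arg)
    {
            struct job_result *res = arg;

            res->err = status;      /* 0 on success */
            res->done = 1;          /* real code calls complete() */
    }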
+diff -Nur linux-4.1.3/drivers/crypto/caam/Kconfig linux-xbian-imx6/drivers/crypto/caam/Kconfig
+--- linux-4.1.3/drivers/crypto/caam/Kconfig 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/crypto/caam/Kconfig 2015-07-27 23:13:04.205975852 +0200
+@@ -1,32 +1,19 @@
+ config CRYPTO_DEV_FSL_CAAM
+ tristate "Freescale CAAM-Multicore driver backend"
+- depends on FSL_SOC
++ depends on FSL_SOC || ARCH_MXC
+ help
+ Enables the driver module for Freescale's Cryptographic Accelerator
+ and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
+- This module creates job ring devices, and configures h/w
++ This module adds a job ring operation interface, and configures h/w
+ to operate as a DPAA component automatically, depending
+ on h/w feature availability.
+
+ To compile this driver as a module, choose M here: the module
+ will be called caam.
+
+-config CRYPTO_DEV_FSL_CAAM_JR
+- tristate "Freescale CAAM Job Ring driver backend"
+- depends on CRYPTO_DEV_FSL_CAAM
+- default y
+- help
+- Enables the driver module for Job Rings which are part of
+- Freescale's Cryptographic Accelerator
+- and Assurance Module (CAAM). This module adds a job ring operation
+- interface.
+-
+- To compile this driver as a module, choose M here: the module
+- will be called caam_jr.
+-
+ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
+ int "Job Ring size"
+- depends on CRYPTO_DEV_FSL_CAAM_JR
++ depends on CRYPTO_DEV_FSL_CAAM
+ range 2 9
+ default "9"
+ help
+@@ -44,7 +31,7 @@
+
+ config CRYPTO_DEV_FSL_CAAM_INTC
+ bool "Job Ring interrupt coalescing"
+- depends on CRYPTO_DEV_FSL_CAAM_JR
++ depends on CRYPTO_DEV_FSL_CAAM
+ default n
+ help
+ Enable the Job Ring's interrupt coalescing feature.
+@@ -75,7 +62,7 @@
+
+ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
+ tristate "Register algorithm implementations with the Crypto API"
+- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
++ depends on CRYPTO_DEV_FSL_CAAM
+ default y
+ select CRYPTO_ALGAPI
+ select CRYPTO_AUTHENC
+@@ -89,7 +76,7 @@
+
+ config CRYPTO_DEV_FSL_CAAM_AHASH_API
+ tristate "Register hash algorithm implementations with Crypto API"
+- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
++ depends on CRYPTO_DEV_FSL_CAAM
+ default y
+ select CRYPTO_HASH
+ help
+@@ -101,7 +88,7 @@
+
+ config CRYPTO_DEV_FSL_CAAM_RNG_API
+ tristate "Register caam device for hwrng API"
+- depends on CRYPTO_DEV_FSL_CAAM && CRYPTO_DEV_FSL_CAAM_JR
++ depends on CRYPTO_DEV_FSL_CAAM
+ default y
+ select CRYPTO_RNG
+ select HW_RANDOM
+@@ -112,6 +99,54 @@
+ To compile this as a module, choose M here: the module
+ will be called caamrng.
+
++config CRYPTO_DEV_FSL_CAAM_RNG_TEST
++ boolean "Test caam rng"
++ depends on CRYPTO_DEV_FSL_CAAM_RNG_API
++ default n
++ help
++ Selecting this will enable self-test for caam rng.
++
++config CRYPTO_DEV_FSL_CAAM_SM
++ tristate "CAAM Secure Memory / Keystore API (EXPERIMENTAL)"
++ default n
++ help
++ Enables use of a prototype kernel-level Keystore API with CAAM
++ Secure Memory for insertion/extraction of bus-protected secrets.
++
++config CRYPTO_DEV_FSL_CAAM_SM_SLOTSIZE
++ int "Size of each keystore slot in Secure Memory"
++ depends on CRYPTO_DEV_FSL_CAAM_SM
++ range 5 9
++ default 7
++ help
++ Select size of allocation units to divide Secure Memory pages into
++ (the size of a "slot" as referenced inside the API code).
++ Established as powers of two.
++ Examples:
++ 5 => 32 bytes
++ 6 => 64 bytes
++ 7 => 128 bytes
++ 8 => 256 bytes
++ 9 => 512 bytes
++
++config CRYPTO_DEV_FSL_CAAM_SM_TEST
++ tristate "CAAM Secure Memory - Keystore Test/Example (EXPERIMENTAL)"
++ depends on CRYPTO_DEV_FSL_CAAM_SM
++ default n
++ help
++ Example thread to exercise the Keystore API and to verify that
++ stored and recovered secrets can be used for general purpose
++ encryption/decryption.
++
++config CRYPTO_DEV_FSL_CAAM_SECVIO
++ tristate "CAAM/SNVS Security Violation Handler (EXPERIMENTAL)"
++ depends on CRYPTO_DEV_FSL_CAAM
++ default n
++ help
++ Enables installation of an interrupt handler with registrable
++ handler functions which can be specified to act on the consequences
++ of a security violation.
++
+ config CRYPTO_DEV_FSL_CAAM_DEBUG
+ bool "Enable debug output in CAAM driver"
+ depends on CRYPTO_DEV_FSL_CAAM
+@@ -119,3 +154,19 @@
+ help
+ Selecting this will enable printing of various debug
+ information in the CAAM driver.
++
++config CRYPTO_DEV_FSL_CAAM_KEYBLOB
++ tristate "Freescale CAAM memory keyblob driver backend"
++ depends on CRYPTO_DEV_FSL_CAAM
++ depends on CRYPTO_DEV_FSL_CAAM_JR
++ default y
++ help
++	  Enables the driver module for Key Blobs, which are part of
++	  Freescale's Cryptographic Accelerator and Assurance Module
++	  (CAAM). This module adds a key blob operation interface.
++
++ To compile this driver as a module, choose M here: the module
++ will be called caam_keyblob.
++
++
+diff -Nur linux-4.1.3/drivers/crypto/caam/key_gen.c linux-xbian-imx6/drivers/crypto/caam/key_gen.c
+--- linux-4.1.3/drivers/crypto/caam/key_gen.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/crypto/caam/key_gen.c 2015-07-27 23:13:04.213947410 +0200
+@@ -1,7 +1,7 @@
+ /*
+ * CAAM/SEC 4.x functions for handling key-generation jobs
+ *
+- * Copyright 2008-2011 Freescale Semiconductor, Inc.
++ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
+ *
+ */
+ #include "compat.h"
+@@ -19,8 +19,11 @@
+ dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+ #endif
+
+- if (err)
+- caam_jr_strstatus(dev, err);
++ if (err) {
++ char tmp[CAAM_ERROR_STR_MAX];
++
++ dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
++ }
+
+ res->err = err;
+
+@@ -48,29 +51,24 @@
+ u32 *desc;
+ struct split_key_result result;
+ dma_addr_t dma_addr_in, dma_addr_out;
+- int ret = -ENOMEM;
++ int ret = 0;
+
+ desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+ if (!desc) {
+ dev_err(jrdev, "unable to allocate key input memory\n");
+- return ret;
++ return -ENOMEM;
+ }
+
++ init_job_desc(desc, 0);
++
+ dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, dma_addr_in)) {
+ dev_err(jrdev, "unable to map key input memory\n");
+- goto out_free;
++ kfree(desc);
++ return -ENOMEM;
+ }
+-
+- dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
+- DMA_FROM_DEVICE);
+- if (dma_mapping_error(jrdev, dma_addr_out)) {
+- dev_err(jrdev, "unable to map key output memory\n");
+- goto out_unmap_in;
+- }
+-
+- init_job_desc(desc, 0);
++ dma_sync_single_for_device(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
+ append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
+
+ /* Sets MDHA up into an HMAC-INIT */
+@@ -91,9 +89,9 @@
+ LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
+
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
+- print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
+ #endif
+
+@@ -106,12 +104,13 @@
+ wait_for_completion_interruptible(&result.completion);
+ ret = result.err;
+ #ifdef DEBUG
+- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
++ print_hex_dump(KERN_ERR, "ctx.key@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_out,
+ split_key_pad_len, 1);
+ #endif
+ }
+-
++ dma_sync_single_for_cpu(jrdev, dma_addr_out, split_key_pad_len,
++ DMA_FROM_DEVICE);
+ dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
+ DMA_FROM_DEVICE);
+ out_unmap_in:
+diff -Nur linux-4.1.3/drivers/crypto/caam/Makefile linux-xbian-imx6/drivers/crypto/caam/Makefile
+--- linux-4.1.3/drivers/crypto/caam/Makefile 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/crypto/caam/Makefile 2015-07-27 23:13:04.205975852 +0200
+@@ -1,15 +1,14 @@
+ #
+ # Makefile for the CAAM backend and dependent components
+ #
+-ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
+- EXTRA_CFLAGS := -DDEBUG
+-endif
+
+ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
+-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
+ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
+ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
+ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
++obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_SM) += sm_store.o
++obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_SM_TEST) += sm_test.o
++obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_SECVIO) += secvio.o
++obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_KEYBLOB) += caam_keyblob.o
+
+-caam-objs := ctrl.o
+-caam_jr-objs := jr.o key_gen.o error.o
++caam-objs := ctrl.o jr.o error.o key_gen.o
+diff -Nur linux-4.1.3/drivers/crypto/caam/pdb.h linux-xbian-imx6/drivers/crypto/caam/pdb.h
+--- linux-4.1.3/drivers/crypto/caam/pdb.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/crypto/caam/pdb.h 2015-07-27 23:13:04.213947410 +0200
+@@ -44,7 +44,6 @@
+ #define PDBOPTS_ESP_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */
+ #define PDBOPTS_ESP_INCIPHDR 0x04 /* Prepend IP header to output frame */
+ #define PDBOPTS_ESP_IPVSN 0x02 /* process IPv6 header */
+-#define PDBOPTS_ESP_AOFL 0x04 /* adjust out frame len (decap, SEC>=5.3)*/
+ #define PDBOPTS_ESP_TUNNEL 0x01 /* tunnel mode next-header byte */
+ #define PDBOPTS_ESP_IPV6 0x02 /* ip header version is V6 */
+ #define PDBOPTS_ESP_DIFFSERV 0x40 /* copy TOS/TC from inner iphdr */
+diff -Nur linux-4.1.3/drivers/crypto/caam/regs.h linux-xbian-imx6/drivers/crypto/caam/regs.h
+--- linux-4.1.3/drivers/crypto/caam/regs.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/crypto/caam/regs.h 2015-07-27 23:13:04.213947410 +0200
+@@ -1,7 +1,7 @@
+ /*
+ * CAAM hardware register-level view
+ *
+- * Copyright 2008-2011 Freescale Semiconductor, Inc.
++ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
+ */
+
+ #ifndef REGS_H
+@@ -74,17 +74,22 @@
+ #endif
+ #else
+ #ifdef __LITTLE_ENDIAN
+-#define wr_reg32(reg, data) __raw_writel(data, reg)
+-#define rd_reg32(reg) __raw_readl(reg)
++#define wr_reg32(reg, data) writel(data, reg)
++#define rd_reg32(reg) readl(reg)
+ #ifdef CONFIG_64BIT
+-#define wr_reg64(reg, data) __raw_writeq(data, reg)
+-#define rd_reg64(reg) __raw_readq(reg)
++#define wr_reg64(reg, data) writeq(data, reg)
++#define rd_reg64(reg) readq(reg)
+ #endif
+ #endif
+ #endif
+
++#ifdef CONFIG_ARM
++/* These are common macros for Power, put here for ARMs */
++#define setbits32(_addr, _v) writel((readl(_addr) | (_v)), (_addr))
++#define clrbits32(_addr, _v) writel((readl(_addr) & ~(_v)), (_addr))
++#endif
++
+ #ifndef CONFIG_64BIT
+-#ifdef __BIG_ENDIAN
+ static inline void wr_reg64(u64 __iomem *reg, u64 data)
+ {
+ wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32);
+@@ -96,21 +101,6 @@
+ return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) |
+ ((u64)rd_reg32((u32 __iomem *)reg + 1));
+ }
+-#else
+-#ifdef __LITTLE_ENDIAN
+-static inline void wr_reg64(u64 __iomem *reg, u64 data)
+-{
+- wr_reg32((u32 __iomem *)reg + 1, (data & 0xffffffff00000000ull) >> 32);
+- wr_reg32((u32 __iomem *)reg, data & 0x00000000ffffffffull);
+-}
+-
+-static inline u64 rd_reg64(u64 __iomem *reg)
+-{
+- return (((u64)rd_reg32((u32 __iomem *)reg + 1)) << 32) |
+- ((u64)rd_reg32((u32 __iomem *)reg));
+-}
+-#endif
+-#endif
+ #endif
+
+ /*
+@@ -123,6 +113,98 @@
+ } __packed;
+
+ /*
++ * CHA version ID / instantiation bitfields
++ * Defined for use within cha_id in perfmon
++ * Note that the same shift/mask selectors can be used to pull out number
++ * of instantiated blocks within cha_num in perfmon, the locations are
++ * the same.
++ */
++
++/* Job Ring */
++#define CHA_ID_JR_SHIFT 60
++#define CHA_ID_JR_MASK (0xfull << CHA_ID_JR_SHIFT)
++
++/* DEscriptor COntroller */
++#define CHA_ID_DECO_SHIFT 56
++#define CHA_ID_DECO_MASK (0xfull << CHA_ID_DECO_SHIFT)
++#define CHA_NUM_DECONUM_SHIFT 56 /* legacy definition */
++#define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT)
++
++/* ZUC-Authentication */
++#define CHA_ID_ZA_SHIFT 44
++#define CHA_ID_ZA_MASK (0xfull << CHA_ID_ZA_SHIFT)
++
++/* ZUC-Encryption */
++#define CHA_ID_ZE_SHIFT 40
++#define CHA_ID_ZE_MASK (0xfull << CHA_ID_ZE_SHIFT)
++
++/* SNOW f9 */
++#define CHA_ID_SNW9_SHIFT 36
++#define CHA_ID_SNW9_MASK (0xfull << CHA_ID_SNW9_SHIFT)
++
++/* CRC */
++#define CHA_ID_CRC_SHIFT 32
++#define CHA_ID_CRC_MASK (0xfull << CHA_ID_CRC_SHIFT)
++
++/* Public Key */
++#define CHA_ID_PK_SHIFT 28
++#define CHA_ID_PK_MASK (0xfull << CHA_ID_PK_SHIFT)
++
++/* Kasumi */
++#define CHA_ID_KAS_SHIFT 24
++#define CHA_ID_KAS_MASK (0xfull << CHA_ID_KAS_SHIFT)
++
++/* SNOW f8 */
++#define CHA_ID_SNW8_SHIFT 20
++#define CHA_ID_SNW8_MASK (0xfull << CHA_ID_SNW8_SHIFT)
++
++/*
++ * Random Generator
++ * RNG4 = FIPS-verification-compliant, requires init kickstart for use
++ */
++#define CHA_ID_RNG_SHIFT 16
++#define CHA_ID_RNG_MASK (0xfull << CHA_ID_RNG_SHIFT)
++#define CHA_ID_RNG_A (0x1ull << CHA_ID_RNG_SHIFT)
++#define CHA_ID_RNG_B (0x2ull << CHA_ID_RNG_SHIFT)
++#define CHA_ID_RNG_C (0x3ull << CHA_ID_RNG_SHIFT)
++#define CHA_ID_RNG_4 (0x4ull << CHA_ID_RNG_SHIFT)
++
++/*
++ * Message Digest
++ * LP256 = Low Power (MD5/SHA1/SHA224/SHA256 + HMAC)
++ * LP512 = Low Power (LP256 + SHA384/SHA512)
++ * HP = High Power (LP512 + SMAC)
++ */
++#define CHA_ID_MD_SHIFT 12
++#define CHA_ID_MD_MASK (0xfull << CHA_ID_MD_SHIFT)
++#define CHA_ID_MD_LP256 (0x0ull << CHA_ID_MD_SHIFT)
++#define CHA_ID_MD_LP512 (0x1ull << CHA_ID_MD_SHIFT)
++#define CHA_ID_MD_HP (0x2ull << CHA_ID_MD_SHIFT)
++
++/* ARC4 Streamcipher */
++#define CHA_ID_ARC4_SHIFT 8
++#define CHA_ID_ARC4_MASK (0xfull << CHA_ID_ARC4_SHIFT)
++#define CHA_ID_ARC4_LP (0x0ull << CHA_ID_ARC4_SHIFT)
++#define CHA_ID_ARC4_HP (0x1ull << CHA_ID_ARC4_SHIFT)
++
++/* DES Blockcipher Accelerator */
++#define CHA_ID_DES_SHIFT 4
++#define CHA_ID_DES_MASK (0xfull << CHA_ID_DES_SHIFT)
++
++/*
++ * AES Blockcipher + Combo Mode Accelerator
++ * LP = Low Power (includes ECB/CBC/CFB128/OFB/CTR/CCM/CMAC/XCBC-MAC)
++ * HP = High Power (LP + CBCXCBC/CTRXCBC/XTS/GCM)
++ * DIFFPWR = ORed in if differential-power-analysis resistance is implemented
++ */
++#define CHA_ID_AES_SHIFT 0
++#define CHA_ID_AES_MASK (0xfull << CHA_ID_AES_SHIFT)
++#define CHA_ID_AES_LP (0x3ull << CHA_ID_AES_SHIFT)
++#define CHA_ID_AES_HP (0x4ull << CHA_ID_AES_SHIFT)
++#define CHA_ID_AES_DIFFPWR (0x1ull << CHA_ID_AES_SHIFT)
++
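++/*
++ * Illustrative helper, not part of the original patch: every CHA field
++ * above decodes with the same mask-then-shift idiom, applied to either
++ * cha_id (version) or cha_num (instance count) in the perfmon block.
++ * The helper name is hypothetical.
++ */
++static inline u32 caam_cha_field(u64 chaval, u64 mask, int shift)
++{
++	return (u32)((chaval & mask) >> shift);
++}
++
++/*
++ * e.g. caam_cha_field(cha_id, CHA_ID_RNG_MASK, CHA_ID_RNG_SHIFT) == 4
++ * identifies an RNG4, which needs the init kickstart noted above.
++ */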
++
++/*
+ * caam_perfmon - Performance Monitor/Secure Memory Status/
+ * CAAM Global Status/Component Version IDs
+ *
+@@ -130,45 +212,8 @@
+ */
+
+ /* Number of DECOs */
+-#define CHA_NUM_MS_DECONUM_SHIFT 24
+-#define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
+-
+-/* CHA Version IDs */
+-#define CHA_ID_LS_AES_SHIFT 0
+-#define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
+-
+-#define CHA_ID_LS_DES_SHIFT 4
+-#define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
+-
+-#define CHA_ID_LS_ARC4_SHIFT 8
+-#define CHA_ID_LS_ARC4_MASK (0xfull << CHA_ID_LS_ARC4_SHIFT)
+-
+-#define CHA_ID_LS_MD_SHIFT 12
+-#define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT)
+-
+-#define CHA_ID_LS_RNG_SHIFT 16
+-#define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT)
+-
+-#define CHA_ID_LS_SNW8_SHIFT 20
+-#define CHA_ID_LS_SNW8_MASK (0xfull << CHA_ID_LS_SNW8_SHIFT)
+-
+-#define CHA_ID_LS_KAS_SHIFT 24
+-#define CHA_ID_LS_KAS_MASK (0xfull << CHA_ID_LS_KAS_SHIFT)
+-
+-#define CHA_ID_LS_PK_SHIFT 28
+-#define CHA_ID_LS_PK_MASK (0xfull << CHA_ID_LS_PK_SHIFT)
+-
+-#define CHA_ID_MS_CRC_SHIFT 0
+-#define CHA_ID_MS_CRC_MASK (0xfull << CHA_ID_MS_CRC_SHIFT)
+-
+-#define CHA_ID_MS_SNW9_SHIFT 4
+-#define CHA_ID_MS_SNW9_MASK (0xfull << CHA_ID_MS_SNW9_SHIFT)
+-
+-#define CHA_ID_MS_DECO_SHIFT 24
+-#define CHA_ID_MS_DECO_MASK (0xfull << CHA_ID_MS_DECO_SHIFT)
+-
+-#define CHA_ID_MS_JR_SHIFT 28
+-#define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT)
++#define CHA_NUM_DECONUM_SHIFT 56
++#define CHA_NUM_DECONUM_MASK (0xfull << CHA_NUM_DECONUM_SHIFT)
+
+ struct sec_vid {
+ u16 ip_id;
+@@ -176,6 +221,10 @@
+ u8 min_rev;
+ };
+
++#define SEC_VID_IPID_SHIFT 16
++#define SEC_VID_MAJ_SHIFT 8
++#define SEC_VID_MAJ_MASK 0xFF00
++
+ struct caam_perfmon {
+ /* Performance Monitor Registers f00-f9f */
+ u64 req_dequeued; /* PC_REQ_DEQ - Dequeued Requests */
+@@ -188,36 +237,89 @@
+ u64 rsvd[13];
+
+ /* CAAM Hardware Instantiation Parameters fa0-fbf */
+- u32 cha_rev_ms; /* CRNR - CHA Rev No. Most significant half*/
+- u32 cha_rev_ls; /* CRNR - CHA Rev No. Least significant half*/
+-#define CTPR_MS_QI_SHIFT 25
+-#define CTPR_MS_QI_MASK (0x1ull << CTPR_MS_QI_SHIFT)
+-#define CTPR_MS_VIRT_EN_INCL 0x00000001
+-#define CTPR_MS_VIRT_EN_POR 0x00000002
+-#define CTPR_MS_PG_SZ_MASK 0x10
+-#define CTPR_MS_PG_SZ_SHIFT 4
+- u32 comp_parms_ms; /* CTPR - Compile Parameters Register */
+- u32 comp_parms_ls; /* CTPR - Compile Parameters Register */
+- u64 rsvd1[2];
++ u64 cha_rev; /* CRNR - CHA Revision Number */
++#define CTPR_QI_SHIFT 57
++#define CTPR_QI_MASK (0x1ull << CTPR_QI_SHIFT)
++ u64 comp_parms; /* CTPR - Compile Parameters Register */
++
++ /* Secure Memory State Visibility */
++ u32 rsvd1;
++ u32 smstatus; /* Secure memory status */
++ u32 rsvd2;
++ u32 smpartown; /* Secure memory partition owner */
+
+ /* CAAM Global Status fc0-fdf */
+ u64 faultaddr; /* FAR - Fault Address */
+ u32 faultliodn; /* FALR - Fault Address LIODN */
+ u32 faultdetail; /* FADR - Fault Addr Detail */
+- u32 rsvd2;
++ u32 rsvd3;
+ u32 status; /* CSTA - CAAM Status */
+- u64 rsvd3;
++ u32 smpart; /* Secure Memory Partition Parameters */
++ u32 smvid; /* Secure Memory Version ID */
+
+ /* Component Instantiation Parameters fe0-fff */
+ u32 rtic_id; /* RVID - RTIC Version ID */
+ u32 ccb_id; /* CCBVID - CCB Version ID */
+- u32 cha_id_ms; /* CHAVID - CHA Version ID Most Significant*/
+- u32 cha_id_ls; /* CHAVID - CHA Version ID Least Significant*/
+- u32 cha_num_ms; /* CHANUM - CHA Number Most Significant */
+- u32 cha_num_ls; /* CHANUM - CHA Number Least Significant*/
+- u32 caam_id_ms; /* CAAMVID - CAAM Version ID MS */
+- u32 caam_id_ls; /* CAAMVID - CAAM Version ID LS */
+-};
++ u64 cha_id; /* CHAVID - CHA Version ID */
++ u64 cha_num; /* CHANUM - CHA Number */
++ u64 caam_id; /* CAAMVID - CAAM Version ID */
++};
++
++#define SMSTATUS_PART_SHIFT 28
++#define SMSTATUS_PART_MASK (0xf << SMSTATUS_PART_SHIFT)
++#define SMSTATUS_PAGE_SHIFT 16
++#define SMSTATUS_PAGE_MASK (0x7ff << SMSTATUS_PAGE_SHIFT)
++#define SMSTATUS_MID_SHIFT 8
++#define SMSTATUS_MID_MASK (0x3f << SMSTATUS_MID_SHIFT)
++#define SMSTATUS_ACCERR_SHIFT 4
++#define SMSTATUS_ACCERR_MASK (0xf << SMSTATUS_ACCERR_SHIFT)
++#define SMSTATUS_ACCERR_NONE 0
++#define SMSTATUS_ACCERR_ALLOC 1 /* Page not allocated */
++#define SMSTATUS_ACCESS_ID 2 /* Not granted by ID */
++#define SMSTATUS_ACCESS_WRITE 3 /* Writes not allowed */
++#define SMSTATUS_ACCESS_READ 4 /* Reads not allowed */
++#define SMSTATUS_ACCESS_NONKEY 6 /* Non-key reads not allowed */
++#define SMSTATUS_ACCESS_BLOB 9 /* Blob access not allowed */
++#define SMSTATUS_ACCESS_DESCB 10 /* Descriptor Blob access spans pages */
++#define SMSTATUS_ACCESS_NON_SM 11 /* Outside Secure Memory range */
++#define SMSTATUS_ACCESS_XPAGE 12 /* Access crosses pages */
++#define SMSTATUS_ACCESS_INITPG 13 /* Page still initializing */
++#define SMSTATUS_STATE_SHIFT 0
++#define SMSTATUS_STATE_MASK (0xf << SMSTATUS_STATE_SHIFT)
++#define SMSTATUS_STATE_RESET 0
++#define SMSTATUS_STATE_INIT 1
++#define SMSTATUS_STATE_NORMAL 2
++#define SMSTATUS_STATE_FAIL 3
++
++/* up to 15 rings, 2 bits shifted by ring number */
++#define SMPARTOWN_RING_SHIFT 2
++#define SMPARTOWN_RING_MASK 3
++#define SMPARTOWN_AVAILABLE 0
++#define SMPARTOWN_NOEXIST 1
++#define SMPARTOWN_UNAVAILABLE 2
++#define SMPARTOWN_OURS 3
++
++/* Maximum number of pages possible */
++#define SMPART_MAX_NUMPG_SHIFT 16
++#define SMPART_MAX_NUMPG_MASK (0x3f << SMPART_MAX_NUMPG_SHIFT)
++
++/* Maximum partition number */
++#define SMPART_MAX_PNUM_SHIFT 12
++#define SMPART_MAX_PNUM_MASK (0xf << SMPART_MAX_PNUM_SHIFT)
++
++/* Highest possible page number */
++#define SMPART_MAX_PG_SHIFT 0
++#define SMPART_MAX_PG_MASK (0x3f << SMPART_MAX_PG_SHIFT)
++
++/* Max size of a page */
++#define SMVID_PG_SIZE_SHIFT 16
++#define SMVID_PG_SIZE_MASK (0x7 << SMVID_PG_SIZE_SHIFT)
++
++/* Major/Minor Version ID */
++#define SMVID_MAJ_VERS_SHIFT 8
++#define SMVID_MAJ_VERS (0xf << SMVID_MAJ_VERS_SHIFT)
++#define SMVID_MIN_VERS_SHIFT 0
++#define SMVID_MIN_VERS (0xf << SMVID_MIN_VERS_SHIFT)
+
+ /* LIODN programming for DMA configuration */
+ #define MSTRID_LOCK_LIODN 0x80000000
+@@ -270,17 +372,7 @@
+
+ /* RNG4 TRNG test registers */
+ struct rng4tst {
+-#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
+-#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_SC 0 /* use von Neumann data in
+- both entropy shifter and
+- statistical checker */
+-#define RTMCTL_SAMP_MODE_RAW_ES_SC 1 /* use raw data in both
+- entropy shifter and
+- statistical checker */
+-#define RTMCTL_SAMP_MODE_VON_NEUMANN_ES_RAW_SC 2 /* use von Neumann data in
+- entropy shifter, raw data
+- in statistical checker */
+-#define RTMCTL_SAMP_MODE_INVALID 3 /* invalid combination */
++#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
+ u32 rtmctl; /* misc. control register */
+ u32 rtscmisc; /* statistical check misc. register */
+ u32 rtpkrrng; /* poker range register */
+@@ -290,26 +382,22 @@
+ };
+ #define RTSDCTL_ENT_DLY_SHIFT 16
+ #define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
+-#define RTSDCTL_ENT_DLY_MIN 3200
+-#define RTSDCTL_ENT_DLY_MAX 12800
+ u32 rtsdctl; /* seed control register */
+ union {
+ u32 rtsblim; /* PRGM=1: sparse bit limit register */
+ u32 rttotsam; /* PRGM=0: total samples register */
+ };
+ u32 rtfrqmin; /* frequency count min. limit register */
+-#define RTFRQMAX_DISABLE (1 << 20)
+ union {
+ u32 rtfrqmax; /* PRGM=1: freq. count max. limit register */
+ u32 rtfrqcnt; /* PRGM=0: freq. count register */
+ };
+ u32 rsvd1[40];
+-#define RDSTA_SKVT 0x80000000
+-#define RDSTA_SKVN 0x40000000
+-#define RDSTA_IF0 0x00000001
+-#define RDSTA_IF1 0x00000002
+-#define RDSTA_IFMASK (RDSTA_IF1 | RDSTA_IF0)
+- u32 rdsta;
++#define RDSTA_IF 0x00000003 /* state handle instantiated flags 0 and 1 */
++#define RDSTA_SKVN 0x40000000 /* Secure Key Valid Non-Test mode */
++#define RDSTA_SKVT 0x80000000 /* Secure Key Valid Test. non-test mode */
++#define RDSTA_TF 0x00000300 /* State handle instantiated Test-mode */
++ u32 rdsta; /* DRNG status register */
+ u32 rsvd2[15];
+ };
+
+@@ -340,12 +428,9 @@
+ /* Bus Access Configuration Section 010-11f */
+ /* Read/Writable */
+ struct masterid jr_mid[4]; /* JRxLIODNR - JobR LIODN setup */
+- u32 rsvd3[11];
+- u32 jrstart; /* JRSTART - Job Ring Start Register */
++ u32 rsvd3[12];
+ struct masterid rtic_mid[4]; /* RTICxLIODNR - RTIC LIODN setup */
+- u32 rsvd4[5];
+- u32 deco_rsr; /* DECORSR - Deco Request Source */
+- u32 rsvd11;
++ u32 rsvd4[7];
+ u32 deco_rq; /* DECORR - DECO Request */
+ struct partid deco_mid[5]; /* DECOxLIODNR - 1 per DECO */
+ u32 rsvd5[22];
+@@ -386,11 +471,6 @@
+ #define MCFGR_DMA_RESET 0x10000000
+ #define MCFGR_LONG_PTR 0x00010000 /* Use >32-bit desc addressing */
+ #define SCFGR_RDBENABLE 0x00000400
+-#define SCFGR_VIRT_EN 0x00008000
+-#define DECORR_RQD0ENABLE 0x00000001 /* Enable DECO0 for direct access */
+-#define DECORSR_JR0 0x00000001 /* JR to supply TZ, SDID, ICID */
+-#define DECORSR_VALID 0x80000000
+-#define DECORR_DEN0 0x00010000 /* DECO0 available for access*/
+
+ /* AXI read cache control */
+ #define MCFGR_ARCACHE_SHIFT 12
+@@ -407,12 +487,6 @@
+ #define MCFGR_AXIPRI 0x00000008 /* Assert AXI priority sideband */
+ #define MCFGR_BURST_64 0x00000001 /* Max burst size */
+
+-/* JRSTART register offsets */
+-#define JRSTART_JR0_START 0x00000001 /* Start Job ring 0 */
+-#define JRSTART_JR1_START 0x00000002 /* Start Job ring 1 */
+-#define JRSTART_JR2_START 0x00000004 /* Start Job ring 2 */
+-#define JRSTART_JR3_START 0x00000008 /* Start Job ring 3 */
+-
+ /*
+ * caam_job_ring - direct job ring setup
+ * 1-4 possible per instantiation, base + 1000/2000/3000/4000
+@@ -455,7 +529,18 @@
+ u32 rsvd11;
+ u32 jrcommand; /* JRCRx - JobR command */
+
+- u32 rsvd12[932];
++ u32 rsvd12[33];
++
++ /* Secure Memory Configuration - if you have it */
++ u32 sm_cmd; /* SMCJRx - Secure memory command */
++ u32 rsvd13;
++ u32 sm_status; /* SMCSJRx - Secure memory status */
++ u32 rsvd14;
++ u32 sm_perm; /* SMAPJRx - Secure memory access perms */
++ u32 sm_group2; /* SMAP2JRx - Secure memory access group 2 */
++ u32 sm_group1; /* SMAP1JRx - Secure memory access group 1 */
++
++ u32 rsvd15[891];
+
+ /* Performance Monitor f00-fff */
+ struct caam_perfmon perfmon;
+@@ -578,6 +663,62 @@
+
+ #define JRCR_RESET 0x01
+
++/* secure memory command */
++#define SMC_PAGE_SHIFT 16
++#define SMC_PAGE_MASK (0xffff << SMC_PAGE_SHIFT)
++#define SMC_PART_SHIFT 8
++#define SMC_PART_MASK (0x0f << SMC_PART_SHIFT)
++#define SMC_CMD_SHIFT 0
++#define SMC_CMD_MASK (0x0f << SMC_CMD_SHIFT)
++
++#define SMC_CMD_ALLOC_PAGE 0x01 /* allocate page to this partition */
++#define SMC_CMD_DEALLOC_PAGE 0x02 /* deallocate page from partition */
++#define SMC_CMD_DEALLOC_PART 0x03 /* deallocate partition */
++#define SMC_CMD_PAGE_INQUIRY	0x05	/* find partition associated with page */
++
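++/*
++ * Illustrative only (not from the original patch): a secure memory
++ * command word combines the page, partition and command fields, e.g.
++ * allocating page 1 to partition 2 through a job ring's register set:
++ *
++ *	wr_reg32(&jrregs->sm_cmd, (1 << SMC_PAGE_SHIFT) |
++ *		 (2 << SMC_PART_SHIFT) | SMC_CMD_ALLOC_PAGE);
++ */
++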
++/* secure memory (command) status */
++#define SMCS_PAGE_SHIFT 16
++#define SMCS_PAGE_MASK (0x0fff << SMCS_PAGE_SHIFT)
++#define SMCS_CMDERR_SHIFT 14
++#define SMCS_CMDERR_MASK (3 << SMCS_CMDERR_SHIFT)
++#define SMCS_ALCERR_SHIFT 12
++#define SMCS_ALCERR_MASK (3 << SMCS_ALCERR_SHIFT)
++#define SMCS_PGOWN_SHIFT 6
++#define SMCS_PGWON_MASK (3 << SMCS_PGOWN_SHIFT)
++#define SMCS_PART_SHIFT 0
++#define SMCS_PART_MASK (0xf << SMCS_PART_SHIFT)
++
++#define SMCS_CMDERR_NONE 0
++#define SMCS_CMDERR_INCOMP 1 /* Command not yet complete */
++#define SMCS_CMDERR_SECFAIL 2 /* Security failure occurred */
++#define SMCS_CMDERR_OVERFLOW 3 /* Command overflow */
++
++#define SMCS_ALCERR_NONE 0
++#define SMCS_ALCERR_PSPERR	1	/* Partition marked PSP (dealloc only) */
++#define SMCS_ALCERR_PAGEAVAIL 2 /* Page not available */
++#define SMCS_ALCERR_PARTOWN 3 /* Partition ownership error */
++
++#define SMCS_PGOWN_AVAIL 0 /* Page is available */
++#define SMCS_PGOWN_NOEXIST 1 /* Page initializing or nonexistent */
++#define SMCS_PGOWN_NOOWN 2 /* Page owned by another processor */
++#define SMCS_PGOWN_OWNED 3 /* Page belongs to this processor */
++
++/* secure memory access permissions */
++#define SMCS_PERM_KEYMOD_SHIFT 16
++#define SMCA_PERM_KEYMOD_MASK (0xff << SMCS_PERM_KEYMOD_SHIFT)
++#define SMCA_PERM_CSP_ZERO 0x8000 /* Zero when deallocated or released */
++#define SMCA_PERM_PSP_LOCK 0x4000 /* Part./pages can't be deallocated */
++#define SMCA_PERM_PERM_LOCK 0x2000 /* Lock permissions */
++#define SMCA_PERM_GRP_LOCK 0x1000 /* Lock access groups */
++#define SMCA_PERM_RINGID_SHIFT 10
++#define SMCA_PERM_RINGID_MASK (3 << SMCA_PERM_RINGID_SHIFT)
++#define SMCA_PERM_G2_BLOB 0x0080 /* Group 2 blob import/export */
++#define SMCA_PERM_G2_WRITE 0x0020 /* Group 2 write */
++#define SMCA_PERM_G2_READ 0x0010 /* Group 2 read */
++#define SMCA_PERM_G1_BLOB 0x0008 /* Group 1... */
++#define SMCA_PERM_G1_WRITE 0x0002
++#define SMCA_PERM_G1_READ 0x0001
++
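++/*
++ * Illustrative only (not from the original patch): a plausible access
++ * permission word for a private, self-zeroizing partition with full
++ * group 1 rights and an 8-bit key modifier 'km' would be composed as:
++ *
++ *	(km << SMCS_PERM_KEYMOD_SHIFT) | SMCA_PERM_CSP_ZERO |
++ *	SMCA_PERM_G1_BLOB | SMCA_PERM_G1_WRITE | SMCA_PERM_G1_READ
++ */
++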
+ /*
+ * caam_assurance - Assurance Controller View
+ * base + 0x6000 padded out to 0x1000
+@@ -746,7 +887,6 @@
+ u32 jr_ctl_hi; /* CxJRR - JobR Control Register @800 */
+ u32 jr_ctl_lo;
+ u64 jr_descaddr; /* CxDADR - JobR Descriptor Address */
+-#define DECO_OP_STATUS_HI_ERR_MASK 0xF00000FF
+ u32 op_status_hi; /* DxOPSTA - DECO Operation Status */
+ u32 op_status_lo;
+ u32 rsvd24[2];
+@@ -760,21 +900,36 @@
+ struct deco_sg_table sctr_tbl[4]; /* DxSTR - Scatter Tables */
+ u32 rsvd29[48];
+ u32 descbuf[64]; /* DxDESB - Descriptor buffer */
+- u32 rscvd30[193];
+-#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000
+-#define DESC_DBG_DECO_STAT_VALID 0x80000000
+-#define DESC_DBG_DECO_STAT_MASK 0x00F00000
+- u32 desc_dbg; /* DxDDR - DECO Debug Register */
+- u32 rsvd31[126];
+-};
+-
+-#define DECO_JQCR_WHL 0x20000000
+-#define DECO_JQCR_FOUR 0x10000000
+-
+-#define JR_BLOCK_NUMBER 1
+-#define ASSURE_BLOCK_NUMBER 6
+-#define QI_BLOCK_NUMBER 7
+-#define DECO_BLOCK_NUMBER 8
+-#define PG_SIZE_4K 0x1000
+-#define PG_SIZE_64K 0x10000
++ u32 rsvd30[320];
++};
++
++/*
++ * Current top-level view of memory map is:
++ *
++ * 0x0000 - 0x0fff - CAAM Top-Level Control
++ * 0x1000 - 0x1fff - Job Ring 0
++ * 0x2000 - 0x2fff - Job Ring 1
++ * 0x3000 - 0x3fff - Job Ring 2
++ * 0x4000 - 0x4fff - Job Ring 3
++ * 0x5000 - 0x5fff - (unused)
++ * 0x6000 - 0x6fff - Assurance Controller
++ * 0x7000 - 0x7fff - Queue Interface
++ * 0x8000 - 0x8fff - DECO-CCB 0
++ * 0x9000 - 0x9fff - DECO-CCB 1
++ * 0xa000 - 0xafff - DECO-CCB 2
++ * 0xb000 - 0xbfff - DECO-CCB 3
++ * 0xc000 - 0xcfff - DECO-CCB 4
++ *
++ * caam_full describes the full register view of CAAM if useful,
++ * although many configurations may choose to implement parts of
++ * the register map separately, in differing privilege regions
++ */
++struct caam_full {
++ struct caam_ctrl __iomem ctrl;
++ struct caam_job_ring jr[4];
++ u64 rsvd[512];
++ struct caam_assurance assure;
++ struct caam_queue_if qi;
++};
++
+ #endif /* REGS_H */
+diff -Nur linux-4.1.3/drivers/crypto/caam/secvio.c linux-xbian-imx6/drivers/crypto/caam/secvio.c
+--- linux-4.1.3/drivers/crypto/caam/secvio.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/crypto/caam/secvio.c 2015-07-27 23:13:04.213947410 +0200
+@@ -0,0 +1,290 @@
++
++/*
++ * SNVS Security Violation Handler
++ * Copyright (C) 2012-2015 Freescale Semiconductor, Inc., All Rights Reserved
++ */
++
++#include "compat.h"
++#include "intern.h"
++#include "secvio.h"
++#include "regs.h"
++
++/*
++ * These names are associated with each violation handler.
++ * The source names were taken from MX6, and are based on recommendations
++ * for most common SoCs.
++ */
++static const u8 *violation_src_name[] = {
++ "CAAM Internal Security Violation",
++ "JTAG Alarm",
++ "Watchdog",
++ "(reserved)",
++ "External Boot",
++ "External Tamper Detect",
++};
++
++/* These names help describe security monitor state for the console */
++static const u8 *snvs_ssm_state_name[] = {
++ "init",
++ "hard fail",
++ "(undef:2)",
++ "soft fail",
++ "(undef:4)",
++ "(undef:5)",
++ "(undef:6)",
++ "(undef:7)",
++ "transition",
++ "check",
++ "(undef:10)",
++ "non-secure",
++ "(undef:12)",
++ "trusted",
++ "(undef:14)",
++ "secure",
++};
++
++/* Top-level security violation interrupt */
++static irqreturn_t snvs_secvio_interrupt(int irq, void *snvsdev)
++{
++ struct device *dev = snvsdev;
++ struct snvs_secvio_drv_private *svpriv = dev_get_drvdata(dev);
++
++ /* Check the HP secvio status register */
++ svpriv->irqcause = rd_reg32(&svpriv->svregs->hp.secvio_status) &
++ HP_SECVIOST_SECVIOMASK;
++
++ if (!svpriv->irqcause)
++ return IRQ_NONE;
++
++ /* Now ACK cause */
++ setbits32(&svpriv->svregs->hp.secvio_status, svpriv->irqcause);
++
++ /* And run deferred service */
++ preempt_disable();
++ tasklet_schedule(&svpriv->irqtask[smp_processor_id()]);
++ preempt_enable();
++
++ return IRQ_HANDLED;
++}
++
++/* Deferred service handler. Tasklet arg is simply the SNVS dev */
++static void snvs_secvio_dispatch(unsigned long indev)
++{
++ struct device *dev = (struct device *)indev;
++ struct snvs_secvio_drv_private *svpriv = dev_get_drvdata(dev);
++ unsigned long flags;
++ int i;
++
++
++ /* Look through stored causes, call each handler if exists */
++ for (i = 0; i < MAX_SECVIO_SOURCES; i++)
++ if (svpriv->irqcause & (1 << i)) {
++ spin_lock_irqsave(&svpriv->svlock, flags);
++ svpriv->intsrc[i].handler(dev, i,
++ svpriv->intsrc[i].ext);
++ spin_unlock_irqrestore(&svpriv->svlock, flags);
++ };
++
++ /* Re-enable now-serviced interrupts */
++ setbits32(&svpriv->svregs->hp.secvio_intcfg, svpriv->irqcause);
++}
++
++/*
++ * Default cause handler, used in lieu of an application-defined handler.
++ * All it does at this time is print a console message. It could force a halt.
++ */
++static void snvs_secvio_default(struct device *dev, u32 cause, void *ext)
++{
++ struct snvs_secvio_drv_private *svpriv = dev_get_drvdata(dev);
++
++ dev_err(dev, "Unhandled Security Violation Interrupt %d = %s\n",
++ cause, svpriv->intsrc[cause].intname);
++}
++
++/*
++ * Install an application-defined handler for a specified cause
++ * Arguments:
++ * - dev points to SNVS-owning device
++ * - cause interrupt source cause
++ * - handler application-defined handler, gets called with dev
++ * source cause, and locally-defined handler argument
++ * - cause_description points to a string to override the default cause
++ * name, this can be used as an alternate for error
++ * messages and such. If left NULL, the default
++ * description string is used.
++ * - ext pointer to any extra data needed by the handler.
++ */
++int snvs_secvio_install_handler(struct device *dev, enum secvio_cause cause,
++ void (*handler)(struct device *dev, u32 cause,
++ void *ext),
++ u8 *cause_description, void *ext)
++{
++ unsigned long flags;
++ struct snvs_secvio_drv_private *svpriv;
++
++ svpriv = dev_get_drvdata(dev);
++
++ if ((handler == NULL) || (cause > SECVIO_CAUSE_SOURCE_5))
++ return -EINVAL;
++
++ spin_lock_irqsave(&svpriv->svlock, flags);
++ svpriv->intsrc[cause].handler = handler;
++ if (cause_description != NULL)
++ svpriv->intsrc[cause].intname = cause_description;
++ if (ext != NULL)
++ svpriv->intsrc[cause].ext = ext;
++ spin_unlock_irqrestore(&svpriv->svlock, flags);
++
++ return 0;
++}
++EXPORT_SYMBOL(snvs_secvio_install_handler);
++
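++/*
++ * Illustrative usage sketch, not part of the original patch: a board
++ * driver holding the SNVS device could hook the external tamper source
++ * with its own handler; my_tamper_handler() is hypothetical:
++ *
++ *	static void my_tamper_handler(struct device *dev, u32 cause,
++ *				      void *ext)
++ *	{
++ *		dev_crit(dev, "tamper detected\n");
++ *	}
++ *
++ *	snvs_secvio_install_handler(snvsdev, SECVIO_CAUSE_TAMPER_DETECT,
++ *				    my_tamper_handler, NULL, NULL);
++ */
++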
++/*
++ * Remove an application-defined handler for a specified cause (and, by
++ * implication, restore the "default").
++ * Arguments:
++ * - dev points to SNVS-owning device
++ * - cause interrupt source cause
++ */
++int snvs_secvio_remove_handler(struct device *dev, enum secvio_cause cause)
++{
++ unsigned long flags;
++ struct snvs_secvio_drv_private *svpriv;
++
++ svpriv = dev_get_drvdata(dev);
++
++ if (cause > SECVIO_CAUSE_SOURCE_5)
++ return -EINVAL;
++
++ spin_lock_irqsave(&svpriv->svlock, flags);
++ svpriv->intsrc[cause].intname = violation_src_name[cause];
++ svpriv->intsrc[cause].handler = snvs_secvio_default;
++ svpriv->intsrc[cause].ext = NULL;
++ spin_unlock_irqrestore(&svpriv->svlock, flags);
++ return 0;
++}
++EXPORT_SYMBOL(snvs_secvio_remove_handler);
++
++static int snvs_secvio_remove(struct platform_device *pdev)
++{
++ struct device *svdev;
++ struct snvs_secvio_drv_private *svpriv;
++ int i;
++
++ svdev = &pdev->dev;
++ svpriv = dev_get_drvdata(svdev);
++
++ /* Set all sources to nonfatal */
++ wr_reg32(&svpriv->svregs->hp.secvio_intcfg, 0);
++
++ /* Remove tasklets and release interrupt */
++ for_each_possible_cpu(i)
++ tasklet_kill(&svpriv->irqtask[i]);
++
++ free_irq(svpriv->irq, svdev);
++ iounmap(svpriv->svregs);
++ kfree(svpriv);
++
++ return 0;
++}
++
++static int snvs_secvio_probe(struct platform_device *pdev)
++{
++ struct device *svdev;
++ struct snvs_secvio_drv_private *svpriv;
++ struct device_node *np, *npirq;
++ struct snvs_full __iomem *snvsregs;
++ int i, error;
++ u32 hpstate;
++
++ svpriv = kzalloc(sizeof(struct snvs_secvio_drv_private), GFP_KERNEL);
++ if (!svpriv)
++ return -ENOMEM;
++
++ svdev = &pdev->dev;
++ dev_set_drvdata(svdev, svpriv);
++ svpriv->pdev = pdev;
++ np = pdev->dev.of_node;
++
++ npirq = of_find_compatible_node(NULL, NULL, "fsl,imx6q-caam-secvio");
++ if (!npirq) {
++ dev_err(svdev, "can't identify secvio interrupt\n");
++ kfree(svpriv);
++ return -EINVAL;
++ }
++ svpriv->irq = irq_of_parse_and_map(npirq, 0);
++ if (svpriv->irq <= 0) {
++ kfree(svpriv);
++ return -EINVAL;
++ }
++
++ snvsregs = of_iomap(np, 0);
++ if (!snvsregs) {
++ dev_err(svdev, "register mapping failed\n");
++ return -ENOMEM;
++ }
++ svpriv->svregs = (struct snvs_full __force *)snvsregs;
++
++ /* Device data set up. Now init interrupt source descriptions */
++ for (i = 0; i < MAX_SECVIO_SOURCES; i++) {
++ svpriv->intsrc[i].intname = violation_src_name[i];
++ svpriv->intsrc[i].handler = snvs_secvio_default;
++ }
++ /* Connect main handler */
++ for_each_possible_cpu(i)
++ tasklet_init(&svpriv->irqtask[i], snvs_secvio_dispatch,
++ (unsigned long)svdev);
++
++ error = request_irq(svpriv->irq, snvs_secvio_interrupt,
++ IRQF_SHARED, "snvs-secvio", svdev);
++ if (error) {
++ dev_err(svdev, "can't connect secvio interrupt\n");
++ irq_dispose_mapping(svpriv->irq);
++ svpriv->irq = 0;
++ iounmap(svpriv->svregs);
++ kfree(svpriv);
++ return -EINVAL;
++ }
++
++ /*
++ * Configure all sources as fatal violations except LP section,
++ * source #5 (typically used as an external tamper detect), and
++ * source #3 (typically unused). Once the transition to secure
++ * mode has occurred, these become "fatal" violations.
++ */
++ wr_reg32(&svpriv->svregs->hp.secvio_intcfg,
++ HP_SECVIO_INTEN_SRC4 | HP_SECVIO_INTEN_SRC2 |
++ HP_SECVIO_INTEN_SRC1 | HP_SECVIO_INTEN_SRC0);
++
++ hpstate = (rd_reg32(&svpriv->svregs->hp.status) &
++ HP_STATUS_SSM_ST_MASK) >> HP_STATUS_SSM_ST_SHIFT;
++ dev_info(svdev, "violation handlers armed - %s state\n",
++ snvs_ssm_state_name[hpstate]);
++
++ return 0;
++}
++
++static struct of_device_id snvs_secvio_match[] = {
++ {
++ .compatible = "fsl,imx6q-caam-snvs",
++ },
++ {},
++};
++MODULE_DEVICE_TABLE(of, snvs_secvio_match);
++
++static struct platform_driver snvs_secvio_driver = {
++ .driver = {
++ .name = "snvs-secvio",
++ .owner = THIS_MODULE,
++ .of_match_table = snvs_secvio_match,
++ },
++ .probe = snvs_secvio_probe,
++ .remove = snvs_secvio_remove,
++};
++
++module_platform_driver(snvs_secvio_driver);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("FSL SNVS Security Violation Handler");
++MODULE_AUTHOR("Freescale Semiconductor - MCU");
++
+diff -Nur linux-4.1.3/drivers/crypto/caam/secvio.h linux-xbian-imx6/drivers/crypto/caam/secvio.h
+--- linux-4.1.3/drivers/crypto/caam/secvio.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/crypto/caam/secvio.h 2015-07-27 23:13:04.213947410 +0200
+@@ -0,0 +1,66 @@
++
++/*
++ * CAAM Security Violation Handler
++ * Copyright (C) 2012-2014 Freescale Semiconductor, Inc., All Rights Reserved
++ */
++
++#ifndef SECVIO_H
++#define SECVIO_H
++
++#include "snvsregs.h"
++
++
++/*
++ * Defines the published interfaces to install/remove application-specified
++ * handlers for catching violations
++ */
++
++#define MAX_SECVIO_SOURCES 6
++
++/* these are the untranslated causes */
++enum secvio_cause {
++ SECVIO_CAUSE_SOURCE_0,
++ SECVIO_CAUSE_SOURCE_1,
++ SECVIO_CAUSE_SOURCE_2,
++ SECVIO_CAUSE_SOURCE_3,
++ SECVIO_CAUSE_SOURCE_4,
++ SECVIO_CAUSE_SOURCE_5
++};
++
++/* These are common "recommended" cause definitions for most devices */
++#define SECVIO_CAUSE_CAAM_VIOLATION SECVIO_CAUSE_SOURCE_0
++#define SECVIO_CAUSE_JTAG_ALARM SECVIO_CAUSE_SOURCE_1
++#define SECVIO_CAUSE_WATCHDOG SECVIO_CAUSE_SOURCE_2
++#define SECVIO_CAUSE_EXTERNAL_BOOT SECVIO_CAUSE_SOURCE_4
++#define SECVIO_CAUSE_TAMPER_DETECT SECVIO_CAUSE_SOURCE_5
++
++int snvs_secvio_install_handler(struct device *dev, enum secvio_cause cause,
++ void (*handler)(struct device *dev, u32 cause,
++ void *ext),
++ u8 *cause_description, void *ext);
++int snvs_secvio_remove_handler(struct device *dev, enum secvio_cause cause);
++
++/*
++ * Private data definitions for the secvio "driver"
++ */
++
++struct secvio_int_src {
++ const u8 *intname; /* Points to a descriptive name for source */
++ void *ext; /* Extended data to pass to the handler */
++ void (*handler)(struct device *dev, u32 cause, void *ext);
++};
++
++struct snvs_secvio_drv_private {
++ struct platform_device *pdev;
++ spinlock_t svlock ____cacheline_aligned;
++ struct tasklet_struct irqtask[NR_CPUS];
++ struct snvs_full __iomem *svregs; /* both HP and LP domains */
++ int irq;
++ u32 irqcause; /* stashed cause of violation interrupt */
++
++ /* Registered handlers for each violation */
++ struct secvio_int_src intsrc[MAX_SECVIO_SOURCES];
++
++};
++
++#endif /* SECVIO_H */
+diff -Nur linux-4.1.3/drivers/crypto/caam/sg_sw_sec4.h linux-xbian-imx6/drivers/crypto/caam/sg_sw_sec4.h
+--- linux-4.1.3/drivers/crypto/caam/sg_sw_sec4.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/crypto/caam/sg_sw_sec4.h 2015-07-27 23:13:04.213947410 +0200
+@@ -1,7 +1,7 @@
+ /*
+ * CAAM/SEC 4.x functions for using scatterlists in caam driver
+ *
+- * Copyright 2008-2011 Freescale Semiconductor, Inc.
++ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
+ *
+ */
+
+@@ -91,13 +91,22 @@
+ {
+ if (unlikely(chained)) {
+ int i;
++ struct scatterlist *tsg = sg;
++
++ /* We use a local copy of the sg pointer to avoid moving the
++ * head of the list pointed to by sg as we walk the list.
++ */
+ for (i = 0; i < nents; i++) {
+- dma_map_sg(dev, sg, 1, dir);
+- sg = sg_next(sg);
++ dma_map_sg(dev, tsg, 1, dir);
++ tsg = sg_next(tsg);
+ }
+ } else {
+ dma_map_sg(dev, sg, nents, dir);
+ }
++
++ if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
++ dma_sync_sg_for_device(dev, sg, nents, dir);
++
+ return nents;
+ }
+
+@@ -105,6 +114,9 @@
+ unsigned int nents, enum dma_data_direction dir,
+ bool chained)
+ {
++ if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
++ dma_sync_sg_for_cpu(dev, sg, nents, dir);
++
+ if (unlikely(chained)) {
+ int i;
+ for (i = 0; i < nents; i++) {
+@@ -116,3 +128,41 @@
+ }
+ return nents;
+ }
++
++/* Copy from len bytes of sg to dest, starting from beginning */
++static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
++{
++ struct scatterlist *current_sg = sg;
++ int cpy_index = 0, next_cpy_index = current_sg->length;
++
++ while (next_cpy_index < len) {
++ memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg),
++ current_sg->length);
++ current_sg = sg_next(current_sg);
++ cpy_index = next_cpy_index;
++ next_cpy_index += current_sg->length;
++ }
++ if (cpy_index < len)
++ memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg),
++ len - cpy_index);
++}
++
++/* Copy sg data, from to_skip to end, to dest */
++static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
++ int to_skip, unsigned int end)
++{
++ struct scatterlist *current_sg = sg;
++ int sg_index, cpy_index;
++
++ sg_index = current_sg->length;
++ while (sg_index <= to_skip) {
++ current_sg = sg_next(current_sg);
++ sg_index += current_sg->length;
++ }
++ cpy_index = sg_index - to_skip;
++ memcpy(dest, (u8 *) sg_virt(current_sg) +
++ current_sg->length - cpy_index, cpy_index);
++ current_sg = sg_next(current_sg);
++ if (end - sg_index)
++ sg_copy(dest + cpy_index, current_sg, end - sg_index);
++}
+diff -Nur linux-4.1.3/drivers/crypto/caam/sm.h linux-xbian-imx6/drivers/crypto/caam/sm.h
+--- linux-4.1.3/drivers/crypto/caam/sm.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/crypto/caam/sm.h 2015-07-27 23:13:04.213947410 +0200
+@@ -0,0 +1,88 @@
++
++/*
++ * CAAM Secure Memory/Keywrap API Definitions
++ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
++ */
++
++#ifndef SM_H
++#define SM_H
++
++
++/* Storage access permissions */
++#define SM_PERM_READ 0x01
++#define SM_PERM_WRITE 0x02
++#define SM_PERM_BLOB 0x03
++
++
++/* Keystore maintenance functions */
++void sm_init_keystore(struct device *dev);
++u32 sm_detect_keystore_units(struct device *dev);
++int sm_establish_keystore(struct device *dev, u32 unit);
++void sm_release_keystore(struct device *dev, u32 unit);
++void caam_sm_shutdown(struct platform_device *pdev);
++int caam_sm_example_init(struct platform_device *pdev);
++
++/* Keystore accessor functions */
++extern int sm_keystore_slot_alloc(struct device *dev, u32 unit, u32 size,
++ u32 *slot);
++extern int sm_keystore_slot_dealloc(struct device *dev, u32 unit, u32 slot);
++extern int sm_keystore_slot_load(struct device *dev, u32 unit, u32 slot,
++ const u8 *key_data, u32 key_length);
++extern int sm_keystore_slot_read(struct device *dev, u32 unit, u32 slot,
++ u32 key_length, u8 *key_data);
++extern int sm_keystore_slot_encapsulate(struct device *dev, u32 unit,
++ u32 inslot, u32 outslot, u16 secretlen,
++ u8 *keymod, u16 keymodlen);
++extern int sm_keystore_slot_decapsulate(struct device *dev, u32 unit,
++ u32 inslot, u32 outslot, u16 secretlen,
++ u8 *keymod, u16 keymodlen);
++
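++/*
++ * Illustrative call sequence (not part of the original patch) chaining
++ * the accessors above; unit 0 is assumed present and 'key'/'keylen'
++ * are hypothetical caller-owned objects:
++ *
++ *	sm_init_keystore(dev);
++ *	units = sm_detect_keystore_units(dev);
++ *	sm_establish_keystore(dev, 0);
++ *	sm_keystore_slot_alloc(dev, 0, keylen, &slot);
++ *	sm_keystore_slot_load(dev, 0, slot, key, keylen);
++ *	(use, read back, or encapsulate the secret)
++ *	sm_keystore_slot_dealloc(dev, 0, slot);
++ *	sm_release_keystore(dev, 0);
++ */
++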
++/* Data structure to hold per-slot information */
++struct keystore_data_slot_info {
++ u8 allocated; /* Track slot assignments */
++ u32 key_length; /* Size of the key */
++};
++
++/* Data structure to hold keystore information */
++struct keystore_data {
++ void *base_address; /* Base of the Secure Partition */
++ u32 slot_count; /* Number of slots in the keystore */
++ struct keystore_data_slot_info *slot; /* Per-slot information */
++};
++
++/* store the detected attributes of a secure memory page */
++struct sm_page_descriptor {
++ u16 phys_pagenum; /* may be discontiguous */
++ u16 own_part; /* Owning partition */
++ void *pg_base; /* Calculated virtual address */
++ struct keystore_data *ksdata;
++};
++
++struct caam_drv_private_sm {
++ struct device *parentdev; /* this ends up as the controller */
++ struct device *smringdev; /* ring that owns this instance */
++ spinlock_t kslock ____cacheline_aligned;
++
++ /* Default parameters for geometry */
++ u32 max_pages; /* maximum pages this instance can support */
++ u32 top_partition; /* highest partition number in this instance */
++ u32 top_page; /* highest page number in this instance */
++ u32 page_size; /* page size */
++ u32 slot_size; /* selected size of each storage block */
++
++ /* Partition/Page Allocation Map */
++ u32 localpages; /* Number of pages we can access */
++ struct sm_page_descriptor *pagedesc; /* Allocated per-page */
++
++ /* Installed handlers for keystore access */
++ int (*data_init)(struct device *dev, u32 unit);
++ void (*data_cleanup)(struct device *dev, u32 unit);
++ int (*slot_alloc)(struct device *dev, u32 unit, u32 size, u32 *slot);
++ int (*slot_dealloc)(struct device *dev, u32 unit, u32 slot);
++ void *(*slot_get_address)(struct device *dev, u32 unit, u32 handle);
++ u32 (*slot_get_base)(struct device *dev, u32 unit, u32 handle);
++ u32 (*slot_get_offset)(struct device *dev, u32 unit, u32 handle);
++ u32 (*slot_get_slot_size)(struct device *dev, u32 unit, u32 handle);
++};
++
++#endif /* SM_H */
+diff -Nur linux-4.1.3/drivers/crypto/caam/sm_store.c linux-xbian-imx6/drivers/crypto/caam/sm_store.c
+--- linux-4.1.3/drivers/crypto/caam/sm_store.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/crypto/caam/sm_store.c 2015-07-27 23:13:04.213947410 +0200
+@@ -0,0 +1,896 @@
++
++/*
++ * CAAM Secure Memory Storage Interface
++ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc.
++ *
++ * Loosely based on the SHW Keystore API for SCC/SCC2
++ * Experimental implementation and NOT intended for upstream use. Expect
++ * this interface to be amended significantly in the future once it becomes
++ * integrated into live applications.
++ *
++ * Known issues:
++ *
++ * - Executes one instance of a secure memory "driver". This is tied to the
++ * fact that job rings can't run as standalone instances in the present
++ * configuration.
++ *
++ * - It does not expose a userspace interface. The value of a userspace
++ * interface for access to secrets is a point for further architectural
++ * discussion.
++ *
++ * - Partition/permission management is not part of this interface. It
++ * depends on some level of "knowledge" agreed upon between bootloader,
++ * provisioning applications, and OS-hosted software (which uses this
++ * driver).
++ *
++ * - No means of identifying the location or purpose of secrets managed by
++ * this interface exists; "slot location" and format of a given secret
++ * needs to be agreed upon between bootloader, provisioner, and OS-hosted
++ * application.
++ */
++
++#include "compat.h"
++#include "regs.h"
++#include "jr.h"
++#include "desc.h"
++#include "intern.h"
++#include "error.h"
++#include "sm.h"
++
++#ifdef SM_DEBUG_CONT
++void sm_show_page(struct device *dev, struct sm_page_descriptor *pgdesc)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ u32 i, *smdata;
++
++ dev_info(dev, "physical page %d content at 0x%08x\n",
++ pgdesc->phys_pagenum, pgdesc->pg_base);
++ smdata = pgdesc->pg_base;
++ for (i = 0; i < (smpriv->page_size / sizeof(u32)); i += 4)
++ dev_info(dev, "[0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
++ (u32)&smdata[i], smdata[i], smdata[i+1], smdata[i+2],
++ smdata[i+3]);
++}
++#endif
++
++/*
++ * Construct a secure memory blob encapsulation job descriptor
++ *
++ * - desc pointer to hold new (to be allocated) pointer to the generated
++ * descriptor for later use. Calling thread can kfree the
++ * descriptor after execution.
++ * - keymod Physical pointer to key modifier (contiguous piece).
++ * - keymodsz Size of key modifier in bytes (should normally be 8).
++ * - secretbuf Physical pointer (within an accessible secure memory page)
++ * of the secret to be encapsulated.
++ * - outbuf Physical pointer (within an accessible secure memory page)
++ * of the encapsulated output. This will be larger than the
++ * input secret because of the added encapsulation data.
++ * - secretsz Size of input secret, in bytes.
++ * - auth If nonzero, use AES-CCM for encapsulation, else use ECB
++ *
++ * Note: this uses 32-bit pointers at present
++ */
++#define INITIAL_DESCSZ 16 /* size of tmp buffer for descriptor const. */
++static int blob_encap_desc(u32 **desc, dma_addr_t keymod, u16 keymodsz,
++ dma_addr_t secretbuf, dma_addr_t outbuf,
++ u16 secretsz, bool auth)
++{
++ u32 *tdesc, tmpdesc[INITIAL_DESCSZ];
++ u16 dsize, idx;
++
++ memset(tmpdesc, 0, INITIAL_DESCSZ * sizeof(u32));
++ idx = 1;
++
++ /* Load key modifier */
++ tmpdesc[idx++] = CMD_LOAD | LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY |
++ ((12 << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK) |
++ (keymodsz & LDST_LEN_MASK);
++
++ tmpdesc[idx++] = (u32)keymod;
++
++ /* Encapsulate to secure memory */
++ tmpdesc[idx++] = CMD_SEQ_IN_PTR | secretsz;
++ tmpdesc[idx++] = (u32)secretbuf;
++
++ /* Add space for BKEK and MAC tag */
++	tmpdesc[idx++] = CMD_SEQ_OUT_PTR | (secretsz + (32 + 16));
++
++ tmpdesc[idx++] = (u32)outbuf;
++ tmpdesc[idx] = CMD_OPERATION | OP_TYPE_ENCAP_PROTOCOL | OP_PCLID_BLOB |
++ OP_PCL_BLOB_PTXT_SECMEM;
++ if (auth)
++ tmpdesc[idx] |= OP_PCL_BLOB_EKT;
++
++ idx++;
++ tmpdesc[0] = CMD_DESC_HDR | HDR_ONE | (idx & HDR_DESCLEN_MASK);
++ dsize = idx * sizeof(u32);
++
++ tdesc = kmalloc(dsize, GFP_KERNEL | GFP_DMA);
++ if (tdesc == NULL)
++ return 0;
++
++ memcpy(tdesc, tmpdesc, dsize);
++ *desc = tdesc;
++ return dsize;
++}
++
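++/*
++ * Illustrative layout, derived from the constructor above (not from
++ * the original patch): for an ECB blob the finished descriptor is
++ * eight words:
++ *
++ *	[0] CMD_DESC_HDR | HDR_ONE | 8
++ *	[1] CMD_LOAD (class 2 key register, offset 12, keymodsz bytes)
++ *	[2] key modifier pointer
++ *	[3] CMD_SEQ_IN_PTR | secretsz
++ *	[4] secret pointer (secure memory)
++ *	[5] CMD_SEQ_OUT_PTR | secretsz + 48 (32-byte BKEK + 16-byte MAC)
++ *	[6] output pointer (secure memory)
++ *	[7] CMD_OPERATION | OP_TYPE_ENCAP_PROTOCOL | OP_PCLID_BLOB |
++ *	    OP_PCL_BLOB_PTXT_SECMEM (ORed with OP_PCL_BLOB_EKT if auth)
++ */
++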
++/*
++ * Construct a secure memory blob decapsulation job descriptor
++ *
++ * - desc pointer to hold new (to be allocated) pointer to the generated
++ * descriptor for later use. Calling thread can kfree the
++ * descriptor after execution.
++ * - keymod Physical pointer to key modifier (contiguous piece).
++ * - keymodsz Size of key modifier in bytes (should normally be 16).
++ * - blobbuf Physical pointer (within an accessible secure memory page)
++ * of the blob to be decapsulated.
++ * - outbuf Physical pointer (within an accessible secure memory page)
++ * of the decapsulated output.
++ * - blobsz	Size of input blob, in bytes.
++ * - auth If nonzero, assume AES-CCM for decapsulation, else use ECB
++ *
++ * Note: this uses 32-bit pointers at present
++ */
++static int blob_decap_desc(u32 **desc, dma_addr_t keymod, u16 keymodsz,
++ dma_addr_t blobbuf, dma_addr_t outbuf,
++ u16 blobsz, bool auth)
++{
++ u32 *tdesc, tmpdesc[INITIAL_DESCSZ];
++ u16 dsize, idx;
++
++ memset(tmpdesc, 0, INITIAL_DESCSZ * sizeof(u32));
++ idx = 1;
++
++ /* Load key modifier */
++ tmpdesc[idx++] = CMD_LOAD | LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY |
++ ((12 << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK) |
++ (keymodsz & LDST_LEN_MASK);
++
++ tmpdesc[idx++] = (u32)keymod;
++
++ /* Compensate BKEK + MAC tag */
++ tmpdesc[idx++] = CMD_SEQ_IN_PTR | (blobsz + 32 + 16);
++
++ tmpdesc[idx++] = (u32)blobbuf;
++ tmpdesc[idx++] = CMD_SEQ_OUT_PTR | blobsz;
++ tmpdesc[idx++] = (u32)outbuf;
++
++ /* Decapsulate from secure memory partition to black blob */
++ tmpdesc[idx] = CMD_OPERATION | OP_TYPE_DECAP_PROTOCOL | OP_PCLID_BLOB |
++ OP_PCL_BLOB_PTXT_SECMEM | OP_PCL_BLOB_BLACK;
++ if (auth)
++ tmpdesc[idx] |= OP_PCL_BLOB_EKT;
++
++ idx++;
++ tmpdesc[0] = CMD_DESC_HDR | HDR_ONE | (idx & HDR_DESCLEN_MASK);
++ dsize = idx * sizeof(u32);
++
++ tdesc = kmalloc(dsize, GFP_KERNEL | GFP_DMA);
++ if (tdesc == NULL)
++ return 0;
++
++ memcpy(tdesc, tmpdesc, dsize);
++ *desc = tdesc;
++ return dsize;
++}
++
++/*
++ * Pseudo-synchronous ring access functions for carrying out key
++ * encapsulation and decapsulation
++ */
++
++struct sm_key_job_result {
++ int error;
++ struct completion completion;
++};
++
++void sm_key_job_done(struct device *dev, u32 *desc, u32 err, void *context)
++{
++ struct sm_key_job_result *res = context;
++
++ res->error = err; /* save off the error for postprocessing */
++ complete(&res->completion); /* mark us complete */
++}
++
++static int sm_key_job(struct device *ksdev, u32 *jobdesc)
++{
++ struct sm_key_job_result testres;
++ struct caam_drv_private_sm *kspriv;
++ int rtn = 0;
++
++ kspriv = dev_get_drvdata(ksdev);
++
++ init_completion(&testres.completion);
++
++ rtn = caam_jr_enqueue(kspriv->smringdev, jobdesc, sm_key_job_done,
++ &testres);
++ if (!rtn) {
++ wait_for_completion_interruptible(&testres.completion);
++ rtn = testres.error;
++ }
++ return rtn;
++}
++
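++/*
++ * Illustrative caller's view, not part of the original patch: the
++ * completion turns the asynchronous ring into a blocking call, so a
++ * descriptor built above is simply submitted and then freed:
++ *
++ *	jstat = sm_key_job(ksdev, jobdesc);
++ *	(nonzero jstat is the raw CAAM status word)
++ *	kfree(jobdesc);
++ */
++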
++/*
++ * Following section establishes the default methods for keystore access
++ * They are NOT intended for use external to this module
++ *
++ * In the present version, these are the only means for the higher-level
++ * interface to deal with the mechanics of accessing the physical keystore
++ */
++
++
++int slot_alloc(struct device *dev, u32 unit, u32 size, u32 *slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++ u32 i;
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_alloc(): requesting slot for %d bytes\n", size);
++#endif
++
++ if (size > smpriv->slot_size)
++ return -EKEYREJECTED;
++
++ for (i = 0; i < ksdata->slot_count; i++) {
++ if (ksdata->slot[i].allocated == 0) {
++ ksdata->slot[i].allocated = 1;
++ (*slot) = i;
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_alloc(): new slot %d allocated\n",
++ *slot);
++#endif
++ return 0;
++ }
++ }
++
++ return -ENOSPC;
++}
++EXPORT_SYMBOL(slot_alloc);
++
++int slot_dealloc(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++ u8 __iomem *slotdata;
++
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_dealloc(): releasing slot %d\n", slot);
++#endif
++ if (slot >= ksdata->slot_count)
++ return -EINVAL;
++ slotdata = ksdata->base_address + slot * smpriv->slot_size;
++
++ if (ksdata->slot[slot].allocated == 1) {
++ /* Forcibly overwrite the data from the keystore */
++ memset(ksdata->base_address + slot * smpriv->slot_size, 0,
++ smpriv->slot_size);
++
++ ksdata->slot[slot].allocated = 0;
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_dealloc(): slot %d released\n", slot);
++#endif
++ return 0;
++ }
++
++ return -EINVAL;
++}
++EXPORT_SYMBOL(slot_dealloc);
++
++void *slot_get_address(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++
++ if (slot >= ksdata->slot_count)
++ return NULL;
++
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_get_address(): slot %d is 0x%08x\n", slot,
++ (u32)ksdata->base_address + slot * smpriv->slot_size);
++#endif
++
++ return ksdata->base_address + slot * smpriv->slot_size;
++}
++
++u32 slot_get_base(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++
++ /*
++ * There could potentially be more than one secure partition object
++ * associated with this keystore. For now, there is just one.
++ */
++
++ (void)slot;
++
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_get_base(): slot %d = 0x%08x\n",
++ slot, (u32)ksdata->base_address);
++#endif
++
++ return (u32)(ksdata->base_address);
++}
++
++u32 slot_get_offset(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *ksdata = smpriv->pagedesc[unit].ksdata;
++
++ if (slot >= ksdata->slot_count)
++ return -EINVAL;
++
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_get_offset(): slot %d = %d\n", slot,
++ slot * smpriv->slot_size);
++#endif
++
++ return slot * smpriv->slot_size;
++}
++
++u32 slot_get_slot_size(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++
++
++#ifdef SM_DEBUG
++ dev_info(dev, "slot_get_slot_size(): slot %d = %d\n", slot,
++ smpriv->slot_size);
++#endif
++ /* All slots are the same size in the default implementation */
++ return smpriv->slot_size;
++}
++
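++/*
++ * Worked example, illustrative only: with the Kconfig default
++ * CRYPTO_DEV_FSL_CAAM_SM_SLOTSIZE of 7, each slot is 2^7 = 128 bytes;
++ * assuming a 4096-byte secure memory page, that yields 32 slots, and
++ * slot 3 sits at offset 3 * 128 = 384 from the partition base.
++ */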
++
++
++int kso_init_data(struct device *dev, u32 unit)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = -EINVAL;
++ struct keystore_data *keystore_data = NULL;
++ u32 slot_count;
++ u32 keystore_data_size;
++
++ /*
++ * Calculate the required size of the keystore data structure, based
++ * on the number of keys that can fit in the partition.
++ */
++ slot_count = smpriv->page_size / smpriv->slot_size;
++#ifdef SM_DEBUG
++ dev_info(dev, "kso_init_data: %d slots initializing\n", slot_count);
++#endif
++
++ keystore_data_size = sizeof(struct keystore_data) +
++ slot_count *
++ sizeof(struct keystore_data_slot_info);
++
++ keystore_data = kzalloc(keystore_data_size, GFP_KERNEL);
++
++ if (keystore_data == NULL) {
++ retval = -ENOSPC;
++ goto out;
++ }
++
++#ifdef SM_DEBUG
++ dev_info(dev, "kso_init_data: keystore data size = %d\n",
++ keystore_data_size);
++#endif
++
++ /*
++ * Place the slot information structure directly after the keystore data
++ * structure.
++ */
++ keystore_data->slot = (struct keystore_data_slot_info *)
++ (keystore_data + 1);
++ keystore_data->slot_count = slot_count;
++
++ smpriv->pagedesc[unit].ksdata = keystore_data;
++ smpriv->pagedesc[unit].ksdata->base_address =
++ smpriv->pagedesc[unit].pg_base;
++
++ retval = 0;
++
++out:
++ if (retval != 0)
++ if (keystore_data != NULL)
++ kfree(keystore_data);
++
++
++ return retval;
++}
++
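++/*
++ * Illustrative layout, derived from kso_init_data() above: a single
++ * allocation carries both structures, with the per-slot array sitting
++ * immediately behind the keystore_data header:
++ *
++ *	[ struct keystore_data | slot_count * keystore_data_slot_info ]
++ *	  ->slot points here ---^
++ */
++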
++void kso_cleanup_data(struct device *dev, u32 unit)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ struct keystore_data *keystore_data = NULL;
++
++ if (smpriv->pagedesc[unit].ksdata != NULL)
++ keystore_data = smpriv->pagedesc[unit].ksdata;
++
++ /* Release the allocated keystore management data */
++ kfree(smpriv->pagedesc[unit].ksdata);
++
++ return;
++}
++
++
++
++/*
++ * Keystore management section
++ */
++
++void sm_init_keystore(struct device *dev)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++
++ smpriv->data_init = kso_init_data;
++ smpriv->data_cleanup = kso_cleanup_data;
++ smpriv->slot_alloc = slot_alloc;
++ smpriv->slot_dealloc = slot_dealloc;
++ smpriv->slot_get_address = slot_get_address;
++ smpriv->slot_get_base = slot_get_base;
++ smpriv->slot_get_offset = slot_get_offset;
++ smpriv->slot_get_slot_size = slot_get_slot_size;
++#ifdef SM_DEBUG
++ dev_info(dev, "sm_init_keystore(): handlers installed\n");
++#endif
++}
++EXPORT_SYMBOL(sm_init_keystore);
++
++/* Return available pages/units */
++u32 sm_detect_keystore_units(struct device *dev)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++
++ return smpriv->localpages;
++}
++EXPORT_SYMBOL(sm_detect_keystore_units);
++
++/*
++ * Do any keystore specific initializations
++ */
++int sm_establish_keystore(struct device *dev, u32 unit)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++
++#ifdef SM_DEBUG
++ dev_info(dev, "sm_establish_keystore(): unit %d initializing\n", unit);
++#endif
++
++ if (smpriv->data_init == NULL)
++ return -EINVAL;
++
++ /* Call the data_init function for any user setup */
++ return smpriv->data_init(dev, unit);
++}
++EXPORT_SYMBOL(sm_establish_keystore);
++
++void sm_release_keystore(struct device *dev, u32 unit)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++
++#ifdef SM_DEBUG
++	dev_info(dev, "sm_release_keystore(): unit %d releasing\n", unit);
++#endif
++ if ((smpriv != NULL) && (smpriv->data_cleanup != NULL))
++ smpriv->data_cleanup(dev, unit);
++
++ return;
++}
++EXPORT_SYMBOL(sm_release_keystore);
++
++/*
++ * The subsequent interface (sm_keystore_*) forms the accessor interface
++ * to the keystore.
++ */
++int sm_keystore_slot_alloc(struct device *dev, u32 unit, u32 size, u32 *slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = -EINVAL;
++
++ spin_lock(&smpriv->kslock);
++
++ if ((smpriv->slot_alloc == NULL) ||
++ (smpriv->pagedesc[unit].ksdata == NULL))
++ goto out;
++
++ retval = smpriv->slot_alloc(dev, unit, size, slot);
++
++out:
++ spin_unlock(&smpriv->kslock);
++ return retval;
++}
++EXPORT_SYMBOL(sm_keystore_slot_alloc);
++
++int sm_keystore_slot_dealloc(struct device *dev, u32 unit, u32 slot)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = -EINVAL;
++
++ spin_lock(&smpriv->kslock);
++
++ if ((smpriv->slot_alloc == NULL) ||
++ (smpriv->pagedesc[unit].ksdata == NULL))
++ goto out;
++
++ retval = smpriv->slot_dealloc(dev, unit, slot);
++out:
++ spin_unlock(&smpriv->kslock);
++ return retval;
++}
++EXPORT_SYMBOL(sm_keystore_slot_dealloc);
++
++int sm_keystore_slot_load(struct device *dev, u32 unit, u32 slot,
++ const u8 *key_data, u32 key_length)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = -EINVAL;
++ u32 slot_size;
++ u32 i;
++ u8 __iomem *slot_location;
++
++ spin_lock(&smpriv->kslock);
++
++ slot_size = smpriv->slot_get_slot_size(dev, unit, slot);
++
++ if (key_length > slot_size) {
++ retval = -EFBIG;
++ goto out;
++ }
++
++ slot_location = smpriv->slot_get_address(dev, unit, slot);
++
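++	/*
++	 * Slot memory lives in device (iomem) space, so copy the key in
++	 * one byte at a time rather than relying on memcpy() semantics.
++	 */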
++ for (i = 0; i < key_length; i++)
++ slot_location[i] = key_data[i];
++
++ retval = 0;
++
++out:
++ spin_unlock(&smpriv->kslock);
++ return retval;
++}
++EXPORT_SYMBOL(sm_keystore_slot_load);
++
++int sm_keystore_slot_read(struct device *dev, u32 unit, u32 slot,
++ u32 key_length, u8 *key_data)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = -EINVAL;
++ u8 __iomem *slot_addr;
++ u32 slot_size;
++
++ spin_lock(&smpriv->kslock);
++
++ slot_addr = smpriv->slot_get_address(dev, unit, slot);
++ slot_size = smpriv->slot_get_slot_size(dev, unit, slot);
++
++ if (key_length > slot_size) {
++ retval = -EKEYREJECTED;
++ goto out;
++ }
++
++ memcpy(key_data, slot_addr, key_length);
++ retval = 0;
++
++out:
++ spin_unlock(&smpriv->kslock);
++ return retval;
++}
++EXPORT_SYMBOL(sm_keystore_slot_read);
++
++int sm_keystore_slot_encapsulate(struct device *dev, u32 unit, u32 inslot,
++ u32 outslot, u16 secretlen, u8 *keymod,
++ u16 keymodlen)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = 0;
++ u32 slot_length, dsize, jstat;
++ u32 __iomem *encapdesc = NULL;
++ u8 __iomem *lkeymod, *inpslotaddr, *outslotaddr;
++ dma_addr_t keymod_dma;
++
++ /* Ensure that the full blob will fit in the key slot */
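++	/*
++	 * A CAAM blob adds 48 bytes to the wrapped secret: a 32-byte
++	 * encapsulated blob key plus a 16-byte MAC tag.
++	 */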
++ slot_length = smpriv->slot_get_slot_size(dev, unit, outslot);
++	if ((secretlen + 48) > slot_length) {
++		retval = -ENOSPC;
++		goto out;
++	}
++
++ /* Get the base addresses of both keystore slots */
++ inpslotaddr = (u8 *)smpriv->slot_get_address(dev, unit, inslot);
++ outslotaddr = (u8 *)smpriv->slot_get_address(dev, unit, outslot);
++
++ /* Build the key modifier */
++	lkeymod = kmalloc(keymodlen, GFP_KERNEL | GFP_DMA);
++	if (!lkeymod) {
++		retval = -ENOMEM;
++		goto out;
++	}
++	memcpy(lkeymod, keymod, keymodlen);
++ keymod_dma = dma_map_single(dev, lkeymod, keymodlen, DMA_TO_DEVICE);
++ dma_sync_single_for_device(dev, keymod_dma, keymodlen, DMA_TO_DEVICE);
++
++ /* Build the encapsulation job descriptor */
++ dsize = blob_encap_desc(&encapdesc, keymod_dma, keymodlen,
++ __pa(inpslotaddr), __pa(outslotaddr),
++ secretlen, 0);
++	if (!dsize) {
++		dev_err(dev, "can't alloc an encap descriptor\n");
++		retval = -ENOMEM;
++	} else {
++		jstat = sm_key_job(dev, encapdesc);
++		if (jstat)
++			retval = -EIO;
++		kfree(encapdesc);
++	}
++
++	dma_unmap_single(dev, keymod_dma, keymodlen, DMA_TO_DEVICE);
++	kfree(lkeymod);
++
++out:
++	return retval;
++}
++EXPORT_SYMBOL(sm_keystore_slot_encapsulate);
++
++int sm_keystore_slot_decapsulate(struct device *dev, u32 unit, u32 inslot,
++ u32 outslot, u16 secretlen, u8 *keymod,
++ u16 keymodlen)
++{
++ struct caam_drv_private_sm *smpriv = dev_get_drvdata(dev);
++ int retval = 0;
++ u32 slot_length, dsize, jstat;
++ u32 __iomem *decapdesc = NULL;
++ u8 __iomem *lkeymod, *inpslotaddr, *outslotaddr;
++ dma_addr_t keymod_dma;
++
++ /* Ensure that the decap data will fit in the key slot */
++ slot_length = smpriv->slot_get_slot_size(dev, unit, outslot);
++	if (secretlen > slot_length) {
++		retval = -ENOSPC;
++		goto out;
++	}
++
++ /* Get the base addresses of both keystore slots */
++ inpslotaddr = (u8 *)smpriv->slot_get_address(dev, unit, inslot);
++ outslotaddr = (u8 *)smpriv->slot_get_address(dev, unit, outslot);
++
++ /* Build the key modifier */
++	lkeymod = kmalloc(keymodlen, GFP_KERNEL | GFP_DMA);
++	if (!lkeymod) {
++		retval = -ENOMEM;
++		goto out;
++	}
++	memcpy(lkeymod, keymod, keymodlen);
++ keymod_dma = dma_map_single(dev, lkeymod, keymodlen, DMA_TO_DEVICE);
++ dma_sync_single_for_device(dev, keymod_dma, keymodlen, DMA_TO_DEVICE);
++
++ /* Build the decapsulation job descriptor */
++ dsize = blob_decap_desc(&decapdesc, keymod_dma, keymodlen,
++ __pa(inpslotaddr), __pa(outslotaddr),
++ secretlen, 0);
++	if (!dsize) {
++		dev_err(dev, "can't alloc a decap descriptor\n");
++		retval = -ENOMEM;
++	} else {
++		jstat = sm_key_job(dev, decapdesc);
++		if (jstat)
++			retval = -EIO;
++		kfree(decapdesc);
++	}
++
++	dma_unmap_single(dev, keymod_dma, keymodlen, DMA_TO_DEVICE);
++	kfree(lkeymod);
++
++out:
++	return retval;
++}
++EXPORT_SYMBOL(sm_keystore_slot_decapsulate);
++
++
++/*
++ * Initialization/shutdown subsystem
++ * Assumes statically-invoked startup/shutdown from the controller driver
++ * for the present time, to be reworked when a device tree becomes
++ * available. This code will not modularize in its present form.
++ *
++ * Also, simply uses ring 0 for execution at present.
++ */
++
++int caam_sm_startup(struct platform_device *pdev)
++{
++ struct device *ctrldev, *smdev;
++ struct caam_drv_private *ctrlpriv;
++ struct caam_drv_private_sm *smpriv;
++ struct caam_drv_private_jr *jrpriv; /* need this for reg page */
++ struct platform_device *sm_pdev;
++ struct sm_page_descriptor *lpagedesc;
++ u32 page, pgstat, lpagect, detectedpage;
++
++ struct device_node *np;
++ ctrldev = &pdev->dev;
++ ctrlpriv = dev_get_drvdata(ctrldev);
++
++ /*
++ * Set up the private block for secure memory
++ * Only one instance is possible
++ */
++ smpriv = kzalloc(sizeof(struct caam_drv_private_sm), GFP_KERNEL);
++ if (smpriv == NULL) {
++ dev_err(ctrldev, "can't alloc private mem for secure memory\n");
++ return -ENOMEM;
++ }
++ smpriv->parentdev = ctrldev; /* copy of parent dev is handy */
++
++ /* Create the dev */
++#ifdef CONFIG_OF
++ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-caam-sm");
++ sm_pdev = of_platform_device_create(np, "caam_sm", ctrldev);
++#else
++ sm_pdev = platform_device_register_data(ctrldev, "caam_sm", 0,
++ smpriv,
++ sizeof(struct caam_drv_private_sm));
++#endif
++ if (sm_pdev == NULL) {
++ kfree(smpriv);
++ return -EINVAL;
++ }
++ smdev = &sm_pdev->dev;
++ dev_set_drvdata(smdev, smpriv);
++ ctrlpriv->smdev = smdev;
++
++ /*
++ * Collect configuration limit data for reference
++ * This batch comes from the partition data/vid registers in perfmon
++ */
++ smpriv->max_pages = ((rd_reg32(&ctrlpriv->ctrl->perfmon.smpart)
++ & SMPART_MAX_NUMPG_MASK) >>
++ SMPART_MAX_NUMPG_SHIFT) + 1;
++ smpriv->top_partition = ((rd_reg32(&ctrlpriv->ctrl->perfmon.smpart)
++ & SMPART_MAX_PNUM_MASK) >>
++ SMPART_MAX_PNUM_SHIFT) + 1;
++ smpriv->top_page = ((rd_reg32(&ctrlpriv->ctrl->perfmon.smpart)
++ & SMPART_MAX_PG_MASK) >> SMPART_MAX_PG_SHIFT) + 1;
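++	/*
++	 * SMVID encodes the page size as a power-of-two multiple of 1 KiB,
++	 * hence the 1024 << field computation below; the slot size is a
++	 * compile-time power-of-two choice.
++	 */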
++ smpriv->page_size = 1024 << ((rd_reg32(&ctrlpriv->ctrl->perfmon.smvid)
++ & SMVID_PG_SIZE_MASK) >> SMVID_PG_SIZE_SHIFT);
++ smpriv->slot_size = 1 << CONFIG_CRYPTO_DEV_FSL_CAAM_SM_SLOTSIZE;
++
++#ifdef SM_DEBUG
++ dev_info(smdev, "max pages = %d, top partition = %d\n",
++ smpriv->max_pages, smpriv->top_partition);
++ dev_info(smdev, "top page = %d, page size = %d (total = %d)\n",
++ smpriv->top_page, smpriv->page_size,
++ smpriv->top_page * smpriv->page_size);
++ dev_info(smdev, "selected slot size = %d\n", smpriv->slot_size);
++#endif
++
++ /*
++ * Now probe for partitions/pages to which we have access. Note that
++ * these have likely been set up by a bootloader or platform
++ * provisioning application, so we have to assume that we "inherit"
++ * a configuration and work within the constraints of what it might be.
++ *
++ * Assume use of the zeroth ring in the present iteration (until
++ * we can divorce the controller and ring drivers, and then assign
++ * an SM instance to any ring instance).
++ */
++ smpriv->smringdev = ctrlpriv->jrdev[0];
++ jrpriv = dev_get_drvdata(smpriv->smringdev);
++ lpagect = 0;
++ lpagedesc = kzalloc(sizeof(struct sm_page_descriptor)
++ * smpriv->max_pages, GFP_KERNEL);
++ if (lpagedesc == NULL) {
++ kfree(smpriv);
++ return -ENOMEM;
++ }
++
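++	/*
++	 * Issue a PAGE_INQUIRY for each possible page, recording only
++	 * those the status register reports as owned by this partition.
++	 */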
++ for (page = 0; page < smpriv->max_pages; page++) {
++ wr_reg32(&jrpriv->rregs->sm_cmd,
++ ((page << SMC_PAGE_SHIFT) & SMC_PAGE_MASK) |
++ (SMC_CMD_PAGE_INQUIRY & SMC_CMD_MASK));
++ pgstat = rd_reg32(&jrpriv->rregs->sm_status);
++ if (((pgstat & SMCS_PGWON_MASK) >> SMCS_PGOWN_SHIFT)
++ == SMCS_PGOWN_OWNED) { /* our page? */
++ lpagedesc[page].phys_pagenum =
++ (pgstat & SMCS_PAGE_MASK) >> SMCS_PAGE_SHIFT;
++ lpagedesc[page].own_part =
++				(pgstat & SMCS_PART_MASK) >> SMCS_PART_SHIFT;
++ lpagedesc[page].pg_base = ctrlpriv->sm_base +
++ ((smpriv->page_size * page) / sizeof(u32));
++ lpagect++;
++#ifdef SM_DEBUG
++ dev_info(smdev,
++ "physical page %d, owning partition = %d\n",
++ lpagedesc[page].phys_pagenum,
++ lpagedesc[page].own_part);
++#endif
++ }
++ }
++
++ smpriv->pagedesc = kzalloc(sizeof(struct sm_page_descriptor) * lpagect,
++ GFP_KERNEL);
++ if (smpriv->pagedesc == NULL) {
++ kfree(lpagedesc);
++ kfree(smpriv);
++ return -ENOMEM;
++ }
++ smpriv->localpages = lpagect;
++
++ detectedpage = 0;
++ for (page = 0; page < smpriv->max_pages; page++) {
++		if (lpagedesc[page].pg_base != NULL) {	/* i.e. a live entry */
++ memcpy(&smpriv->pagedesc[detectedpage],
++ &lpagedesc[page],
++ sizeof(struct sm_page_descriptor));
++#ifdef SM_DEBUG_CONT
++ sm_show_page(smdev, &smpriv->pagedesc[detectedpage]);
++#endif
++ detectedpage++;
++ }
++ }
++
++ kfree(lpagedesc);
++
++ sm_init_keystore(smdev);
++
++ return 0;
++}
++
++void caam_sm_shutdown(struct platform_device *pdev)
++{
++ struct device *ctrldev, *smdev;
++ struct caam_drv_private *priv;
++ struct caam_drv_private_sm *smpriv;
++
++ ctrldev = &pdev->dev;
++ priv = dev_get_drvdata(ctrldev);
++ smdev = priv->smdev;
++ smpriv = dev_get_drvdata(smdev);
++
++ kfree(smpriv->pagedesc);
++ kfree(smpriv);
++}
++EXPORT_SYMBOL(caam_sm_shutdown);
++#ifdef CONFIG_OF
++static void __exit caam_sm_exit(void)
++{
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev)
++ return;
++
++ of_node_put(dev_node);
++
++ caam_sm_shutdown(pdev);
++
++ return;
++}
++
++static int __init caam_sm_init(void)
++{
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++
++ /*
++ * Do of_find_compatible_node() then of_find_device_by_node()
++ * once a functional device tree is available
++ */
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return -ENODEV;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev)
++ return -ENODEV;
++
++ of_node_get(dev_node);
++
++	return caam_sm_startup(pdev);
++}
++
++module_init(caam_sm_init);
++module_exit(caam_sm_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("FSL CAAM Secure Memory / Keystore");
++MODULE_AUTHOR("Freescale Semiconductor - NMSG/MAD");
++#endif
+diff -Nur linux-4.1.3/drivers/crypto/caam/sm_test.c linux-xbian-imx6/drivers/crypto/caam/sm_test.c
+--- linux-4.1.3/drivers/crypto/caam/sm_test.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/crypto/caam/sm_test.c 2015-07-27 23:13:04.213947410 +0200
+@@ -0,0 +1,844 @@
++/*
++ * Secure Memory / Keystore Exemplification Module
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved
++ *
++ * Serves as a functional example, and as a self-contained unit test for
++ * the functionality contained in sm_store.c.
++ *
++ * The example function, caam_sm_example_init(), runs a thread that:
++ *
++ * - initializes a set of fixed keys
++ * - stores one copy in clear buffers
++ * - stores them again in secure memory
++ * - extracts stored keys back out for use
++ * - initializes 3 data buffers for a test:
++ * (1) containing cleartext
++ * (2) to hold ciphertext encrypted with an extracted black key
++ * (3) to hold extracted cleartext decrypted with an equivalent clear key
++ *
++ * The function then builds simple job descriptors that reference the key
++ * material and buffers as initialized, and executes an encryption job
++ * with a black key, and a decryption job using the same key held in the
++ * clear. The output of the decryption job is compared to the original
++ * cleartext; if they don't match, a key handling problem can be assumed,
++ * and the function exits with an error.
++ *
++ * This module could use a substantial amount of refactoring, which may occur
++ * after the API gets some mileage. Furthermore, expect this module to
++ * eventually disappear once the API is integrated into "real" software.
++ */
++
++#include "compat.h"
++#include "intern.h"
++#include "desc.h"
++#include "error.h"
++#include "jr.h"
++#include "sm.h"
++
++static u8 skeymod[] = {
++ 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
++ 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00
++};
++static u8 symkey[] = {
++ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
++ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
++};
++
++static u8 symdata[] = {
++ 0x00, 0x01, 0x02, 0x03, 0x04, 0x0f, 0x06, 0x07,
++ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
++ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
++ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
++ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
++ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
++ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
++ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
++ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
++ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
++ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
++ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
++ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
++ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
++ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
++ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
++ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
++ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
++ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
++ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
++ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
++ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
++ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
++ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
++ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
++ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
++ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
++ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
++ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
++ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
++ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
++ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
++};
++
++static int mk_job_desc(u32 *desc, dma_addr_t key, u16 keysz, dma_addr_t indata,
++ dma_addr_t outdata, u16 sz, u32 cipherdir, u32 keymode)
++{
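++	/*
++	 * Lay out an 8-word job descriptor: a class 1 KEY command, an
++	 * ECB cipher OPERATION, a FIFO LOAD of the input data, and a
++	 * FIFO STORE of the result; the header word is filled in last.
++	 */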
++ desc[1] = CMD_KEY | CLASS_1 | (keysz & KEY_LENGTH_MASK) | keymode;
++ desc[2] = (u32)key;
++ desc[3] = CMD_OPERATION | OP_TYPE_CLASS1_ALG | OP_ALG_AAI_ECB |
++ cipherdir;
++ desc[4] = CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 |
++ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1 | sz;
++ desc[5] = (u32)indata;
++ desc[6] = CMD_FIFO_STORE | FIFOST_TYPE_MESSAGE_DATA | sz;
++ desc[7] = (u32)outdata;
++
++ desc[0] = CMD_DESC_HDR | HDR_ONE | (8 & HDR_DESCLEN_MASK);
++ return 8 * sizeof(u32);
++}
++
++struct exec_test_result {
++ int error;
++ struct completion completion;
++};
++
++void exec_test_done(struct device *dev, u32 *desc, u32 err, void *context)
++{
++ struct exec_test_result *res = context;
++
++ if (err) {
++ char tmp[CAAM_ERROR_STR_MAX];
++ dev_err(dev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
++ }
++
++ res->error = err;
++ complete(&res->completion);
++}
++
++static int exec_test_job(struct device *ksdev, u32 *jobdesc)
++{
++ struct exec_test_result testres;
++ struct caam_drv_private_sm *kspriv;
++ int rtn = 0;
++
++ kspriv = dev_get_drvdata(ksdev);
++
++ init_completion(&testres.completion);
++
++ rtn = caam_jr_enqueue(kspriv->smringdev, jobdesc, exec_test_done,
++ &testres);
++ if (!rtn) {
++ wait_for_completion_interruptible(&testres.completion);
++ rtn = testres.error;
++ }
++ return rtn;
++}
++
++
++int caam_sm_example_init(struct platform_device *pdev)
++{
++ struct device *ctrldev, *ksdev;
++ struct caam_drv_private *ctrlpriv;
++ struct caam_drv_private_sm *kspriv;
++ u32 unit, units, jdescsz;
++ int stat, jstat, rtnval = 0;
++ u8 __iomem *syminp, *symint, *symout = NULL;
++ dma_addr_t syminp_dma, symint_dma, symout_dma;
++ u8 __iomem *black_key_des, *black_key_aes128;
++ u8 __iomem *black_key_aes256;
++ dma_addr_t black_key_des_dma, black_key_aes128_dma;
++ dma_addr_t black_key_aes256_dma;
++ u8 __iomem *clear_key_des, *clear_key_aes128, *clear_key_aes256;
++ dma_addr_t clear_key_des_dma, clear_key_aes128_dma;
++ dma_addr_t clear_key_aes256_dma;
++ u32 __iomem *jdesc;
++ u32 keyslot_des, keyslot_aes128, keyslot_aes256 = 0;
++
++ jdesc = NULL;
++ black_key_des = black_key_aes128 = black_key_aes256 = NULL;
++ clear_key_des = clear_key_aes128 = clear_key_aes256 = NULL;
++
++ /* We can lose this cruft once we can get a pdev by name */
++ ctrldev = &pdev->dev;
++ ctrlpriv = dev_get_drvdata(ctrldev);
++ ksdev = ctrlpriv->smdev;
++ kspriv = dev_get_drvdata(ksdev);
++ if (kspriv == NULL)
++ return -ENODEV;
++
++ /* Now that we have the dev for the single SM instance, connect */
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test_init() running\n");
++#endif
++ /* Probe to see what keystores are available to us */
++ units = sm_detect_keystore_units(ksdev);
++ if (!units)
++ dev_err(ksdev, "caam_sm_test: no keystore units available\n");
++
++ /*
++ * MX6 bootloader stores some stuff in unit 0, so let's
++ * use 1 or above
++ */
++ if (units < 2) {
++ dev_err(ksdev, "caam_sm_test: insufficient keystore units\n");
++ return -ENODEV;
++ }
++ unit = 1;
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: %d keystore units available\n", units);
++#endif
++
++ /* Initialize/Establish Keystore */
++	sm_establish_keystore(ksdev, unit);	/* Initialize store in #1 */
++
++ /*
++ * Top of main test thread
++ */
++
++ /* Allocate test data blocks (input, intermediate, output) */
++ syminp = kmalloc(256, GFP_KERNEL | GFP_DMA);
++ symint = kmalloc(256, GFP_KERNEL | GFP_DMA);
++ symout = kmalloc(256, GFP_KERNEL | GFP_DMA);
++ if ((syminp == NULL) || (symint == NULL) || (symout == NULL)) {
++ rtnval = -ENOMEM;
++ dev_err(ksdev, "caam_sm_test: can't get test data buffers\n");
++ goto freemem;
++ }
++
++ /* Allocate storage for 3 black keys: encapsulated 8, 16, 32 */
++ black_key_des = kmalloc(16, GFP_KERNEL | GFP_DMA); /* padded to 16... */
++ black_key_aes128 = kmalloc(16, GFP_KERNEL | GFP_DMA);
++	black_key_aes256 = kmalloc(32, GFP_KERNEL | GFP_DMA);
++ if ((black_key_des == NULL) || (black_key_aes128 == NULL) ||
++ (black_key_aes256 == NULL)) {
++ rtnval = -ENOMEM;
++		dev_err(ksdev, "caam_sm_test: can't get black key buffers\n");
++ goto freemem;
++ }
++
++ clear_key_des = kmalloc(8, GFP_KERNEL | GFP_DMA);
++ clear_key_aes128 = kmalloc(16, GFP_KERNEL | GFP_DMA);
++ clear_key_aes256 = kmalloc(32, GFP_KERNEL | GFP_DMA);
++ if ((clear_key_des == NULL) || (clear_key_aes128 == NULL) ||
++ (clear_key_aes256 == NULL)) {
++ rtnval = -ENOMEM;
++ dev_err(ksdev, "caam_sm_test: can't get clear key buffers\n");
++ goto freemem;
++ }
++
++ /* Allocate storage for job descriptor */
++ jdesc = kmalloc(8 * sizeof(u32), GFP_KERNEL | GFP_DMA);
++ if (jdesc == NULL) {
++ rtnval = -ENOMEM;
++ dev_err(ksdev, "caam_sm_test: can't get descriptor buffers\n");
++ goto freemem;
++ }
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: all buffers allocated\n");
++#endif
++
++ /* Load up input data block, clear outputs */
++ memcpy(syminp, symdata, 256);
++ memset(symint, 0, 256);
++ memset(symout, 0, 256);
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[0], syminp[1], syminp[2], syminp[3],
++ syminp[4], syminp[5], syminp[6], syminp[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[0], symout[1], symout[2], symout[3],
++ symout[4], symout[5], symout[6], symout[7]);
++
++ dev_info(ksdev, "caam_sm_test: data buffers initialized\n");
++#endif
++
++ /* Load up clear keys */
++ memcpy(clear_key_des, symkey, 8);
++ memcpy(clear_key_aes128, symkey, 16);
++ memcpy(clear_key_aes256, symkey, 32);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: all clear keys loaded\n");
++#endif
++
++ /*
++ * Place clear keys in keystore.
++ * All the interesting stuff happens here.
++ */
++	/* 8 byte DES key */
++ stat = sm_keystore_slot_alloc(ksdev, unit, 8, &keyslot_des);
++ if (stat)
++ goto freemem;
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: 8 byte key slot in %d\n", keyslot_des);
++#endif
++ stat = sm_keystore_slot_load(ksdev, unit, keyslot_des, clear_key_des,
++ 8);
++ if (stat) {
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: can't load 8 byte key in %d\n",
++ keyslot_des);
++#endif
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
++ goto freemem;
++ }
++
++	/* 16 byte AES key */
++ stat = sm_keystore_slot_alloc(ksdev, unit, 16, &keyslot_aes128);
++ if (stat) {
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
++ goto freemem;
++ }
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: 16 byte key slot in %d\n",
++ keyslot_aes128);
++#endif
++ stat = sm_keystore_slot_load(ksdev, unit, keyslot_aes128,
++ clear_key_aes128, 16);
++ if (stat) {
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: can't load 16 byte key in %d\n",
++ keyslot_aes128);
++#endif
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes128);
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
++ goto freemem;
++ }
++
++	/* 32 byte AES key */
++ stat = sm_keystore_slot_alloc(ksdev, unit, 32, &keyslot_aes256);
++ if (stat) {
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes128);
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
++ goto freemem;
++ }
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: 32 byte key slot in %d\n",
++ keyslot_aes256);
++#endif
++ stat = sm_keystore_slot_load(ksdev, unit, keyslot_aes256,
++ clear_key_aes256, 32);
++ if (stat) {
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: can't load 32 byte key in %d\n",
++			 keyslot_aes256);
++#endif
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes256);
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes128);
++ sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
++ goto freemem;
++ }
++
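++	/*
++	 * Each slot is round-tripped through blob encapsulation and
++	 * decapsulation, which replaces its cleartext contents with a
++	 * black (CAAM-covered) key of the same length.
++	 */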
++ /* Encapsulate all keys as SM blobs */
++ stat = sm_keystore_slot_encapsulate(ksdev, unit, keyslot_des,
++ keyslot_des, 8, skeymod, 8);
++ if (stat) {
++ dev_info(ksdev, "caam_sm_test: can't encapsulate DES key\n");
++ goto freekeys;
++ }
++
++ stat = sm_keystore_slot_encapsulate(ksdev, unit, keyslot_aes128,
++ keyslot_aes128, 16, skeymod, 8);
++ if (stat) {
++ dev_info(ksdev, "caam_sm_test: can't encapsulate AES128 key\n");
++ goto freekeys;
++ }
++
++ stat = sm_keystore_slot_encapsulate(ksdev, unit, keyslot_aes256,
++ keyslot_aes256, 32, skeymod, 8);
++ if (stat) {
++ dev_info(ksdev, "caam_sm_test: can't encapsulate AES256 key\n");
++ goto freekeys;
++ }
++
++ /* Now decapsulate as black key blobs */
++ stat = sm_keystore_slot_decapsulate(ksdev, unit, keyslot_des,
++ keyslot_des, 8, skeymod, 8);
++ if (stat) {
++ dev_info(ksdev, "caam_sm_test: can't decapsulate DES key\n");
++ goto freekeys;
++ }
++
++ stat = sm_keystore_slot_decapsulate(ksdev, unit, keyslot_aes128,
++ keyslot_aes128, 16, skeymod, 8);
++ if (stat) {
++ dev_info(ksdev, "caam_sm_test: can't decapsulate AES128 key\n");
++ goto freekeys;
++ }
++
++ stat = sm_keystore_slot_decapsulate(ksdev, unit, keyslot_aes256,
++ keyslot_aes256, 32, skeymod, 8);
++ if (stat) {
++		dev_info(ksdev, "caam_sm_test: can't decapsulate AES256 key\n");
++ goto freekeys;
++ }
++
++ /* Extract 8/16/32 byte black keys */
++ sm_keystore_slot_read(ksdev, unit, keyslot_des, 8, black_key_des);
++ sm_keystore_slot_read(ksdev, unit, keyslot_aes128, 16,
++ black_key_aes128);
++ sm_keystore_slot_read(ksdev, unit, keyslot_aes256, 32,
++ black_key_aes256);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: all black keys extracted\n");
++#endif
++
++ /* DES encrypt using 8 byte black key */
++ black_key_des_dma = dma_map_single(ksdev, black_key_des, 8,
++ DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, black_key_des_dma, 8, DMA_TO_DEVICE);
++ syminp_dma = dma_map_single(ksdev, syminp, 256, DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
++ symint_dma = dma_map_single(ksdev, symint, 256, DMA_FROM_DEVICE);
++
++ jdescsz = mk_job_desc(jdesc, black_key_des_dma, 8, syminp_dma,
++ symint_dma, 256,
++ OP_ALG_ENCRYPT | OP_ALG_ALGSEL_DES, 0);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "jobdesc:\n");
++ dev_info(ksdev, "0x%08x\n", jdesc[0]);
++ dev_info(ksdev, "0x%08x\n", jdesc[1]);
++ dev_info(ksdev, "0x%08x\n", jdesc[2]);
++ dev_info(ksdev, "0x%08x\n", jdesc[3]);
++ dev_info(ksdev, "0x%08x\n", jdesc[4]);
++ dev_info(ksdev, "0x%08x\n", jdesc[5]);
++ dev_info(ksdev, "0x%08x\n", jdesc[6]);
++ dev_info(ksdev, "0x%08x\n", jdesc[7]);
++#endif
++
++ jstat = exec_test_job(ksdev, jdesc);
++
++ dma_sync_single_for_cpu(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
++ dma_unmap_single(ksdev, black_key_des_dma, 8, DMA_TO_DEVICE);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "input block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[0], syminp[1], syminp[2], syminp[3],
++ syminp[4], syminp[5], syminp[6], syminp[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[8], syminp[9], syminp[10], syminp[11],
++ syminp[12], syminp[13], syminp[14], syminp[15]);
++ dev_info(ksdev, "intermediate block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[8], symint[9], symint[10], symint[11],
++ symint[12], symint[13], symint[14], symint[15]);
++ dev_info(ksdev, "caam_sm_test: encrypt cycle with 8 byte key\n");
++#endif
++
++ /* DES decrypt using 8 byte clear key */
++ clear_key_des_dma = dma_map_single(ksdev, clear_key_des, 8,
++ DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, clear_key_des_dma, 8, DMA_TO_DEVICE);
++ symint_dma = dma_map_single(ksdev, symint, 256, DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, symint_dma, 256, DMA_TO_DEVICE);
++ symout_dma = dma_map_single(ksdev, symout, 256, DMA_FROM_DEVICE);
++
++ jdescsz = mk_job_desc(jdesc, clear_key_des_dma, 8, symint_dma,
++ symout_dma, 256,
++ OP_ALG_DECRYPT | OP_ALG_ALGSEL_DES, 0);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "jobdesc:\n");
++ dev_info(ksdev, "0x%08x\n", jdesc[0]);
++ dev_info(ksdev, "0x%08x\n", jdesc[1]);
++ dev_info(ksdev, "0x%08x\n", jdesc[2]);
++ dev_info(ksdev, "0x%08x\n", jdesc[3]);
++ dev_info(ksdev, "0x%08x\n", jdesc[4]);
++ dev_info(ksdev, "0x%08x\n", jdesc[5]);
++ dev_info(ksdev, "0x%08x\n", jdesc[6]);
++ dev_info(ksdev, "0x%08x\n", jdesc[7]);
++#endif
++
++ jstat = exec_test_job(ksdev, jdesc);
++
++ dma_sync_single_for_cpu(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symint_dma, 256, DMA_TO_DEVICE);
++ dma_unmap_single(ksdev, clear_key_des_dma, 8, DMA_TO_DEVICE);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "intermediate block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[8], symint[9], symint[10], symint[11],
++ symint[12], symint[13], symint[14], symint[15]);
++ dev_info(ksdev, "decrypted block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[0], symout[1], symout[2], symout[3],
++ symout[4], symout[5], symout[6], symout[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[8], symout[9], symout[10], symout[11],
++ symout[12], symout[13], symout[14], symout[15]);
++ dev_info(ksdev, "caam_sm_test: decrypt cycle with 8 byte key\n");
++#endif
++
++ /* Check result */
++ if (memcmp(symout, syminp, 256)) {
++ dev_info(ksdev, "caam_sm_test: 8-byte key test mismatch\n");
++ rtnval = -1;
++ goto freekeys;
++ } else
++ dev_info(ksdev, "caam_sm_test: 8-byte key test match OK\n");
++
++ /* AES-128 encrypt using 16 byte black key */
++ black_key_aes128_dma = dma_map_single(ksdev, black_key_aes128, 16,
++ DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, black_key_aes128_dma, 16,
++ DMA_TO_DEVICE);
++ syminp_dma = dma_map_single(ksdev, syminp, 256, DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
++ symint_dma = dma_map_single(ksdev, symint, 256, DMA_FROM_DEVICE);
++
++ jdescsz = mk_job_desc(jdesc, black_key_aes128_dma, 16, syminp_dma,
++ symint_dma, 256,
++ OP_ALG_ENCRYPT | OP_ALG_ALGSEL_AES, 0);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "jobdesc:\n");
++ dev_info(ksdev, "0x%08x\n", jdesc[0]);
++ dev_info(ksdev, "0x%08x\n", jdesc[1]);
++ dev_info(ksdev, "0x%08x\n", jdesc[2]);
++ dev_info(ksdev, "0x%08x\n", jdesc[3]);
++ dev_info(ksdev, "0x%08x\n", jdesc[4]);
++ dev_info(ksdev, "0x%08x\n", jdesc[5]);
++ dev_info(ksdev, "0x%08x\n", jdesc[6]);
++ dev_info(ksdev, "0x%08x\n", jdesc[7]);
++#endif
++
++ jstat = exec_test_job(ksdev, jdesc);
++
++ dma_sync_single_for_cpu(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
++ dma_unmap_single(ksdev, black_key_aes128_dma, 16, DMA_TO_DEVICE);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "input block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[0], syminp[1], syminp[2], syminp[3],
++ syminp[4], syminp[5], syminp[6], syminp[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[8], syminp[9], syminp[10], syminp[11],
++ syminp[12], syminp[13], syminp[14], syminp[15]);
++ dev_info(ksdev, "intermediate block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[8], symint[9], symint[10], symint[11],
++ symint[12], symint[13], symint[14], symint[15]);
++ dev_info(ksdev, "caam_sm_test: encrypt cycle with 16 byte key\n");
++#endif
++
++ /* AES-128 decrypt using 16 byte clear key */
++ clear_key_aes128_dma = dma_map_single(ksdev, clear_key_aes128, 16,
++ DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, clear_key_aes128_dma, 16,
++ DMA_TO_DEVICE);
++ symint_dma = dma_map_single(ksdev, symint, 256, DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, symint_dma, 256, DMA_TO_DEVICE);
++ symout_dma = dma_map_single(ksdev, symout, 256, DMA_FROM_DEVICE);
++
++ jdescsz = mk_job_desc(jdesc, clear_key_aes128_dma, 16, symint_dma,
++ symout_dma, 256,
++ OP_ALG_DECRYPT | OP_ALG_ALGSEL_AES, 0);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "jobdesc:\n");
++ dev_info(ksdev, "0x%08x\n", jdesc[0]);
++ dev_info(ksdev, "0x%08x\n", jdesc[1]);
++ dev_info(ksdev, "0x%08x\n", jdesc[2]);
++ dev_info(ksdev, "0x%08x\n", jdesc[3]);
++ dev_info(ksdev, "0x%08x\n", jdesc[4]);
++ dev_info(ksdev, "0x%08x\n", jdesc[5]);
++ dev_info(ksdev, "0x%08x\n", jdesc[6]);
++ dev_info(ksdev, "0x%08x\n", jdesc[7]);
++#endif
++ jstat = exec_test_job(ksdev, jdesc);
++
++ dma_sync_single_for_cpu(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symint_dma, 256, DMA_TO_DEVICE);
++ dma_unmap_single(ksdev, clear_key_aes128_dma, 16, DMA_TO_DEVICE);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "intermediate block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[8], symint[9], symint[10], symint[11],
++ symint[12], symint[13], symint[14], symint[15]);
++ dev_info(ksdev, "decrypted block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[0], symout[1], symout[2], symout[3],
++ symout[4], symout[5], symout[6], symout[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[8], symout[9], symout[10], symout[11],
++ symout[12], symout[13], symout[14], symout[15]);
++ dev_info(ksdev, "caam_sm_test: decrypt cycle with 16 byte key\n");
++#endif
++
++ /* Check result */
++ if (memcmp(symout, syminp, 256)) {
++ dev_info(ksdev, "caam_sm_test: 16-byte key test mismatch\n");
++ rtnval = -1;
++ goto freekeys;
++ } else
++ dev_info(ksdev, "caam_sm_test: 16-byte key test match OK\n");
++
++ /* AES-256 encrypt using 32 byte black key */
++ black_key_aes256_dma = dma_map_single(ksdev, black_key_aes256, 32,
++ DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, black_key_aes256_dma, 32,
++ DMA_TO_DEVICE);
++ syminp_dma = dma_map_single(ksdev, syminp, 256, DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
++ symint_dma = dma_map_single(ksdev, symint, 256, DMA_FROM_DEVICE);
++
++ jdescsz = mk_job_desc(jdesc, black_key_aes256_dma, 32, syminp_dma,
++ symint_dma, 256,
++ OP_ALG_ENCRYPT | OP_ALG_ALGSEL_AES, 0);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "jobdesc:\n");
++ dev_info(ksdev, "0x%08x\n", jdesc[0]);
++ dev_info(ksdev, "0x%08x\n", jdesc[1]);
++ dev_info(ksdev, "0x%08x\n", jdesc[2]);
++ dev_info(ksdev, "0x%08x\n", jdesc[3]);
++ dev_info(ksdev, "0x%08x\n", jdesc[4]);
++ dev_info(ksdev, "0x%08x\n", jdesc[5]);
++ dev_info(ksdev, "0x%08x\n", jdesc[6]);
++ dev_info(ksdev, "0x%08x\n", jdesc[7]);
++#endif
++
++ jstat = exec_test_job(ksdev, jdesc);
++
++ dma_sync_single_for_cpu(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symint_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, syminp_dma, 256, DMA_TO_DEVICE);
++ dma_unmap_single(ksdev, black_key_aes256_dma, 32, DMA_TO_DEVICE);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "input block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[0], syminp[1], syminp[2], syminp[3],
++ syminp[4], syminp[5], syminp[6], syminp[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ syminp[8], syminp[9], syminp[10], syminp[11],
++ syminp[12], syminp[13], syminp[14], syminp[15]);
++ dev_info(ksdev, "intermediate block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[8], symint[9], symint[10], symint[11],
++ symint[12], symint[13], symint[14], symint[15]);
++ dev_info(ksdev, "caam_sm_test: encrypt cycle with 32 byte key\n");
++#endif
++
++	/* AES-256 decrypt using 32 byte clear key */
++ clear_key_aes256_dma = dma_map_single(ksdev, clear_key_aes256, 32,
++ DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, clear_key_aes256_dma, 32,
++ DMA_TO_DEVICE);
++ symint_dma = dma_map_single(ksdev, symint, 256, DMA_TO_DEVICE);
++ dma_sync_single_for_device(ksdev, symint_dma, 256, DMA_TO_DEVICE);
++ symout_dma = dma_map_single(ksdev, symout, 256, DMA_FROM_DEVICE);
++
++ jdescsz = mk_job_desc(jdesc, clear_key_aes256_dma, 32, symint_dma,
++ symout_dma, 256,
++ OP_ALG_DECRYPT | OP_ALG_ALGSEL_AES, 0);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "jobdesc:\n");
++ dev_info(ksdev, "0x%08x\n", jdesc[0]);
++ dev_info(ksdev, "0x%08x\n", jdesc[1]);
++ dev_info(ksdev, "0x%08x\n", jdesc[2]);
++ dev_info(ksdev, "0x%08x\n", jdesc[3]);
++ dev_info(ksdev, "0x%08x\n", jdesc[4]);
++ dev_info(ksdev, "0x%08x\n", jdesc[5]);
++ dev_info(ksdev, "0x%08x\n", jdesc[6]);
++ dev_info(ksdev, "0x%08x\n", jdesc[7]);
++#endif
++
++ jstat = exec_test_job(ksdev, jdesc);
++
++ dma_sync_single_for_cpu(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symout_dma, 256, DMA_FROM_DEVICE);
++ dma_unmap_single(ksdev, symint_dma, 256, DMA_TO_DEVICE);
++ dma_unmap_single(ksdev, clear_key_aes256_dma, 32, DMA_TO_DEVICE);
++
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "intermediate block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[0], symint[1], symint[2], symint[3],
++ symint[4], symint[5], symint[6], symint[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symint[8], symint[9], symint[10], symint[11],
++ symint[12], symint[13], symint[14], symint[15]);
++ dev_info(ksdev, "decrypted block:\n");
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[0], symout[1], symout[2], symout[3],
++ symout[4], symout[5], symout[6], symout[7]);
++ dev_info(ksdev, "0x%02x 0x%02x 0x%02x 0x%02x " \
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ symout[8], symout[9], symout[10], symout[11],
++ symout[12], symout[13], symout[14], symout[15]);
++ dev_info(ksdev, "caam_sm_test: decrypt cycle with 32 byte key\n");
++#endif
++
++ /* Check result */
++ if (memcmp(symout, syminp, 256)) {
++ dev_info(ksdev, "caam_sm_test: 32-byte key test mismatch\n");
++ rtnval = -1;
++ goto freekeys;
++ } else
++ dev_info(ksdev, "caam_sm_test: 32-byte key test match OK\n");
++
++
++ /* Remove 8/16/32 byte keys from keystore */
++freekeys:
++ stat = sm_keystore_slot_dealloc(ksdev, unit, keyslot_des);
++ if (stat)
++ dev_info(ksdev, "caam_sm_test: can't release slot %d\n",
++ keyslot_des);
++
++ stat = sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes128);
++ if (stat)
++ dev_info(ksdev, "caam_sm_test: can't release slot %d\n",
++ keyslot_aes128);
++
++ stat = sm_keystore_slot_dealloc(ksdev, unit, keyslot_aes256);
++ if (stat)
++ dev_info(ksdev, "caam_sm_test: can't release slot %d\n",
++ keyslot_aes256);
++
++
++ /* Free resources */
++freemem:
++#ifdef SM_TEST_DETAIL
++ dev_info(ksdev, "caam_sm_test: cleaning up\n");
++#endif
++ kfree(syminp);
++ kfree(symint);
++ kfree(symout);
++ kfree(clear_key_des);
++ kfree(clear_key_aes128);
++ kfree(clear_key_aes256);
++ kfree(black_key_des);
++ kfree(black_key_aes128);
++ kfree(black_key_aes256);
++ kfree(jdesc);
++
++ /* Disconnect from keystore and leave */
++ sm_release_keystore(ksdev, unit);
++
++ return rtnval;
++}
++EXPORT_SYMBOL(caam_sm_example_init);
++
++void caam_sm_example_shutdown(void)
++{
++ /* unused in present version */
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++
++ /*
++ * Do of_find_compatible_node() then of_find_device_by_node()
++ * once a functional device tree is available
++ */
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev)
++ return;
++
++	of_node_put(dev_node);
++
++}
++
++static int __init caam_sm_test_init(void)
++{
++ struct device_node *dev_node;
++ struct platform_device *pdev;
++
++ /*
++ * Do of_find_compatible_node() then of_find_device_by_node()
++ * once a functional device tree is available
++ */
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
++ if (!dev_node) {
++ dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
++ if (!dev_node)
++ return -ENODEV;
++ }
++
++ pdev = of_find_device_by_node(dev_node);
++ if (!pdev)
++ return -ENODEV;
++
++ of_node_put(dev_node);
++
++ caam_sm_example_init(pdev);
++
++ return 0;
++}
++
++
++/* Module-based initialization needs to wait for dev tree */
++#ifdef CONFIG_OF
++module_init(caam_sm_test_init);
++module_exit(caam_sm_example_shutdown);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("FSL CAAM Keystore Usage Example");
++MODULE_AUTHOR("Freescale Semiconductor - NMSG/MAD");
++#endif
+diff -Nur linux-4.1.3/drivers/crypto/caam/snvsregs.h linux-xbian-imx6/drivers/crypto/caam/snvsregs.h
+--- linux-4.1.3/drivers/crypto/caam/snvsregs.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/crypto/caam/snvsregs.h 2015-07-27 23:13:04.213947410 +0200
+@@ -0,0 +1,237 @@
++/*
++ * SNVS hardware register-level view
++ *
++ * Copyright (C) 2012-2014 Freescale Semiconductor, Inc., All Rights Reserved
++ */
++
++#ifndef SNVSREGS_H
++#define SNVSREGS_H
++
++#include <linux/types.h>
++#include <linux/io.h>
++
++/*
++ * SNVS High Power Domain
++ * Includes security violations, HA counter, RTC, alarm
++ */
++struct snvs_hp {
++ u32 lock; /* HPLR - HP Lock */
++ u32 cmd; /* HPCOMR - HP Command */
++ u32 ctl; /* HPCR - HP Control */
++ u32 secvio_intcfg; /* HPSICR - Security Violation Int Config */
++ u32 secvio_ctl; /* HPSVCR - Security Violation Control */
++ u32 status; /* HPSR - HP Status */
++ u32 secvio_status; /* HPSVSR - Security Violation Status */
++ u32 ha_counteriv; /* High Assurance Counter IV */
++ u32 ha_counter; /* High Assurance Counter */
++ u32 rtc_msb; /* Real Time Clock/Counter MSB */
++ u32 rtc_lsb; /* Real Time Counter LSB */
++ u32 time_alarm_msb; /* Time Alarm MSB */
++ u32 time_alarm_lsb; /* Time Alarm LSB */
++};
++
++#define HP_LOCK_HAC_LCK 0x00040000
++#define HP_LOCK_HPSICR_LCK 0x00020000
++#define HP_LOCK_HPSVCR_LCK 0x00010000
++#define HP_LOCK_MKEYSEL_LCK 0x00000200
++#define HP_LOCK_TAMPCFG_LCK 0x00000100
++#define HP_LOCK_TAMPFLT_LCK 0x00000080
++#define HP_LOCK_SECVIO_LCK 0x00000040
++#define HP_LOCK_GENP_LCK 0x00000020
++#define HP_LOCK_MONOCTR_LCK 0x00000010
++#define HP_LOCK_CALIB_LCK 0x00000008
++#define HP_LOCK_SRTC_LCK 0x00000004
++#define HP_LOCK_ZMK_RD_LCK 0x00000002
++#define HP_LOCK_ZMK_WT_LCK 0x00000001
++
++#define HP_CMD_NONPRIV_AXS 0x80000000
++#define HP_CMD_HAC_STOP 0x00080000
++#define HP_CMD_HAC_CLEAR 0x00040000
++#define HP_CMD_HAC_LOAD 0x00020000
++#define HP_CMD_HAC_CFG_EN 0x00010000
++#define HP_CMD_SNVS_MSTR_KEY 0x00002000
++#define HP_CMD_PROG_ZMK 0x00001000
++#define HP_CMD_SW_LPSV 0x00000400
++#define HP_CMD_SW_FSV 0x00000200
++#define HP_CMD_SW_SV 0x00000100
++#define HP_CMD_LP_SWR_DIS 0x00000020
++#define HP_CMD_LP_SWR 0x00000010
++#define HP_CMD_SSM_SFNS_DIS 0x00000004
++#define HP_CMD_SSM_ST_DIS 0x00000002
++#define HP_CMD_SMM_ST 0x00000001
++
++#define HP_CTL_TIME_SYNC 0x00010000
++#define HP_CTL_CAL_VAL_SHIFT 10
++#define HP_CTL_CAL_VAL_MASK	(0x1f << HP_CTL_CAL_VAL_SHIFT)
++#define HP_CTL_CALIB_EN 0x00000100
++#define HP_CTL_PI_FREQ_SHIFT 4
++#define HP_CTL_PI_FREQ_MASK (0xf << HP_CTL_PI_FREQ_SHIFT)
++#define HP_CTL_PI_EN 0x00000008
++#define HP_CTL_TIMEALARM_EN 0x00000002
++#define HP_CTL_RTC_EN 0x00000001
++
++#define HP_SECVIO_INTEN_EN 0x10000000
++#define HP_SECVIO_INTEN_SRC5 0x00000020
++#define HP_SECVIO_INTEN_SRC4 0x00000010
++#define HP_SECVIO_INTEN_SRC3 0x00000008
++#define HP_SECVIO_INTEN_SRC2 0x00000004
++#define HP_SECVIO_INTEN_SRC1 0x00000002
++#define HP_SECVIO_INTEN_SRC0 0x00000001
++#define HP_SECVIO_INTEN_ALL 0x8000003f
++
++#define HP_SECVIO_ICTL_CFG_SHIFT 30
++#define HP_SECVIO_ICTL_CFG_MASK (0x3 << HP_SECVIO_ICTL_CFG_SHIFT)
++#define HP_SECVIO_ICTL_CFG5_SHIFT 5
++#define HP_SECVIO_ICTL_CFG5_MASK (0x3 << HP_SECVIO_ICTL_CFG5_SHIFT)
++#define HP_SECVIO_ICTL_CFG_DISABLE 0
++#define HP_SECVIO_ICTL_CFG_NONFATAL 1
++#define HP_SECVIO_ICTL_CFG_FATAL 2
++#define HP_SECVIO_ICTL_CFG4_FATAL 0x00000010
++#define HP_SECVIO_ICTL_CFG3_FATAL 0x00000008
++#define HP_SECVIO_ICTL_CFG2_FATAL 0x00000004
++#define HP_SECVIO_ICTL_CFG1_FATAL 0x00000002
++#define HP_SECVIO_ICTL_CFG0_FATAL 0x00000001
++
++#define HP_STATUS_ZMK_ZERO 0x80000000
++#define HP_STATUS_OTPMK_ZERO 0x08000000
++#define HP_STATUS_OTPMK_SYN_SHIFT 16
++#define HP_STATUS_OTPMK_SYN_MASK (0x1ff << HP_STATUS_OTPMK_SYN_SHIFT)
++#define HP_STATUS_SSM_ST_SHIFT 8
++#define HP_STATUS_SSM_ST_MASK (0xf << HP_STATUS_SSM_ST_SHIFT)
++#define HP_STATUS_SSM_ST_INIT 0
++#define HP_STATUS_SSM_ST_HARDFAIL 1
++#define HP_STATUS_SSM_ST_SOFTFAIL 3
++#define HP_STATUS_SSM_ST_INITINT 8
++#define HP_STATUS_SSM_ST_CHECK 9
++#define HP_STATUS_SSM_ST_NONSECURE 11
++#define HP_STATUS_SSM_ST_TRUSTED 13
++#define HP_STATUS_SSM_ST_SECURE 15
++
++#define HP_SECVIOST_ZMK_ECC_FAIL 0x08000000 /* write to clear */
++#define HP_SECVIOST_ZMK_SYN_SHIFT 16
++#define HP_SECVIOST_ZMK_SYN_MASK (0x1ff << HP_SECVIOST_ZMK_SYN_SHIFT)
++#define HP_SECVIOST_SECVIO5 0x00000020
++#define HP_SECVIOST_SECVIO4 0x00000010
++#define HP_SECVIOST_SECVIO3 0x00000008
++#define HP_SECVIOST_SECVIO2 0x00000004
++#define HP_SECVIOST_SECVIO1 0x00000002
++#define HP_SECVIOST_SECVIO0 0x00000001
++#define HP_SECVIOST_SECVIOMASK 0x0000003f
++
++/*
++ * SNVS Low Power Domain
++ * Includes glitch detector, SRTC, alarm, monotonic counter, ZMK
++ */
++struct snvs_lp {
++ u32 lock;
++ u32 ctl;
++ u32 mstr_key_ctl; /* Master Key Control */
++ u32 secvio_ctl; /* Security Violation Control */
++ u32 tamper_filt_cfg; /* Tamper Glitch Filters Configuration */
++ u32 tamper_det_cfg; /* Tamper Detectors Configuration */
++ u32 status;
++ u32 srtc_msb; /* Secure Real Time Clock/Counter MSB */
++ u32 srtc_lsb; /* Secure Real Time Clock/Counter LSB */
++ u32 time_alarm; /* Time Alarm */
++ u32 smc_msb; /* Secure Monotonic Counter MSB */
++ u32 smc_lsb; /* Secure Monotonic Counter LSB */
++ u32 pwr_glitch_det; /* Power Glitch Detector */
++ u32 gen_purpose;
++ u32 zmk[8]; /* Zeroizable Master Key */
++};
++
++#define LP_LOCK_MKEYSEL_LCK 0x00000200
++#define LP_LOCK_TAMPDET_LCK 0x00000100
++#define LP_LOCK_TAMPFLT_LCK 0x00000080
++#define LP_LOCK_SECVIO_LCK 0x00000040
++#define LP_LOCK_GENP_LCK 0x00000020
++#define LP_LOCK_MONOCTR_LCK 0x00000010
++#define LP_LOCK_CALIB_LCK 0x00000008
++#define LP_LOCK_SRTC_LCK 0x00000004
++#define LP_LOCK_ZMK_RD_LCK 0x00000002
++#define LP_LOCK_ZMK_WT_LCK 0x00000001
++
++#define LP_CTL_CAL_VAL_SHIFT 10
++#define LP_CTL_CAL_VAL_MASK (0x1f << LP_CTL_CAL_VAL_SHIFT)
++#define LP_CTL_CALIB_EN 0x00000100
++#define LP_CTL_SRTC_INVAL_EN 0x00000010
++#define LP_CTL_WAKE_INT_EN 0x00000008
++#define LP_CTL_MONOCTR_EN 0x00000004
++#define LP_CTL_TIMEALARM_EN 0x00000002
++#define LP_CTL_SRTC_EN 0x00000001
++
++#define LP_MKEYCTL_ZMKECC_SHIFT 8
++#define LP_MKEYCTL_ZMKECC_MASK (0xff << LP_MKEYCTL_ZMKECC_SHIFT)
++#define LP_MKEYCTL_ZMKECC_EN 0x00000010
++#define LP_MKEYCTL_ZMKECC_VAL 0x00000008
++#define LP_MKEYCTL_ZMKECC_PROG 0x00000004
++#define LP_MKEYCTL_MKSEL_SHIFT 0
++#define LP_MKEYCTL_MKSEL_MASK (3 << LP_MKEYCTL_MKSEL_SHIFT)
++#define LP_MKEYCTL_MK_OTP 0
++#define LP_MKEYCTL_MK_ZMK 2
++#define LP_MKEYCTL_MK_COMB 3
++
++#define LP_SECVIO_CTL_SRC5 0x20
++#define LP_SECVIO_CTL_SRC4 0x10
++#define LP_SECVIO_CTL_SRC3 0x08
++#define LP_SECVIO_CTL_SRC2 0x04
++#define LP_SECVIO_CTL_SRC1 0x02
++#define LP_SECVIO_CTL_SRC0 0x01
++
++#define LP_TAMPFILT_EXT2_EN 0x80000000
++#define LP_TAMPFILT_EXT2_SHIFT 24
++#define LP_TAMPFILT_EXT2_MASK (0x1f << LP_TAMPFILT_EXT2_SHIFT)
++#define LP_TAMPFILT_EXT1_EN 0x00800000
++#define LP_TAMPFILT_EXT1_SHIFT 16
++#define LP_TAMPFILT_EXT1_MASK (0x1f << LP_TAMPFILT_EXT1_SHIFT)
++#define LP_TAMPFILT_WM_EN 0x00000080
++#define LP_TAMPFILT_WM_SHIFT 0
++#define LP_TAMPFILT_WM_MASK (0x1f << LP_TAMPFILT_WM_SHIFT)
++
++#define LP_TAMPDET_OSC_BPS 0x10000000
++#define LP_TAMPDET_VRC_SHIFT	24
++#define LP_TAMPDET_VRC_MASK	(3 << LP_TAMPDET_VRC_SHIFT)
++#define LP_TAMPDET_HTDC_SHIFT	20
++#define LP_TAMPDET_HTDC_MASK	(3 << LP_TAMPDET_HTDC_SHIFT)
++#define LP_TAMPDET_LTDC_SHIFT	16
++#define LP_TAMPDET_LTDC_MASK	(3 << LP_TAMPDET_LTDC_SHIFT)
++#define LP_TAMPDET_POR_OBS 0x00008000
++#define LP_TAMPDET_PFD_OBS 0x00004000
++#define LP_TAMPDET_ET2_EN 0x00000400
++#define LP_TAMPDET_ET1_EN 0x00000200
++#define LP_TAMPDET_WMT2_EN 0x00000100
++#define LP_TAMPDET_WMT1_EN 0x00000080
++#define LP_TAMPDET_VT_EN 0x00000040
++#define LP_TAMPDET_TT_EN 0x00000020
++#define LP_TAMPDET_CT_EN 0x00000010
++#define LP_TAMPDET_MCR_EN 0x00000004
++#define LP_TAMPDET_SRTCR_EN 0x00000002
++
++#define LP_STATUS_SECURE
++#define LP_STATUS_NONSECURE
++#define LP_STATUS_SCANEXIT	0x00100000	/* all write-1-clear from here on */
++#define LP_STATUS_EXT_SECVIO 0x00010000
++#define LP_STATUS_ET2 0x00000400
++#define LP_STATUS_ET1 0x00000200
++#define LP_STATUS_WMT2 0x00000100
++#define LP_STATUS_WMT1 0x00000080
++#define LP_STATUS_VTD 0x00000040
++#define LP_STATUS_TTD 0x00000020
++#define LP_STATUS_CTD 0x00000010
++#define LP_STATUS_PGD 0x00000008
++#define LP_STATUS_MCR 0x00000004
++#define LP_STATUS_SRTCR 0x00000002
++#define LP_STATUS_LPTA 0x00000001
++
++/* Full SNVS register page, including version/options */
++struct snvs_full {
++ struct snvs_hp hp;
++ struct snvs_lp lp;
++ u32 rsvd[731]; /* deadspace 0x08c-0xbf7 */
++
++ /* Version / Revision / Option ID space - end of register page */
++ u32 vid; /* 0xbf8 HP Version ID (VID 1) */
++ u32 opt_rev; /* 0xbfc HP Options / Revision (VID 2) */
++};
++
++#endif /* SNVSREGS_H */
+diff -Nur linux-4.1.3/drivers/dma/imx-sdma.c linux-xbian-imx6/drivers/dma/imx-sdma.c
+--- linux-4.1.3/drivers/dma/imx-sdma.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/dma/imx-sdma.c 2015-07-27 23:13:04.285691435 +0200
+@@ -29,6 +29,7 @@
+ #include <linux/semaphore.h>
+ #include <linux/spinlock.h>
+ #include <linux/device.h>
++#include <linux/genalloc.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/firmware.h>
+ #include <linux/slab.h>
+@@ -232,6 +233,14 @@
+
+ struct sdma_engine;
+
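++/*
++ * Modes the transfer tasklet distinguishes: cyclic loops, one-shot
++ * normal transfers, peripheral-to-peripheral moves, and scripts (such
++ * as HDMI audio) that run without buffer descriptors.
++ */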
++enum sdma_mode {
++ SDMA_MODE_INVALID = 0,
++ SDMA_MODE_LOOP,
++ SDMA_MODE_NORMAL,
++ SDMA_MODE_P2P,
++ SDMA_MODE_NO_BD,
++};
++
+ /**
+ * struct sdma_channel - housekeeping for a SDMA channel
+ *
+@@ -244,6 +253,7 @@
+ * @word_size peripheral access size
+ * @buf_tail ID of the buffer that was processed
+ * @num_bd max NUM_BD. number of descriptors currently handling
++ * @bd_iram flag indicating the memory location of buffer descriptor
+ */
+ struct sdma_channel {
+ struct sdma_engine *sdma;
+@@ -258,12 +268,16 @@
+ unsigned int period_len;
+ struct sdma_buffer_descriptor *bd;
+ dma_addr_t bd_phys;
++ bool bd_iram;
+ unsigned int pc_from_device, pc_to_device;
+- unsigned long flags;
+- dma_addr_t per_address;
++ unsigned int device_to_device;
++ unsigned int other_script;
++ enum sdma_mode mode;
++ dma_addr_t per_address, per_address2;
+ unsigned long event_mask[2];
+ unsigned long watermark_level;
+ u32 shp_addr, per_addr;
++ u32 data_addr1, data_addr2;
+ struct dma_chan chan;
+ spinlock_t lock;
+ struct dma_async_tx_descriptor desc;
+@@ -271,11 +285,8 @@
+ unsigned int chn_count;
+ unsigned int chn_real_count;
+ struct tasklet_struct tasklet;
+- struct imx_dma_data data;
+ };
+
+-#define IMX_DMA_SG_LOOP BIT(0)
+-
+ #define MAX_DMA_CHANNELS 32
+ #define MXC_SDMA_DEFAULT_PRIORITY 1
+ #define MXC_SDMA_MIN_PRIORITY 1
+@@ -327,6 +338,7 @@
+ spinlock_t channel_0_lock;
+ u32 script_number;
+ struct sdma_script_start_addrs *script_addrs;
++ struct gen_pool *iram_pool;
+ const struct sdma_driver_data *drvdata;
+ };
+
+@@ -546,12 +558,14 @@
+ dma_addr_t buf_phys;
+ int ret;
+ unsigned long flags;
++ bool use_iram = true;
+
+- buf_virt = dma_alloc_coherent(NULL,
+- size,
+- &buf_phys, GFP_KERNEL);
++ buf_virt = gen_pool_dma_alloc(sdma->iram_pool, size, &buf_phys);
+ if (!buf_virt) {
+- return -ENOMEM;
++ use_iram = false;
++ buf_virt = dma_alloc_coherent(NULL, size, &buf_phys, GFP_KERNEL);
++ if (!buf_virt)
++ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&sdma->channel_0_lock, flags);
+@@ -568,7 +582,10 @@
+
+ spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
+
+- dma_free_coherent(NULL, size, buf_virt, buf_phys);
++ if (use_iram)
++ gen_pool_free(sdma->iram_pool, (unsigned long)buf_virt, size);
++ else
++ dma_free_coherent(NULL, size, buf_virt, buf_phys);
+
+ return ret;
+ }
+@@ -654,14 +671,31 @@
+ sdmac->desc.callback(sdmac->desc.callback_param);
+ }
+
++static void sdma_handle_other_intr(struct sdma_channel *sdmac)
++{
++ if (sdmac->desc.callback)
++ sdmac->desc.callback(sdmac->desc.callback_param);
++}
++
+ static void sdma_tasklet(unsigned long data)
+ {
+ struct sdma_channel *sdmac = (struct sdma_channel *) data;
++ struct sdma_engine *sdma = sdmac->sdma;
+
+- if (sdmac->flags & IMX_DMA_SG_LOOP)
++ switch (sdmac->mode) {
++ case SDMA_MODE_LOOP:
+ sdma_handle_channel_loop(sdmac);
+- else
++ break;
++ case SDMA_MODE_NORMAL:
+ mxc_sdma_handle_channel_normal(sdmac);
++ break;
++ case SDMA_MODE_NO_BD:
++ sdma_handle_other_intr(sdmac);
++ break;
++ default:
++ dev_err(sdma->dev, "invalid SDMA MODE!\n");
++ break;
++ }
+ }
+
+ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
+@@ -678,7 +712,7 @@
+ int channel = fls(stat) - 1;
+ struct sdma_channel *sdmac = &sdma->channel[channel];
+
+- if (sdmac->flags & IMX_DMA_SG_LOOP)
++ if (sdmac->mode & SDMA_MODE_LOOP)
+ sdma_update_channel_loop(sdmac);
+
+ tasklet_schedule(&sdmac->tasklet);
+@@ -702,9 +736,12 @@
+ * two peripherals or memory-to-memory transfers
+ */
+ int per_2_per = 0, emi_2_emi = 0;
++ int other = 0;
+
+ sdmac->pc_from_device = 0;
+ sdmac->pc_to_device = 0;
++ sdmac->device_to_device = 0;
++ sdmac->other_script = 0;
+
+ switch (peripheral_type) {
+ case IMX_DMATYPE_MEMORY:
+@@ -733,7 +770,6 @@
+ case IMX_DMATYPE_CSPI:
+ case IMX_DMATYPE_EXT:
+ case IMX_DMATYPE_SSI:
+- case IMX_DMATYPE_SAI:
+ per_2_emi = sdma->script_addrs->app_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_app_addr;
+ break;
+@@ -751,11 +787,6 @@
+ emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
+ break;
+ case IMX_DMATYPE_ASRC:
+- per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
+- emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
+- per_2_per = sdma->script_addrs->per_2_per_addr;
+- break;
+- case IMX_DMATYPE_ASRC_SP:
+ per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
+ emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
+ per_2_per = sdma->script_addrs->per_2_per_addr;
+@@ -774,12 +805,17 @@
+ case IMX_DMATYPE_IPU_MEMORY:
+ emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
+ break;
++ case IMX_DMATYPE_HDMI:
++ other = sdma->script_addrs->hdmi_dma_addr;
++ break;
+ default:
+ break;
+ }
+
+ sdmac->pc_from_device = per_2_emi;
+ sdmac->pc_to_device = emi_2_per;
++ sdmac->device_to_device = per_2_per;
++ sdmac->other_script = other;
+ }
+
+ static int sdma_load_context(struct sdma_channel *sdmac)
+@@ -792,11 +828,14 @@
+ int ret;
+ unsigned long flags;
+
+- if (sdmac->direction == DMA_DEV_TO_MEM) {
++ if (sdmac->direction == DMA_DEV_TO_MEM)
+ load_address = sdmac->pc_from_device;
+- } else {
++ else if (sdmac->direction == DMA_DEV_TO_DEV)
++ load_address = sdmac->device_to_device;
++ else if (sdmac->direction == DMA_MEM_TO_DEV)
+ load_address = sdmac->pc_to_device;
+- }
++ else
++ load_address = sdmac->other_script;
+
+ if (load_address < 0)
+ return load_address;
+@@ -816,11 +855,16 @@
+ /* Send by context the event mask,base address for peripheral
+ * and watermark level
+ */
+- context->gReg[0] = sdmac->event_mask[1];
+- context->gReg[1] = sdmac->event_mask[0];
+- context->gReg[2] = sdmac->per_addr;
+- context->gReg[6] = sdmac->shp_addr;
+- context->gReg[7] = sdmac->watermark_level;
++ if (sdmac->peripheral_type == IMX_DMATYPE_HDMI) {
++ context->gReg[4] = sdmac->data_addr1;
++ context->gReg[6] = sdmac->data_addr2;
++ } else {
++ context->gReg[0] = sdmac->event_mask[1];
++ context->gReg[1] = sdmac->event_mask[0];
++ context->gReg[2] = sdmac->per_addr;
++ context->gReg[6] = sdmac->shp_addr;
++ context->gReg[7] = sdmac->watermark_level;
++ }
+
+ bd0->mode.command = C0_SETDM;
+ bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
+@@ -854,6 +898,7 @@
+ static int sdma_config_channel(struct dma_chan *chan)
+ {
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
++ struct imx_dma_data *data = sdmac->chan.private;
+ int ret;
+
+ sdma_disable_channel(chan);
+@@ -862,12 +907,19 @@
+ sdmac->event_mask[1] = 0;
+ sdmac->shp_addr = 0;
+ sdmac->per_addr = 0;
++ sdmac->data_addr1 = 0;
++ sdmac->data_addr2 = 0;
+
+- if (sdmac->event_id0) {
++ if (sdmac->event_id0 >= 0) {
+ if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
+ return -EINVAL;
+ sdma_event_enable(sdmac, sdmac->event_id0);
+ }
++ if (sdmac->event_id1) {
++ if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
++ return -EINVAL;
++ sdma_event_enable(sdmac, sdmac->event_id1);
++ }
+
+ switch (sdmac->peripheral_type) {
+ case IMX_DMATYPE_DSP:
+@@ -887,19 +939,75 @@
+ (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
+ /* Handle multiple event channels differently */
+ if (sdmac->event_id1) {
+- sdmac->event_mask[1] = BIT(sdmac->event_id1 % 32);
+- if (sdmac->event_id1 > 31)
+- __set_bit(31, &sdmac->watermark_level);
+- sdmac->event_mask[0] = BIT(sdmac->event_id0 % 32);
+- if (sdmac->event_id0 > 31)
+- __set_bit(30, &sdmac->watermark_level);
++ if (sdmac->event_id0 > 31) {
++ sdmac->event_mask[0] |= 0;
++ __set_bit(28, &sdmac->watermark_level);
++ sdmac->event_mask[1] |=
++ BIT(sdmac->event_id0 % 32);
++ } else {
++ sdmac->event_mask[1] |= 0;
++ sdmac->event_mask[0] |=
++ BIT(sdmac->event_id0 % 32);
++ }
++ if (sdmac->event_id1 > 31) {
++ sdmac->event_mask[0] |= 0;
++ __set_bit(29, &sdmac->watermark_level);
++ sdmac->event_mask[1] |=
++ BIT(sdmac->event_id1 % 32);
++ } else {
++ sdmac->event_mask[1] |= 0;
++ sdmac->event_mask[0] |=
++ BIT(sdmac->event_id1 % 32);
++ }
++ /* BIT 11:
++ * 1 : Source on SPBA
++ * 0 : Source on AIPS
++ */
++ __set_bit(11, &sdmac->watermark_level);
++ /* BIT 12:
++ * 1 : Destination on SPBA
++ * 0 : Destination on AIPS
++ */
++ __set_bit(12, &sdmac->watermark_level);
++ __set_bit(31, &sdmac->watermark_level);
++ /* BIT 31:
++ * 1 : Amount of samples to be transferred is
++ * unknown and script will keep on transferring
++ * samples as long as both events are detected
++ * and script must be manually stopped by the
++ * application.
++ * 0 : The amount of samples to be transferred is
++ * equal to the count field of the mode word.
++ */
++ __set_bit(25, &sdmac->watermark_level);
++ __clear_bit(24, &sdmac->watermark_level);
+ } else {
+- __set_bit(sdmac->event_id0, sdmac->event_mask);
++ if (sdmac->event_id0 > 31) {
++ sdmac->event_mask[0] = 0;
++ sdmac->event_mask[1] |=
++ BIT(sdmac->event_id0 % 32);
++ } else {
++ sdmac->event_mask[0] |=
++ BIT(sdmac->event_id0 % 32);
++ sdmac->event_mask[1] = 0;
++ }
+ }
+ /* Watermark Level */
+ sdmac->watermark_level |= sdmac->watermark_level;
+ /* Address */
+- sdmac->shp_addr = sdmac->per_address;
++ if (sdmac->direction == DMA_DEV_TO_DEV) {
++ sdmac->shp_addr = sdmac->per_address2;
++ sdmac->per_addr = sdmac->per_address;
++ } else if (sdmac->direction == DMA_TRANS_NONE) {
++ if (sdmac->peripheral_type != IMX_DMATYPE_HDMI ||
++ !data->data_addr1 || !data->data_addr2)
++ return -EINVAL;
++ sdmac->data_addr1 = *(u32 *)data->data_addr1;
++ sdmac->data_addr2 = *(u32 *)data->data_addr2;
++ sdmac->watermark_level = 0;
++ } else {
++ sdmac->shp_addr = sdmac->per_address;
++ }
+ } else {
+ sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
+ }
+@@ -931,13 +1039,19 @@
+ int channel = sdmac->channel;
+ int ret = -EBUSY;
+
+- sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys,
+- GFP_KERNEL);
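++ /* Prefer buffer descriptors in on-chip SRAM (iram pool); fall back to coherent DRAM */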
++ sdmac->bd_iram = true;
++ sdmac->bd = gen_pool_dma_alloc(sdma->iram_pool, PAGE_SIZE, &sdmac->bd_phys);
+ if (!sdmac->bd) {
+- ret = -ENOMEM;
+- goto out;
++ sdmac->bd_iram = false;
++ sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL);
++ if (!sdmac->bd) {
++ ret = -ENOMEM;
++ goto out;
++ }
+ }
+
++ memset(sdmac->bd, 0, PAGE_SIZE);
++
+ sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
+ sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+
+@@ -987,6 +1101,7 @@
+
+ sdmac->peripheral_type = data->peripheral_type;
+ sdmac->event_id0 = data->dma_request;
++ sdmac->event_id1 = data->dma_request2;
+
+ clk_enable(sdmac->sdma->clk_ipg);
+ clk_enable(sdmac->sdma->clk_ahb);
+@@ -1004,6 +1119,9 @@
+ /* txd.flags will be overwritten in prep funcs */
+ sdmac->desc.flags = DMA_CTRL_ACK;
+
++ /* Set SDMA channel mode to invalid to avoid misconfiguration */
++ sdmac->mode = SDMA_MODE_INVALID;
++
+ return 0;
+ }
+
+@@ -1014,7 +1132,7 @@
+
+ sdma_disable_channel(chan);
+
+- if (sdmac->event_id0)
++ if (sdmac->event_id0 >= 0)
+ sdma_event_disable(sdmac, sdmac->event_id0);
+ if (sdmac->event_id1)
+ sdma_event_disable(sdmac, sdmac->event_id1);
+@@ -1024,7 +1142,10 @@
+
+ sdma_set_channel_priority(sdmac, 0);
+
+- dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
++ if (sdmac->bd_iram)
++ gen_pool_free(sdma->iram_pool, (unsigned long)sdmac->bd, PAGE_SIZE);
++ else
++ dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
+
+ clk_disable(sdma->clk_ipg);
+ clk_disable(sdma->clk_ahb);
+@@ -1045,7 +1166,7 @@
+ return NULL;
+ sdmac->status = DMA_IN_PROGRESS;
+
+- sdmac->flags = 0;
++ sdmac->mode = SDMA_MODE_NORMAL;
+
+ sdmac->buf_tail = 0;
+
+@@ -1134,13 +1255,13 @@
+ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+- unsigned long flags)
++ unsigned long flags, void *context)
+ {
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_engine *sdma = sdmac->sdma;
+- int num_periods = buf_len / period_len;
+ int channel = sdmac->channel;
+ int ret, i = 0, buf = 0;
++ int num_periods;
+
+ dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
+
+@@ -1152,12 +1273,33 @@
+ sdmac->buf_tail = 0;
+ sdmac->period_len = period_len;
+
+- sdmac->flags |= IMX_DMA_SG_LOOP;
+ sdmac->direction = direction;
++
++ switch (sdmac->direction) {
++ case DMA_DEV_TO_DEV:
++ sdmac->mode = SDMA_MODE_P2P;
++ break;
++ case DMA_TRANS_NONE:
++ sdmac->mode = SDMA_MODE_NO_BD;
++ break;
++ case DMA_MEM_TO_DEV:
++ case DMA_DEV_TO_MEM:
++ sdmac->mode = SDMA_MODE_LOOP;
++ break;
++ default:
++ dev_err(sdma->dev, "invalid SDMA direction %d\n", direction);
++ return NULL;
++ }
++
+ ret = sdma_load_context(sdmac);
+ if (ret)
+ goto err_out;
+
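++ /* A zero period_len means no buffer descriptors are needed (the no-BD case, e.g. HDMI) */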
++ if (period_len)
++ num_periods = buf_len / period_len;
++ else
++ return &sdmac->desc;
++
+ if (num_periods > NUM_BD) {
+ dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
+ channel, num_periods, NUM_BD);
+@@ -1216,7 +1358,16 @@
+ {
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+
+- if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
++ if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) {
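++ /* Device-to-device: source watermark in the low 16 bits, destination in the high 16 */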
++ sdmac->per_address = dmaengine_cfg->src_addr;
++ sdmac->per_address2 = dmaengine_cfg->dst_addr;
++ sdmac->watermark_level = 0;
++ sdmac->watermark_level |=
++ dmaengine_cfg->src_maxburst;
++ sdmac->watermark_level |=
++ dmaengine_cfg->dst_maxburst << 16;
++ sdmac->word_size = dmaengine_cfg->dst_addr_width;
++ } else if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+ sdmac->per_address = dmaengine_cfg->src_addr;
+ sdmac->watermark_level = dmaengine_cfg->src_maxburst *
+ dmaengine_cfg->src_addr_width;
+@@ -1238,7 +1389,7 @@
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+ u32 residue;
+
+- if (sdmac->flags & IMX_DMA_SG_LOOP)
++ if (sdmac->mode & SDMA_MODE_LOOP)
+ residue = (sdmac->num_bd - sdmac->buf_tail) * sdmac->period_len;
+ else
+ residue = sdmac->chn_count - sdmac->chn_real_count;
+@@ -1286,8 +1437,7 @@
+ unsigned short *ram_code;
+
+ if (!fw) {
+- dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
+- /* In this case we just use the ROM firmware. */
++ dev_err(sdma->dev, "firmware not found\n");
+ return;
+ }
+
+@@ -1302,7 +1452,10 @@
+ goto err_firmware;
+ switch (header->version_major) {
+ case 1:
+- sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
++ if (header->version_minor > 0)
++ sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
++ else
++ sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+ break;
+ case 2:
+ sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
+@@ -1337,7 +1490,7 @@
+ release_firmware(fw);
+ }
+
+-static int sdma_get_firmware(struct sdma_engine *sdma,
++static int __init sdma_get_firmware(struct sdma_engine *sdma,
+ const char *fw_name)
+ {
+ int ret;
+@@ -1349,9 +1502,9 @@
+ return ret;
+ }
+
+-static int sdma_init(struct sdma_engine *sdma)
++static int __init sdma_init(struct sdma_engine *sdma)
+ {
+- int i, ret;
++ int i, ret, ccbsize;
+ dma_addr_t ccb_phys;
+
+ clk_enable(sdma->clk_ipg);
+@@ -1360,14 +1513,17 @@
+ /* Be sure SDMA has not started yet */
+ writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
+
+- sdma->channel_control = dma_alloc_coherent(NULL,
+- MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
+- sizeof(struct sdma_context_data),
+- &ccb_phys, GFP_KERNEL);
++ ccbsize = MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control)
++ + sizeof(struct sdma_context_data);
+
++ sdma->channel_control = gen_pool_dma_alloc(sdma->iram_pool, ccbsize, &ccb_phys);
+ if (!sdma->channel_control) {
+- ret = -ENOMEM;
+- goto err_dma_alloc;
++ sdma->channel_control = dma_alloc_coherent(NULL, ccbsize,
++ &ccb_phys, GFP_KERNEL);
++ if (!sdma->channel_control) {
++ ret = -ENOMEM;
++ goto err_dma_alloc;
++ }
+ }
+
+ sdma->context = (void *)sdma->channel_control +
+@@ -1419,14 +1575,12 @@
+
+ static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
+ {
+- struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct imx_dma_data *data = fn_param;
+
+ if (!imx_dma_is_general_purpose(chan))
+ return false;
+
+- sdmac->data = *data;
+- chan->private = &sdmac->data;
++ chan->private = data;
+
+ return true;
+ }
+@@ -1444,11 +1598,12 @@
+ data.dma_request = dma_spec->args[0];
+ data.peripheral_type = dma_spec->args[1];
+ data.priority = dma_spec->args[2];
++ data.dma_request2 = 0;
+
+ return dma_request_channel(mask, sdma_filter_fn, &data);
+ }
+
+-static int sdma_probe(struct platform_device *pdev)
++static int __init sdma_probe(struct platform_device *pdev)
+ {
+ const struct of_device_id *of_id =
+ of_match_device(sdma_dt_ids, &pdev->dev);
+@@ -1547,6 +1702,11 @@
+ &sdma->dma_device.channels);
+ }
+
++ if (np)
++ sdma->iram_pool = of_get_named_gen_pool(np, "iram", 0);
++ if (!sdma->iram_pool)
++ dev_warn(&pdev->dev, "no iram assigned, using external mem\n");
++
+ ret = sdma_init(sdma);
+ if (ret)
+ goto err_init;
+@@ -1583,7 +1743,7 @@
+ sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
+ sdma->dma_device.device_tx_status = sdma_tx_status;
+ sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
+- sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
++ sdma->dma_device.device_prep_dma_cyclic = (void*)sdma_prep_dma_cyclic;
+ sdma->dma_device.device_config = sdma_config;
+ sdma->dma_device.device_terminate_all = sdma_disable_channel;
+ sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+@@ -1594,8 +1754,6 @@
+ sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
+ dma_set_max_seg_size(sdma->dma_device.dev, 65535);
+
+- platform_set_drvdata(pdev, sdma);
+-
+ ret = dma_async_device_register(&sdma->dma_device);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register\n");
+@@ -1647,10 +1805,13 @@
+ },
+ .id_table = sdma_devtypes,
+ .remove = sdma_remove,
+- .probe = sdma_probe,
+ };
+
+-module_platform_driver(sdma_driver);
++static int __init sdma_module_init(void)
++{
++ return platform_driver_probe(&sdma_driver, sdma_probe);
++}
++module_init(sdma_module_init);
+
+ MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
+ MODULE_DESCRIPTION("i.MX SDMA driver");
+diff -Nur linux-4.1.3/drivers/gpu/drm/Kconfig linux-xbian-imx6/drivers/gpu/drm/Kconfig
+--- linux-4.1.3/drivers/gpu/drm/Kconfig 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/gpu/drm/Kconfig 2015-07-27 23:13:04.353449682 +0200
+@@ -217,3 +217,5 @@
+ source "drivers/gpu/drm/amd/amdkfd/Kconfig"
+
+ source "drivers/gpu/drm/imx/Kconfig"
++
++source "drivers/gpu/drm/vivante/Kconfig"
+diff -Nur linux-4.1.3/drivers/gpu/drm/Makefile linux-xbian-imx6/drivers/gpu/drm/Makefile
+--- linux-4.1.3/drivers/gpu/drm/Makefile 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/gpu/drm/Makefile 2015-07-27 23:13:04.353449682 +0200
+@@ -67,6 +67,7 @@
+ obj-$(CONFIG_DRM_TEGRA) += tegra/
+ obj-$(CONFIG_DRM_STI) += sti/
+ obj-$(CONFIG_DRM_IMX) += imx/
++obj-$(CONFIG_DRM_VIVANTE) += vivante/
+ obj-y += i2c/
+ obj-y += panel/
+ obj-y += bridge/
+diff -Nur linux-4.1.3/drivers/gpu/drm/vivante/Kconfig linux-xbian-imx6/drivers/gpu/drm/vivante/Kconfig
+--- linux-4.1.3/drivers/gpu/drm/vivante/Kconfig 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/gpu/drm/vivante/Kconfig 2015-07-27 23:13:04.744056042 +0200
+@@ -0,0 +1,6 @@
++config DRM_VIVANTE
++ tristate "Vivante GCCore"
++ depends on DRM
++ help
++ Choose this option if you have a Vivante graphics card.
++ If M is selected, the module will be called vivante.
+diff -Nur linux-4.1.3/drivers/gpu/drm/vivante/Makefile linux-xbian-imx6/drivers/gpu/drm/vivante/Makefile
+--- linux-4.1.3/drivers/gpu/drm/vivante/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/gpu/drm/vivante/Makefile 2015-07-27 23:13:04.744056042 +0200
+@@ -0,0 +1,29 @@
++##############################################################################
++#
++# Copyright (C) 2005 - 2013 by Vivante Corp.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the license, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not write to the Free Software
++# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++#
++##############################################################################
++
++
++#
++# Makefile for the drm device driver. This driver provides support for the
++# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
++
++ccflags-y := -Iinclude/drm
++vivante-y := vivante_drv.o
++
++obj-$(CONFIG_DRM_VIVANTE) += vivante.o
+diff -Nur linux-4.1.3/drivers/gpu/drm/vivante/vivante_drv.c linux-xbian-imx6/drivers/gpu/drm/vivante/vivante_drv.c
+--- linux-4.1.3/drivers/gpu/drm/vivante/vivante_drv.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/gpu/drm/vivante/vivante_drv.c 2015-07-27 23:13:04.744056042 +0200
+@@ -0,0 +1,112 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++/* vivante_drv.c -- vivante driver -*- linux-c -*-
++ *
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Rickard E. (Rik) Faith <faith@valinux.com>
++ * Daryll Strauss <daryll@valinux.com>
++ * Gareth Hughes <gareth@valinux.com>
++ */
++
++#include <linux/version.h>
++#include <linux/module.h>
++
++#include "drmP.h"
++#include "vivante_drv.h"
++
++#include "drm_pciids.h"
++
++static char platformdevicename[] = "Vivante GCCore";
++static struct platform_device *pplatformdev;
++
++static const struct file_operations viv_driver_fops = {
++ .owner = THIS_MODULE,
++ .open = drm_open,
++ .release = drm_release,
++ .unlocked_ioctl = drm_ioctl,
++ .mmap = drm_legacy_mmap,
++ .poll = drm_poll,
++ .llseek = noop_llseek,
++};
++
++static struct drm_driver driver = {
++// .driver_features = DRIVER_RENDER,
++ .fops = &viv_driver_fops,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)
++ .set_busid = drm_platform_set_busid,
++#endif
++ .name = DRIVER_NAME,
++ .desc = DRIVER_DESC,
++ .date = DRIVER_DATE,
++ .major = DRIVER_MAJOR,
++ .minor = DRIVER_MINOR,
++ .patchlevel = DRIVER_PATCHLEVEL,
++};
++
++static int __init vivante_init(void)
++{
++ int retcode;
++
++ pplatformdev = platform_device_register_simple(platformdevicename,
++ -1, NULL, 0);
++ if (pplatformdev == NULL)
++ printk(KERN_ERR "Platform device is null\n");
++
++ retcode = drm_platform_init(&driver, pplatformdev);
++
++ return retcode;
++}
++
++static void __exit vivante_exit(void)
++{
++ if (pplatformdev) {
++ platform_device_unregister(pplatformdev);
++ pplatformdev = NULL;
++ }
++}
++
++module_init(vivante_init);
++module_exit(vivante_exit);
++
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++MODULE_LICENSE("GPL and additional rights");
+diff -Nur linux-4.1.3/drivers/gpu/drm/vivante/vivante_drv.h linux-xbian-imx6/drivers/gpu/drm/vivante/vivante_drv.h
+--- linux-4.1.3/drivers/gpu/drm/vivante/vivante_drv.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/gpu/drm/vivante/vivante_drv.h 2015-07-27 23:13:04.744056042 +0200
+@@ -0,0 +1,69 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++/* vivante_drv.h -- Vivante DRM template customization -*- linux-c -*-
++ * Created: Wed Feb 14 12:32:32 2012 by John Zhao
++ */
++/*
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the "Software"),
++ * to deal in the Software without restriction, including without limitation
++ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
++ * and/or sell copies of the Software, and to permit persons to whom the
++ * Software is furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the next
++ * paragraph) shall be included in all copies or substantial portions of the
++ * Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
++ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
++ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
++ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
++ * OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * Authors:
++ * Gareth Hughes <gareth@valinux.com>
++ */
++
++#ifndef __VIVANTE_DRV_H__
++#define __VIVANTE_DRV_H__
++
++/* General customization:
++ */
++
++#include <drm/drm_legacy.h>
++#include <drm/drm_mm.h>
++
++#define DRIVER_AUTHOR "Vivante Inc."
++
++#define DRIVER_NAME "vivante"
++#define DRIVER_DESC "Vivante GCCore"
++#define DRIVER_DATE "20120216"
++
++#define DRIVER_MAJOR 1
++#define DRIVER_MINOR 0
++#define DRIVER_PATCHLEVEL 0
++
++#endif
+diff -Nur linux-4.1.3/drivers/Kconfig linux-xbian-imx6/drivers/Kconfig
+--- linux-4.1.3/drivers/Kconfig 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/Kconfig 2015-07-27 23:13:03.699781896 +0200
+@@ -182,4 +182,6 @@
+
+ source "drivers/android/Kconfig"
+
++source "drivers/mxc/Kconfig"
++
+ endmenu
+diff -Nur linux-4.1.3/drivers/Makefile linux-xbian-imx6/drivers/Makefile
+--- linux-4.1.3/drivers/Makefile 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/Makefile 2015-07-27 23:13:03.699781896 +0200
+@@ -165,3 +165,4 @@
+ obj-$(CONFIG_THUNDERBOLT) += thunderbolt/
+ obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/
+ obj-$(CONFIG_ANDROID) += android/
++obj-y += mxc/
+diff -Nur linux-4.1.3/drivers/mfd/Kconfig linux-xbian-imx6/drivers/mfd/Kconfig
+--- linux-4.1.3/drivers/mfd/Kconfig 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/mfd/Kconfig 2015-07-27 23:13:06.023491162 +0200
+@@ -207,6 +207,13 @@
+ Additional drivers must be enabled in order to use the specific
+ features of the device.
+
++config MFD_MXC_HDMI
++ tristate "Freescale HDMI Core"
++ select MFD_CORE
++ help
++ This is the core driver for the Freescale i.MX6 on-chip HDMI.
++ This MFD driver connects with the video and audio drivers for HDMI.
++
+ config MFD_DLN2
+ tristate "Diolan DLN2 support"
+ select MFD_CORE
+diff -Nur linux-4.1.3/drivers/mfd/Makefile linux-xbian-imx6/drivers/mfd/Makefile
+--- linux-4.1.3/drivers/mfd/Makefile 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/mfd/Makefile 2015-07-27 23:13:06.023491162 +0200
+@@ -185,3 +185,4 @@
+ intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o
+ obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o
+ obj-$(CONFIG_MFD_MT6397) += mt6397-core.o
++obj-$(CONFIG_MFD_MXC_HDMI) += mxc-hdmi-core.o
+diff -Nur linux-4.1.3/drivers/mfd/mxc-hdmi-core.c linux-xbian-imx6/drivers/mfd/mxc-hdmi-core.c
+--- linux-4.1.3/drivers/mfd/mxc-hdmi-core.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mfd/mxc-hdmi-core.c 2015-07-27 23:13:06.051391618 +0200
+@@ -0,0 +1,723 @@
++/*
++ * Copyright (C) 2011-2014 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/device.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/clk.h>
++#include <linux/spinlock.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++
++#include <linux/platform_device.h>
++#include <linux/regulator/machine.h>
++#include <asm/mach-types.h>
++
++#include <video/mxc_hdmi.h>
++#include <linux/ipu-v3.h>
++#include <video/mxc_edid.h>
++#include "../mxc/ipu3/ipu_prv.h"
++#include <linux/mfd/mxc-hdmi-core.h>
++#include <linux/of_device.h>
++#include <linux/mod_devicetable.h>
++
++struct mxc_hdmi_data {
++ struct platform_device *pdev;
++ unsigned long __iomem *reg_base;
++ unsigned long reg_phys_base;
++ struct device *dev;
++};
++
++static void __iomem *hdmi_base;
++static struct clk *isfr_clk;
++static struct clk *iahb_clk;
++static spinlock_t irq_spinlock;
++static spinlock_t edid_spinlock;
++static unsigned int sample_rate;
++static unsigned long pixel_clk_rate;
++static struct clk *pixel_clk;
++static int hdmi_ratio;
++int mxc_hdmi_ipu_id;
++int mxc_hdmi_disp_id;
++static int hdmi_core_edid_status;
++static struct mxc_edid_cfg hdmi_core_edid_cfg;
++static int hdmi_core_init;
++static unsigned int hdmi_dma_running;
++static struct snd_pcm_substream *hdmi_audio_stream_playback;
++static unsigned int hdmi_cable_state;
++static unsigned int hdmi_blank_state;
++static unsigned int hdmi_abort_state;
++static spinlock_t hdmi_audio_lock, hdmi_blank_state_lock, hdmi_cable_state_lock;
++
++void hdmi_set_dvi_mode(unsigned int state)
++{
++ if (state) {
++ mxc_hdmi_abort_stream();
++ hdmi_cec_stop_device();
++ } else {
++ hdmi_cec_start_device();
++ }
++}
++EXPORT_SYMBOL(hdmi_set_dvi_mode);
++
++unsigned int hdmi_set_cable_state(unsigned int state)
++{
++ unsigned long flags;
++ struct snd_pcm_substream *substream = hdmi_audio_stream_playback;
++
++ spin_lock_irqsave(&hdmi_cable_state_lock, flags);
++ hdmi_cable_state = state;
++ spin_unlock_irqrestore(&hdmi_cable_state_lock, flags);
++
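++ /* Restart audio that was aborted while the cable was disconnected or blanked */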
++ if (check_hdmi_state() && substream && hdmi_abort_state) {
++ hdmi_abort_state = 0;
++ substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
++ }
++ return 0;
++}
++EXPORT_SYMBOL(hdmi_set_cable_state);
++
++unsigned int hdmi_set_blank_state(unsigned int state)
++{
++ unsigned long flags;
++ struct snd_pcm_substream *substream = hdmi_audio_stream_playback;
++
++ spin_lock_irqsave(&hdmi_blank_state_lock, flags);
++ hdmi_blank_state = state;
++ spin_unlock_irqrestore(&hdmi_blank_state_lock, flags);
++
++ if (check_hdmi_state() && substream && hdmi_abort_state) {
++ hdmi_abort_state = 0;
++ substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
++ }
++ return 0;
++}
++EXPORT_SYMBOL(hdmi_set_blank_state);
++
++static void hdmi_audio_abort_stream(struct snd_pcm_substream *substream)
++{
++ unsigned long flags;
++
++ snd_pcm_stream_lock_irqsave(substream, flags);
++
++ if (snd_pcm_running(substream)) {
++ hdmi_abort_state = 1;
++ substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
++ }
++
++ snd_pcm_stream_unlock_irqrestore(substream, flags);
++}
++
++int mxc_hdmi_abort_stream(void)
++{
++ unsigned long flags;
++ spin_lock_irqsave(&hdmi_audio_lock, flags);
++ if (hdmi_audio_stream_playback)
++ hdmi_audio_abort_stream(hdmi_audio_stream_playback);
++ spin_unlock_irqrestore(&hdmi_audio_lock, flags);
++
++ return 0;
++}
++EXPORT_SYMBOL(mxc_hdmi_abort_stream);
++
++int check_hdmi_state(void)
++{
++ unsigned long flags1, flags2;
++ unsigned int ret;
++
++ spin_lock_irqsave(&hdmi_cable_state_lock, flags1);
++ spin_lock_irqsave(&hdmi_blank_state_lock, flags2);
++
++ ret = hdmi_cable_state && hdmi_blank_state;
++
++ spin_unlock_irqrestore(&hdmi_blank_state_lock, flags2);
++ spin_unlock_irqrestore(&hdmi_cable_state_lock, flags1);
++
++ return ret;
++}
++EXPORT_SYMBOL(check_hdmi_state);
++
++int mxc_hdmi_register_audio(struct snd_pcm_substream *substream)
++{
++ unsigned long flags, flags1;
++ int ret = 0;
++
++ snd_pcm_stream_lock_irqsave(substream, flags);
++
++ if (substream && check_hdmi_state()) {
++ spin_lock_irqsave(&hdmi_audio_lock, flags1);
++ if (hdmi_audio_stream_playback) {
++ pr_err("%s unconsist hdmi auido stream!\n", __func__);
++ ret = -EINVAL;
++ }
++ hdmi_audio_stream_playback = substream;
++ hdmi_abort_state = 0;
++ spin_unlock_irqrestore(&hdmi_audio_lock, flags1);
++ } else
++ ret = -EINVAL;
++
++ snd_pcm_stream_unlock_irqrestore(substream, flags);
++
++ return ret;
++}
++EXPORT_SYMBOL(mxc_hdmi_register_audio);
++
++void mxc_hdmi_unregister_audio(struct snd_pcm_substream *substream)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&hdmi_audio_lock, flags);
++ hdmi_audio_stream_playback = NULL;
++ hdmi_abort_state = 0;
++ spin_unlock_irqrestore(&hdmi_audio_lock, flags);
++}
++EXPORT_SYMBOL(mxc_hdmi_unregister_audio);
++
++u8 hdmi_readb(unsigned int reg)
++{
++ u8 value;
++
++ value = __raw_readb(hdmi_base + reg);
++
++ return value;
++}
++EXPORT_SYMBOL(hdmi_readb);
++
++#ifdef DEBUG
++static bool overflow_lo;
++static bool overflow_hi;
++
++bool hdmi_check_overflow(void)
++{
++ u8 val, lo, hi;
++
++ val = hdmi_readb(HDMI_IH_FC_STAT2);
++ lo = (val & HDMI_IH_FC_STAT2_LOW_PRIORITY_OVERFLOW) != 0;
++ hi = (val & HDMI_IH_FC_STAT2_HIGH_PRIORITY_OVERFLOW) != 0;
++
++ if ((lo != overflow_lo) || (hi != overflow_hi)) {
++ pr_debug("%s LowPriority=%d HighPriority=%d <=======================\n",
++ __func__, lo, hi);
++ overflow_lo = lo;
++ overflow_hi = hi;
++ return true;
++ }
++ return false;
++}
++#else
++bool hdmi_check_overflow(void)
++{
++ return false;
++}
++#endif
++EXPORT_SYMBOL(hdmi_check_overflow);
++
++void hdmi_writeb(u8 value, unsigned int reg)
++{
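++ /* The overflow checks only report anything when built with DEBUG */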
++ hdmi_check_overflow();
++ __raw_writeb(value, hdmi_base + reg);
++ hdmi_check_overflow();
++}
++EXPORT_SYMBOL(hdmi_writeb);
++
++void hdmi_mask_writeb(u8 data, unsigned int reg, u8 shift, u8 mask)
++{
++ u8 value = hdmi_readb(reg) & ~mask;
++ value |= (data << shift) & mask;
++ hdmi_writeb(value, reg);
++}
++EXPORT_SYMBOL(hdmi_mask_writeb);
++
++unsigned int hdmi_read4(unsigned int reg)
++{
++ /* read a four byte address from registers */
++ return (hdmi_readb(reg + 3) << 24) |
++ (hdmi_readb(reg + 2) << 16) |
++ (hdmi_readb(reg + 1) << 8) |
++ hdmi_readb(reg);
++}
++EXPORT_SYMBOL(hdmi_read4);
++
++void hdmi_write4(unsigned int value, unsigned int reg)
++{
++ /* write a four byte address to hdmi regs */
++ hdmi_writeb(value & 0xff, reg);
++ hdmi_writeb((value >> 8) & 0xff, reg + 1);
++ hdmi_writeb((value >> 16) & 0xff, reg + 2);
++ hdmi_writeb((value >> 24) & 0xff, reg + 3);
++}
++EXPORT_SYMBOL(hdmi_write4);
++
++static void initialize_hdmi_ih_mutes(void)
++{
++ u8 ih_mute;
++
++ /*
++ * Boot up defaults are:
++ * HDMI_IH_MUTE = 0x03 (disabled)
++ * HDMI_IH_MUTE_* = 0x00 (enabled)
++ */
++
++ /* Disable top level interrupt bits in HDMI block */
++ ih_mute = hdmi_readb(HDMI_IH_MUTE) |
++ HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT |
++ HDMI_IH_MUTE_MUTE_ALL_INTERRUPT;
++
++ hdmi_writeb(ih_mute, HDMI_IH_MUTE);
++
++ /* by default mask all interrupts */
++ hdmi_writeb(0xff, HDMI_VP_MASK);
++ hdmi_writeb(0xff, HDMI_FC_MASK0);
++ hdmi_writeb(0xff, HDMI_FC_MASK1);
++ hdmi_writeb(0xff, HDMI_FC_MASK2);
++ hdmi_writeb(0xff, HDMI_PHY_MASK0);
++ hdmi_writeb(0xff, HDMI_PHY_I2CM_INT_ADDR);
++ hdmi_writeb(0xff, HDMI_PHY_I2CM_CTLINT_ADDR);
++ hdmi_writeb(0xff, HDMI_AUD_INT);
++ hdmi_writeb(0xff, HDMI_AUD_SPDIFINT);
++ hdmi_writeb(0xff, HDMI_AUD_HBR_MASK);
++ hdmi_writeb(0xff, HDMI_GP_MASK);
++ hdmi_writeb(0xff, HDMI_A_APIINTMSK);
++ hdmi_writeb(0xff, HDMI_CEC_MASK);
++ hdmi_writeb(0xff, HDMI_I2CM_INT);
++ hdmi_writeb(0xff, HDMI_I2CM_CTLINT);
++
++ /* Disable interrupts in the IH_MUTE_* registers */
++ hdmi_writeb(0xff, HDMI_IH_MUTE_FC_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_FC_STAT1);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_FC_STAT2);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_AS_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_PHY_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_I2CM_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_CEC_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_VP_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_I2CMPHY_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_AHBDMAAUD_STAT0);
++
++ /* Enable top level interrupt bits in HDMI block */
++ ih_mute &= ~(HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT |
++ HDMI_IH_MUTE_MUTE_ALL_INTERRUPT);
++ hdmi_writeb(ih_mute, HDMI_IH_MUTE);
++}
++
++static void hdmi_set_clock_regenerator_n(unsigned int value)
++{
++ u8 val;
++
++ if (!hdmi_dma_running) {
++ hdmi_writeb(value & 0xff, HDMI_AUD_N1);
++ hdmi_writeb(0, HDMI_AUD_N2);
++ hdmi_writeb(0, HDMI_AUD_N3);
++ }
++
++ hdmi_writeb(value & 0xff, HDMI_AUD_N1);
++ hdmi_writeb((value >> 8) & 0xff, HDMI_AUD_N2);
++ hdmi_writeb((value >> 16) & 0x0f, HDMI_AUD_N3);
++
++ /* nshift factor = 0 */
++ val = hdmi_readb(HDMI_AUD_CTS3);
++ val &= ~HDMI_AUD_CTS3_N_SHIFT_MASK;
++ hdmi_writeb(val, HDMI_AUD_CTS3);
++}
++
++static void hdmi_set_clock_regenerator_cts(unsigned int cts)
++{
++ u8 val;
++
++ if (!hdmi_dma_running) {
++ hdmi_writeb(cts & 0xff, HDMI_AUD_CTS1);
++ hdmi_writeb(0, HDMI_AUD_CTS2);
++ hdmi_writeb(0, HDMI_AUD_CTS3);
++ }
++
++ /* Must be set/cleared first */
++ val = hdmi_readb(HDMI_AUD_CTS3);
++ val &= ~HDMI_AUD_CTS3_CTS_MANUAL;
++ hdmi_writeb(val, HDMI_AUD_CTS3);
++
++ hdmi_writeb(cts & 0xff, HDMI_AUD_CTS1);
++ hdmi_writeb((cts >> 8) & 0xff, HDMI_AUD_CTS2);
++ hdmi_writeb(((cts >> 16) & HDMI_AUD_CTS3_AUDCTS19_16_MASK) |
++ HDMI_AUD_CTS3_CTS_MANUAL, HDMI_AUD_CTS3);
++}
++
++struct imx6_hdmi_acr {
++ int FREQ;
++ int N;
++ int CTS;
++};
++
++struct imx6_hdmi_acr_def {
++ int CLK;
++ struct imx6_hdmi_acr imx6_data[3];
++};
++
++static const struct imx6_hdmi_acr_def imx6_hdmi_conv[] = {
++ /* 32kHz 44.1kHz 48kHz */
++ /* Clock N CTS N CTS N CTS */
++ { 2517/*5*/, { { 32000, 4096, 25175 }, { 44100, 28224, 125875 }, { 48000, 6144, 25175 } } }, /* 25,20/1.001 MHz */
++ { 2520/*0*/, { { 32000, 4096, 25200 }, { 44100, 6272, 28000 }, { 48000, 6144, 25200 } } }, /* 25.20 MHz */
++ { 2700/*0*/, { { 32000, 4096, 27000 }, { 44100, 6272, 30000 }, { 48000, 6144, 27000 } } }, /* 27.00 MHz */
++ { 2702/*7*/, { { 32000, 4096, 27027 }, { 44100, 6272, 30030 }, { 48000, 6144, 27027 } } }, /* 27.00*1.001 MHz */
++ { 5400/*0*/, { { 32000, 4096, 54000 }, { 44100, 6272, 60000 }, { 48000, 6144, 54000 } } }, /* 54.00 MHz */
++ { 5405/*4*/, { { 32000, 4096, 54054 }, { 44100, 6272, 60060 }, { 48000, 6144, 54054 } } }, /* 54.00*1.001 MHz */
++ { 7417/*6*/, { { 32000, 4096, 74176 }, { 44100, 5733, 75335 }, { 48000, 6144, 74176 } } }, /* 74.25/1.001 MHz */
++ { 7425/*0*/, { { 32000, 4096, 74250 }, { 44100, 6272, 82500 }, { 48000, 6144, 74250 } } }, /* 74.25 MHz */
++ {14836/*2*/, { { 32000, 4096, 148352 }, { 44100, 5733, 150670 }, { 48000, 6144, 148352 } } }, /* 148.50/1.001 MHz */
++ {14850/*0*/, { { 32000, 4096, 148500 }, { 44100, 6272, 165000 }, { 48000, 6144, 148500 } } }, /* 148.50 MHz */
++ { 0, { { 32000, 4096, 0 }, { 44100, 6272, 0 }, { 48000, 6144, 0 } } } /* Other */
++};
++
++static void hdmi_compute_cts_n(unsigned int freq, unsigned long pixel_clk,
++ unsigned int *n, unsigned int *cts)
++{
++ unsigned int clk = pixel_clk / 10000;
++ unsigned int frq = freq;
++ int i, j;
++
++ *n = 1;
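++ /* Fold 88.2/96/176.4/192 kHz down to 44.1/48 kHz and scale N instead */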
++ switch (frq) {
++ case 88200:
++ frq = 44100;
++ *n = 2;
++ break;
++ case 96000:
++ frq = 48000;
++ *n = 2;
++ break;
++ case 176400:
++ frq = 44100;
++ *n = 4;
++ break;
++ case 192000:
++ frq = 48000;
++ *n = 4;
++ break;
++ default:
++ break;
++ }
++
++ for (i = 0; i < ARRAY_SIZE(imx6_hdmi_conv); i++) {
++ if (imx6_hdmi_conv[i].CLK == clk || imx6_hdmi_conv[i].CLK == 0) {
++ for (j = 0; j < 3; j++) {
++ if (imx6_hdmi_conv[i].imx6_data[j].FREQ == frq) {
++ *n *= imx6_hdmi_conv[i].imx6_data[j].N;
++ *cts = imx6_hdmi_conv[i].imx6_data[j].CTS;
++ return;
++ }
++ }
++ }
++ }
++}
++
++static void hdmi_set_clk_regenerator(void)
++{
++ unsigned int clk_n, clk_cts = 0;
++
++ hdmi_compute_cts_n(sample_rate, pixel_clk_rate, &clk_n, &clk_cts);
++
++ if (clk_cts == 0) {
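++ /* Pixel clock not in the table: fall back to N = 128 * fs / 1000 */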
++ clk_n = (128 * sample_rate) / 1000;
++ clk_cts = (clk_n / 128) * pixel_clk_rate;
++ clk_cts += (clk_n % 128) * pixel_clk_rate / 128;
++ clk_cts /= (sample_rate / 10);
++ pr_debug("%s: pixel clock not supported - fallback calculation: %d, N=%d, cts: %d\n",
++ __func__, (int)pixel_clk_rate, clk_n, clk_cts);
++ }
++// if (ratio != 100)
++// clk_cts = (clk_cts * ratio) / 100;
++
++ pr_debug("%s: samplerate=%d ratio=%d pixelclk=%d N=%d cts=%d\n",
++ __func__, sample_rate, hdmi_ratio, (int)pixel_clk_rate,
++ clk_n, clk_cts);
++
++ hdmi_set_clock_regenerator_cts(clk_cts);
++ hdmi_set_clock_regenerator_n(clk_n);
++}
++
++static int hdmi_core_get_of_property(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ int err;
++ int ipu_id, disp_id;
++
++ err = of_property_read_u32(np, "ipu_id", &ipu_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property ipu_id fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "disp_id", &disp_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property disp_id fail\n");
++ return err;
++ }
++
++ mxc_hdmi_ipu_id = ipu_id;
++ mxc_hdmi_disp_id = disp_id;
++
++ return err;
++}
++
++/* Must run before the PHY is enabled for the first time to prevent
++ * an overflow condition in HDMI_IH_FC_STAT2 */
++void hdmi_init_clk_regenerator(void)
++{
++ if (pixel_clk_rate == 0) {
++ pixel_clk_rate = 74250000;
++ hdmi_set_clk_regenerator();
++ }
++}
++EXPORT_SYMBOL(hdmi_init_clk_regenerator);
++
++void hdmi_clk_regenerator_update_pixel_clock(u32 pixclock)
++{
++
++ /* Translate the pixel clock from ps (picoseconds) to Hz */
++ pixel_clk_rate = PICOS2KHZ(pixclock) * 1000UL;
++ hdmi_set_clk_regenerator();
++}
++EXPORT_SYMBOL(hdmi_clk_regenerator_update_pixel_clock);
++
++void hdmi_set_dma_mode(unsigned int dma_running)
++{
++ hdmi_dma_running = dma_running;
++ hdmi_set_clk_regenerator();
++}
++EXPORT_SYMBOL(hdmi_set_dma_mode);
++
++void hdmi_set_sample_rate(unsigned int rate)
++{
++ sample_rate = rate;
++}
++EXPORT_SYMBOL(hdmi_set_sample_rate);
++
++void hdmi_set_edid_cfg(int edid_status, struct mxc_edid_cfg *cfg)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&edid_spinlock, flags);
++ hdmi_core_edid_status = edid_status;
++ memcpy(&hdmi_core_edid_cfg, cfg, sizeof(struct mxc_edid_cfg));
++ spin_unlock_irqrestore(&edid_spinlock, flags);
++}
++EXPORT_SYMBOL(hdmi_set_edid_cfg);
++
++int hdmi_get_edid_cfg(struct mxc_edid_cfg *cfg)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&edid_spinlock, flags);
++ memcpy(cfg, &hdmi_core_edid_cfg, sizeof(struct mxc_edid_cfg));
++ spin_unlock_irqrestore(&edid_spinlock, flags);
++
++ return hdmi_core_edid_status;
++}
++EXPORT_SYMBOL(hdmi_get_edid_cfg);
++
++void hdmi_set_registered(int registered)
++{
++ hdmi_core_init = registered;
++}
++EXPORT_SYMBOL(hdmi_set_registered);
++
++int hdmi_get_registered(void)
++{
++ return hdmi_core_init;
++}
++EXPORT_SYMBOL(hdmi_get_registered);
++
++static int mxc_hdmi_core_probe(struct platform_device *pdev)
++{
++ struct mxc_hdmi_data *hdmi_data;
++ struct resource *res;
++ unsigned long flags;
++ int ret = 0;
++
++#ifdef DEBUG
++ overflow_lo = false;
++ overflow_hi = false;
++#endif
++
++ hdmi_core_init = 0;
++ hdmi_dma_running = 0;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res)
++ return -ENOENT;
++
++ ret = hdmi_core_get_of_property(pdev);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "get hdmi of property fail\n");
++ return -ENOENT;
++ }
++
++ hdmi_data = devm_kzalloc(&pdev->dev, sizeof(struct mxc_hdmi_data), GFP_KERNEL);
++ if (!hdmi_data) {
++ dev_err(&pdev->dev, "Couldn't allocate mxc hdmi mfd device\n");
++ return -ENOMEM;
++ }
++ hdmi_data->pdev = pdev;
++
++ pixel_clk = NULL;
++ sample_rate = 48000;
++ pixel_clk_rate = 0;
++ hdmi_ratio = 100;
++
++ spin_lock_init(&irq_spinlock);
++ spin_lock_init(&edid_spinlock);
++
++
++ spin_lock_init(&hdmi_cable_state_lock);
++ spin_lock_init(&hdmi_blank_state_lock);
++ spin_lock_init(&hdmi_audio_lock);
++
++ spin_lock_irqsave(&hdmi_cable_state_lock, flags);
++ hdmi_cable_state = 0;
++ spin_unlock_irqrestore(&hdmi_cable_state_lock, flags);
++
++ spin_lock_irqsave(&hdmi_blank_state_lock, flags);
++ hdmi_blank_state = 0;
++ spin_unlock_irqrestore(&hdmi_blank_state_lock, flags);
++
++ spin_lock_irqsave(&hdmi_audio_lock, flags);
++ hdmi_audio_stream_playback = NULL;
++ hdmi_abort_state = 0;
++ spin_unlock_irqrestore(&hdmi_audio_lock, flags);
++
++ isfr_clk = clk_get(&hdmi_data->pdev->dev, "hdmi_isfr");
++ if (IS_ERR(isfr_clk)) {
++ ret = PTR_ERR(isfr_clk);
++ dev_err(&hdmi_data->pdev->dev,
++ "Unable to get HDMI isfr clk: %d\n", ret);
++ goto eclkg;
++ }
++
++ ret = clk_prepare_enable(isfr_clk);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "Cannot enable HDMI clock: %d\n", ret);
++ goto eclke;
++ }
++
++ pr_debug("%s isfr_clk:%d\n", __func__,
++ (int)clk_get_rate(isfr_clk));
++
++ iahb_clk = clk_get(&hdmi_data->pdev->dev, "hdmi_iahb");
++ if (IS_ERR(iahb_clk)) {
++ ret = PTR_ERR(iahb_clk);
++ dev_err(&hdmi_data->pdev->dev,
++ "Unable to get HDMI iahb clk: %d\n", ret);
++ goto eclkg2;
++ }
++
++ ret = clk_prepare_enable(iahb_clk);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "Cannot enable HDMI clock: %d\n", ret);
++ goto eclke2;
++ }
++
++ hdmi_data->reg_phys_base = res->start;
++ if (!request_mem_region(res->start, resource_size(res),
++ dev_name(&pdev->dev))) {
++ dev_err(&pdev->dev, "request_mem_region failed\n");
++ ret = -EBUSY;
++ goto emem;
++ }
++
++ hdmi_data->reg_base = ioremap(res->start, resource_size(res));
++ if (!hdmi_data->reg_base) {
++ dev_err(&pdev->dev, "ioremap failed\n");
++ ret = -ENOMEM;
++ goto eirq;
++ }
++ hdmi_base = hdmi_data->reg_base;
++
++ pr_debug("\n%s hdmi hw base = 0x%08x\n\n", __func__, (int)res->start);
++
++ initialize_hdmi_ih_mutes();
++
++ /* Disable HDMI clocks until video/audio sub-drivers are initialized */
++ clk_disable_unprepare(isfr_clk);
++ clk_disable_unprepare(iahb_clk);
++
++ /* Replace platform data coming in with a local struct */
++ platform_set_drvdata(pdev, hdmi_data);
++
++ return ret;
++
++eirq:
++ release_mem_region(res->start, resource_size(res));
++emem:
++ clk_disable_unprepare(iahb_clk);
++eclke2:
++ clk_put(iahb_clk);
++eclkg2:
++ clk_disable_unprepare(isfr_clk);
++eclke:
++ clk_put(isfr_clk);
++eclkg:
++ return ret;
++}
++
++
++static int __exit mxc_hdmi_core_remove(struct platform_device *pdev)
++{
++ struct mxc_hdmi_data *hdmi_data = platform_get_drvdata(pdev);
++ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++ iounmap(hdmi_data->reg_base);
++ release_mem_region(res->start, resource_size(res));
++
++ return 0;
++}
++
++static const struct of_device_id imx_hdmi_dt_ids[] = {
++ { .compatible = "fsl,imx6q-hdmi-core", },
++ { .compatible = "fsl,imx6dl-hdmi-core", },
++ { /* sentinel */ }
++};
++
++static struct platform_driver mxc_hdmi_core_driver = {
++ .driver = {
++ .name = "mxc_hdmi_core",
++ .of_match_table = imx_hdmi_dt_ids,
++ .owner = THIS_MODULE,
++ },
++ .remove = __exit_p(mxc_hdmi_core_remove),
++};
++
++static int __init mxc_hdmi_core_init(void)
++{
++ return platform_driver_probe(&mxc_hdmi_core_driver,
++ mxc_hdmi_core_probe);
++}
++
++static void __exit mxc_hdmi_core_exit(void)
++{
++ platform_driver_unregister(&mxc_hdmi_core_driver);
++}
++
++subsys_initcall(mxc_hdmi_core_init);
++module_exit(mxc_hdmi_core_exit);
++
++MODULE_DESCRIPTION("Core driver for Freescale i.Mx on-chip HDMI");
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_LICENSE("GPL");
+diff -Nur linux-4.1.3/drivers/misc/sram.c linux-xbian-imx6/drivers/misc/sram.c
+--- linux-4.1.3/drivers/misc/sram.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/misc/sram.c 2015-07-27 23:13:06.095235186 +0200
+@@ -17,6 +17,7 @@
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
++#define DEBUG
+
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+diff -Nur linux-4.1.3/drivers/mmc/core/core.c linux-xbian-imx6/drivers/mmc/core/core.c
+--- linux-4.1.3/drivers/mmc/core/core.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/mmc/core/core.c 2015-07-27 23:13:06.115164084 +0200
+@@ -13,11 +13,13 @@
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
++#include <linux/clk.h>
+ #include <linux/completion.h>
+ #include <linux/device.h>
+ #include <linux/delay.h>
+ #include <linux/pagemap.h>
+ #include <linux/err.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/leds.h>
+ #include <linux/scatterlist.h>
+ #include <linux/log2.h>
+@@ -1601,6 +1603,43 @@
+ mmc_host_clk_release(host);
+ }
+
++static void mmc_card_power_up(struct mmc_host *host)
++{
++ int i;
++ struct gpio_desc **gds = host->card_reset_gpios;
++
++ for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) {
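++ /* Assert every reset line before applying external power and clock */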
++ if (gds[i]) {
++ dev_dbg(host->parent, "Asserting reset line %d", i);
++ gpiod_set_value(gds[i], 1);
++ }
++ }
++
++ if (host->card_regulator) {
++ dev_dbg(host->parent, "Enabling external regulator");
++ if (regulator_enable(host->card_regulator))
++ dev_err(host->parent, "Failed to enable external regulator");
++ }
++
++ if (host->card_clk) {
++ dev_dbg(host->parent, "Enabling external clock");
++ clk_prepare_enable(host->card_clk);
++ }
++
++ /* 20ms delay to let clocks and power settle */
++ mmc_delay(20);
++
++ for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) {
++ if (gds[i]) {
++ dev_dbg(host->parent, "Deasserting reset line %d", i);
++ gpiod_set_value(gds[i], 0);
++ }
++ }
++
++ /* 20ms delay after reset release */
++ mmc_delay(20);
++}
++
+ /*
+ * Apply power to the MMC stack. This is a two-stage process.
+ * First, we enable power to the card without the clock running.
+@@ -1661,6 +1700,9 @@
+ if (host->ios.power_mode == MMC_POWER_OFF)
+ return;
+
++ /* Power up the card/module first, if needed */
++ mmc_card_power_up(host);
++
+ mmc_host_clk_hold(host);
+
+ mmc_pwrseq_power_off(host);
+@@ -1686,7 +1728,7 @@
+ {
+ mmc_power_off(host);
+ /* Wait at least 1 ms according to SD spec */
+- mmc_delay(1);
++ mmc_delay(3);
+ mmc_power_up(host, ocr);
+ }
+
+diff -Nur linux-4.1.3/drivers/mmc/core/host.c linux-xbian-imx6/drivers/mmc/core/host.c
+--- linux-4.1.3/drivers/mmc/core/host.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/mmc/core/host.c 2015-07-27 23:13:06.115164084 +0200
+@@ -12,14 +12,18 @@
+ * MMC host class device management
+ */
+
++#include <linux/kernel.h>
++#include <linux/clk.h>
+ #include <linux/device.h>
+ #include <linux/err.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/idr.h>
+ #include <linux/of.h>
+ #include <linux/of_gpio.h>
+ #include <linux/pagemap.h>
+ #include <linux/export.h>
+ #include <linux/leds.h>
++#include <linux/regulator/consumer.h>
+ #include <linux/slab.h>
+ #include <linux/suspend.h>
+
+@@ -454,6 +458,66 @@
+
+ EXPORT_SYMBOL(mmc_of_parse);
+
++static int mmc_of_parse_child(struct mmc_host *host)
++{
++ struct device_node *np;
++ struct clk *clk;
++ int i;
++
++ if (!host->parent || !host->parent->of_node)
++ return 0;
++
++ np = host->parent->of_node;
++
++ host->card_regulator = regulator_get(host->parent, "card-external-vcc");
++ if (IS_ERR(host->card_regulator)) {
++ if (PTR_ERR(host->card_regulator) == -EPROBE_DEFER)
++ return PTR_ERR(host->card_regulator);
++ host->card_regulator = NULL;
++ }
++
++ /* Parse card power/reset/clock control */
++ if (of_find_property(np, "card-reset-gpios", NULL)) {
++ struct gpio_desc *gpd;
++ int level = 0;
++
++ /*
++ * If the regulator is enabled, then we can hold the
++ * card in reset with active-high resets. Otherwise,
++ * hold the resets low.
++ */
++ if (host->card_regulator && regulator_is_enabled(host->card_regulator))
++ level = 1;
++
++ for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) {
++ gpd = devm_gpiod_get_index(host->parent, "card-reset", i);
++ if (IS_ERR(gpd)) {
++ if (PTR_ERR(gpd) == -EPROBE_DEFER)
++ return PTR_ERR(gpd);
++ break;
++ }
++ gpiod_direction_output(gpd, gpiod_is_active_low(gpd) | level);
++ host->card_reset_gpios[i] = gpd;
++ }
++
++ gpd = devm_gpiod_get_index(host->parent, "card-reset", ARRAY_SIZE(host->card_reset_gpios));
++ if (!IS_ERR(gpd)) {
++ dev_warn(host->parent, "More reset gpios than we can handle");
++ gpiod_put(gpd);
++ }
++ }
++
++ clk = of_clk_get_by_name(np, "card_ext_clock");
++ if (IS_ERR(clk)) {
++ if (PTR_ERR(clk) == -EPROBE_DEFER)
++ return PTR_ERR(clk);
++ clk = NULL;
++ }
++ host->card_clk = clk;
++
++ return 0;
++}
++
+ /**
+ * mmc_alloc_host - initialise the per-host structure.
+ * @extra: sizeof private data structure
+@@ -533,6 +597,10 @@
+ {
+ int err;
+
++ err = mmc_of_parse_child(host);
++ if (err)
++ return err;
++
+ WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&
+ !host->ops->enable_sdio_irq);
+
+diff -Nur linux-4.1.3/drivers/mmc/core/mmc_ops.c linux-xbian-imx6/drivers/mmc/core/mmc_ops.c
+--- linux-4.1.3/drivers/mmc/core/mmc_ops.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/mmc/core/mmc_ops.c 2015-07-27 23:13:06.115164084 +0200
+@@ -513,6 +513,12 @@
+ return 0;
+
+ /*
++ * WORKAROUND: some SanDisk eMMC cards may need a short delay
++ * before sending CMD13 after CMD6
++ */
++ mdelay(1);
++
++ /*
+ * CRC errors shall only be ignored in cases were CMD13 is used to poll
+ * to detect busy completion.
+ */
+diff -Nur linux-4.1.3/drivers/mmc/core/sd.c linux-xbian-imx6/drivers/mmc/core/sd.c
+--- linux-4.1.3/drivers/mmc/core/sd.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/mmc/core/sd.c 2015-07-27 23:13:06.119149862 +0200
+@@ -521,6 +521,13 @@
+ else {
+ mmc_set_timing(card->host, timing);
+ mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
++
++ /*
++ * FIXME: SanDisk SD3.0 cards need this delay for DDR50
++ * mode to stabilize; without it we may encounter
++ * CRC errors after switching to DDR50 mode
++ */
++ mmc_delay(100);
+ }
+
+ return 0;
+diff -Nur linux-4.1.3/drivers/mmc/host/sdhci-esdhc-imx.c linux-xbian-imx6/drivers/mmc/host/sdhci-esdhc-imx.c
+--- linux-4.1.3/drivers/mmc/host/sdhci-esdhc-imx.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/mmc/host/sdhci-esdhc-imx.c 2015-07-27 23:13:06.131107202 +0200
+@@ -65,6 +65,8 @@
+ /* NOTE: the minimum valid tuning start tap for mx6sl is 1 */
+ #define ESDHC_TUNING_START_TAP 0x1
+
++#define ESDHC_TUNING_BLOCK_PATTERN_LEN 64
++
+ /* pinctrl state */
+ #define ESDHC_PINCTRL_STATE_100MHZ "state_100mhz"
+ #define ESDHC_PINCTRL_STATE_200MHZ "state_200mhz"
+@@ -112,6 +114,12 @@
+ #define ESDHC_FLAG_STD_TUNING BIT(5)
+ /* The IP has SDHCI_CAPABILITIES_1 register */
+ #define ESDHC_FLAG_HAVE_CAP1 BIT(6)
++/*
++ * The IP has errata ERR004536
+ * uSDHC: ADMA Length Mismatch Error occurs if the AHB read access is slow
+ * when reading data from the card
++ */
++#define ESDHC_FLAG_ERR004536 BIT(7)
+
+ struct esdhc_soc_data {
+ u32 flags;
+@@ -445,6 +453,10 @@
+ if (val & SDHCI_CTRL_EXEC_TUNING) {
+ v |= ESDHC_MIX_CTRL_EXE_TUNE;
+ m |= ESDHC_MIX_CTRL_FBCLK_SEL;
++ writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
++ ESDHC_STD_TUNING_EN |
++ ESDHC_TUNING_START_TAP,
++ host->ioaddr + ESDHC_TUNING_CTRL);
+ } else {
+ v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
+ }
+@@ -700,6 +712,56 @@
+ val, readl(host->ioaddr + ESDHC_TUNE_CTRL_STATUS));
+ }
+
++static void esdhc_request_done(struct mmc_request *mrq)
++{
++ complete(&mrq->completion);
++}
++
++static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode,
++ struct scatterlist *sg)
++{
++ struct mmc_command cmd = {0};
++ struct mmc_request mrq = {NULL};
++ struct mmc_data data = {0};
++
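++ /* Build a one-block read of the 64-byte tuning pattern for this opcode */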
++ cmd.opcode = opcode;
++ cmd.arg = 0;
++ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
++
++ data.blksz = ESDHC_TUNING_BLOCK_PATTERN_LEN;
++ data.blocks = 1;
++ data.flags = MMC_DATA_READ;
++ data.sg = sg;
++ data.sg_len = 1;
++
++ mrq.cmd = &cmd;
++ mrq.cmd->mrq = &mrq;
++ mrq.data = &data;
++ mrq.data->mrq = &mrq;
++ mrq.cmd->data = mrq.data;
++
++ mrq.done = esdhc_request_done;
++ init_completion(&(mrq.completion));
++
++ disable_irq(host->irq);
++ spin_lock(&host->lock);
++ host->mrq = &mrq;
++
++ sdhci_send_command(host, mrq.cmd);
++
++ spin_unlock(&host->lock);
++ enable_irq(host->irq);
++
++ wait_for_completion(&mrq.completion);
++
++ if (cmd.error)
++ return cmd.error;
++ if (data.error)
++ return data.error;
++
++ return 0;
++}
++
+ static void esdhc_post_tuning(struct sdhci_host *host)
+ {
+ u32 reg;
+@@ -711,13 +773,21 @@
+
+ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
+ {
++ struct scatterlist sg;
++ char *tuning_pattern;
+ int min, max, avg, ret;
+
++ tuning_pattern = kmalloc(ESDHC_TUNING_BLOCK_PATTERN_LEN, GFP_KERNEL);
++ if (!tuning_pattern)
++ return -ENOMEM;
++
++ sg_init_one(&sg, tuning_pattern, ESDHC_TUNING_BLOCK_PATTERN_LEN);
++
+ /* find the mininum delay first which can pass tuning */
+ min = ESDHC_TUNE_CTRL_MIN;
+ while (min < ESDHC_TUNE_CTRL_MAX) {
+ esdhc_prepare_tuning(host, min);
+- if (!mmc_send_tuning(host->mmc))
++ if (!esdhc_send_tuning_cmd(host, opcode, &sg))
+ break;
+ min += ESDHC_TUNE_CTRL_STEP;
+ }
+@@ -726,7 +796,7 @@
+ max = min + ESDHC_TUNE_CTRL_STEP;
+ while (max < ESDHC_TUNE_CTRL_MAX) {
+ esdhc_prepare_tuning(host, max);
+- if (mmc_send_tuning(host->mmc)) {
++ if (esdhc_send_tuning_cmd(host, opcode, &sg)) {
+ max -= ESDHC_TUNE_CTRL_STEP;
+ break;
+ }
+@@ -736,9 +806,11 @@
+ /* use average delay to get the best timing */
+ avg = (min + max) / 2;
+ esdhc_prepare_tuning(host, avg);
+- ret = mmc_send_tuning(host->mmc);
++ ret = esdhc_send_tuning_cmd(host, opcode, &sg);
+ esdhc_post_tuning(host);
+
++ kfree(tuning_pattern);
++
+ dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n",
+ ret ? "failed" : "passed", avg, ret);
+
+@@ -901,6 +973,9 @@
+ if (of_property_read_u32(np, "fsl,delay-line", &boarddata->delay_line))
+ boarddata->delay_line = 0;
+
++ if (of_find_property(np, "keep-power-in-suspend", NULL))
++ host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
++
+ mmc_of_parse_voltage(np, &host->ocr_mask);
+
+ return 0;
+@@ -997,10 +1072,8 @@
+ sdhci_esdhc_ops.platform_execute_tuning =
+ esdhc_executing_tuning;
+
+- if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
+- writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) |
+- ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP,
+- host->ioaddr + ESDHC_TUNING_CTRL);
++ if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
++ host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
+
+ boarddata = &imx_data->boarddata;
+ if (sdhci_esdhc_imx_probe_dt(pdev, host, boarddata) < 0) {
+diff -Nur linux-4.1.3/drivers/mxc/asrc/Kconfig linux-xbian-imx6/drivers/mxc/asrc/Kconfig
+--- linux-4.1.3/drivers/mxc/asrc/Kconfig 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/asrc/Kconfig 2015-07-27 23:13:06.166979215 +0200
+@@ -0,0 +1,14 @@
++#
++# ASRC configuration
++#
++
++menu "MXC Asynchronous Sample Rate Converter support"
++
++config MXC_ASRC
++ tristate "ASRC support"
++ depends on SOC_IMX35 || SOC_IMX53 || SOC_IMX6
++# select SND_SOC_FSL_ASRC
++ ---help---
++ Say Y to get the ASRC service.
++
++endmenu
+diff -Nur linux-4.1.3/drivers/mxc/asrc/Makefile linux-xbian-imx6/drivers/mxc/asrc/Makefile
+--- linux-4.1.3/drivers/mxc/asrc/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/asrc/Makefile 2015-07-27 23:13:06.166979215 +0200
+@@ -0,0 +1,4 @@
++#
++# Makefile for the kernel Asynchronous Sample Rate Converter driver
++#
++obj-$(CONFIG_MXC_ASRC) += mxc_asrc.o
+diff -Nur linux-4.1.3/drivers/mxc/asrc/mxc_asrc.c linux-xbian-imx6/drivers/mxc/asrc/mxc_asrc.c
+--- linux-4.1.3/drivers/mxc/asrc/mxc_asrc.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/asrc/mxc_asrc.c 2015-07-27 23:13:06.166979215 +0200
+@@ -0,0 +1,2045 @@
++/*
++ * Freescale Asynchronous Sample Rate Converter (ASRC) driver
++ *
++ * Copyright 2008-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This file is licensed under the terms of the GNU General Public License
++ * version 2. This program is licensed "as is" without any warranty of any
++ * kind, whether express or implied.
++ */
++
++#include <linux/clk.h>
++#include <linux/slab.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/regmap.h>
++#include <linux/module.h>
++#include <linux/proc_fs.h>
++#include <linux/pagemap.h>
++#include <linux/interrupt.h>
++#include <linux/miscdevice.h>
++#include <linux/dma-mapping.h>
++#include <linux/of_platform.h>
++#include <linux/platform_data/dma-imx.h>
++
++#include <linux/mxc_asrc.h>
++
++#define ASRC_PROC_PATH "driver/asrc"
++
++#define ASRC_RATIO_DECIMAL_DEPTH 26
++
++#define pair_err(fmt, ...) \
++ dev_err(asrc->dev, "Pair %c: " fmt, 'A' + index, ##__VA_ARGS__)
++
++#define pair_dbg(fmt, ...) \
++ dev_dbg(asrc->dev, "Pair %c: " fmt, 'A' + index, ##__VA_ARGS__)
++
++DEFINE_SPINLOCK(data_lock);
++DEFINE_SPINLOCK(pair_lock);
++
++/* Sample rates are aligned with those defined in the pcm.h file */
++static const unsigned char asrc_process_table[][8][2] = {
++ /* 32kHz 44.1kHz 48kHz 64kHz 88.2kHz 96kHz 176.4kHz 192kHz */
++ {{0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},}, /* 5512Hz */
++ {{0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},}, /* 8kHz */
++ {{0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},}, /* 11025Hz */
++ {{0, 1}, {0, 1}, {0, 1}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},}, /* 16kHz */
++ {{0, 1}, {0, 1}, {0, 1}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},}, /* 22050Hz */
++ {{0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 0}, {0, 0}, {0, 0},}, /* 32kHz */
++ {{0, 2}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 0}, {0, 0},}, /* 44.1kHz */
++ {{0, 2}, {0, 2}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 0}, {0, 0},}, /* 48kHz */
++ {{1, 2}, {0, 2}, {0, 2}, {0, 1}, {0, 1}, {0, 1}, {0, 1}, {0, 0},}, /* 64kHz */
++ {{1, 2}, {1, 2}, {1, 2}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, {1, 1},}, /* 88.2kHz */
++ {{1, 2}, {1, 2}, {1, 2}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, {1, 1},}, /* 96kHz */
++ {{2, 2}, {2, 2}, {2, 2}, {2, 1}, {2, 1}, {2, 1}, {2, 1}, {2, 1},}, /* 176.4kHz */
++ {{2, 2}, {2, 2}, {2, 2}, {2, 1}, {2, 1}, {2, 1}, {2, 1}, {2, 1},}, /* 192kHz */
++};
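asrc_process_table[][][] is indexed by the positions of the input and output rates in the supported_input_rate and supported_output_rate lists defined further down; each {pre, post} entry is what gets programmed into the ASRCFG pre/post-processing fields. A small sketch of the two lookups, mirroring asrc_set_process_configuration() below:

#include <stdio.h>
#include <stddef.h>

static const int in_rates[] = {
	5512, 8000, 11025, 16000, 22050, 32000, 44100, 48000,
	64000, 88200, 96000, 176400, 192000,
};
static const int out_rates[] = {
	32000, 44100, 48000, 64000, 88200, 96000, 176400, 192000,
};

static int rate_index(const int *tab, size_t n, int rate)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tab[i] == rate)
			return (int)i;
	return -1;	/* unsupported rate */
}

int main(void)
{
	int in  = rate_index(in_rates, sizeof(in_rates) / sizeof(*in_rates), 44100);
	int out = rate_index(out_rates, sizeof(out_rates) / sizeof(*out_rates), 48000);

	/* asrc_process_table[6][2] is {0, 1}: no pre-division and one
	 * post-processing stage for a 44.1 kHz -> 48 kHz conversion */
	printf("in index %d, out index %d\n", in, out);
	return 0;
}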
++
++static struct asrc_data *asrc;
++
++/*
++ * The following tables map the relationship between asrc_inclk/asrc_outclk in
++ * mxc_asrc.h and the registers of ASRCSR
++ */
++static unsigned char input_clk_map_v1[] = {
++ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf,
++};
++
++static unsigned char output_clk_map_v1[] = {
++ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf,
++};
++
++/* V2 uses the same map for input and output */
++static unsigned char input_clk_map_v2[] = {
++/* 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 0x8 0x9 0xa 0xb 0xc 0xd 0xe 0xf */
++ 0x0, 0x1, 0x2, 0x7, 0x4, 0x5, 0x6, 0x3, 0x8, 0x9, 0xa, 0xb, 0xc, 0xf, 0xe, 0xd,
++};
++
++static unsigned char output_clk_map_v2[] = {
++/* 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 0x8 0x9 0xa 0xb 0xc 0xd 0xe 0xf */
++ 0x8, 0x9, 0xa, 0x7, 0xc, 0x5, 0x6, 0xb, 0x0, 0x1, 0x2, 0x3, 0x4, 0xf, 0xe, 0xd,
++};
++
++static unsigned char *input_clk_map, *output_clk_map;
++
++enum mxc_asrc_type {
++ IMX35_ASRC,
++ IMX53_ASRC,
++};
++
++static const struct platform_device_id mxc_asrc_devtype[] = {
++ {
++ .name = "imx35-asrc",
++ .driver_data = IMX35_ASRC,
++ }, {
++ .name = "imx53-asrc",
++ .driver_data = IMX53_ASRC,
++ }, {
++ /* sentinel */
++ }
++};
++MODULE_DEVICE_TABLE(platform, mxc_asrc_devtype);
++
++static const struct of_device_id fsl_asrc_ids[] = {
++ {
++ .compatible = "fsl,imx35-asrc",
++ .data = &mxc_asrc_devtype[IMX35_ASRC],
++ }, {
++ .compatible = "fsl,imx53-asrc",
++ .data = &mxc_asrc_devtype[IMX53_ASRC],
++ }, {
++ /* sentinel */
++ }
++};
++MODULE_DEVICE_TABLE(of, fsl_asrc_ids);
++
++#ifdef DEBUG
++u32 asrc_reg[] = {
++ REG_ASRCTR,
++ REG_ASRIER,
++ REG_ASRCNCR,
++ REG_ASRCFG,
++ REG_ASRCSR,
++ REG_ASRCDR1,
++ REG_ASRCDR2,
++ REG_ASRSTR,
++ REG_ASRRA,
++ REG_ASRRB,
++ REG_ASRRC,
++ REG_ASRPM1,
++ REG_ASRPM2,
++ REG_ASRPM3,
++ REG_ASRPM4,
++ REG_ASRPM5,
++ REG_ASRTFR1,
++ REG_ASRCCR,
++ REG_ASRIDRHA,
++ REG_ASRIDRLA,
++ REG_ASRIDRHB,
++ REG_ASRIDRLB,
++ REG_ASRIDRHC,
++ REG_ASRIDRLC,
++ REG_ASR76K,
++ REG_ASR56K,
++ REG_ASRMCRA,
++ REG_ASRFSTA,
++ REG_ASRMCRB,
++ REG_ASRFSTB,
++ REG_ASRMCRC,
++ REG_ASRFSTC,
++ REG_ASRMCR1A,
++ REG_ASRMCR1B,
++ REG_ASRMCR1C,
++};
++
++static void dump_regs(void)
++{
++ u32 reg, val;
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(asrc_reg); i++) {
++ reg = asrc_reg[i];
++ regmap_read(asrc->regmap, reg, &val);
++ dev_dbg(asrc->dev, "REG addr=0x%x val=0x%x\n", reg, val);
++ }
++}
++#else
++static void dump_regs(void) {}
++#endif
++
++/* Only used for Ideal Ratio mode */
++static int asrc_set_clock_ratio(enum asrc_pair_index index,
++ int inrate, int outrate)
++{
++ unsigned long val = 0;
++ int integ, i;
++
++ if (outrate == 0) {
++ dev_err(asrc->dev, "wrong output sample rate: %d\n", outrate);
++ return -EINVAL;
++ }
++
++ /* Formula: r = ((1 << ASRC_RATIO_DECIMAL_DEPTH) * inrate) / outrate */
++ for (integ = 0; inrate >= outrate; integ++)
++ inrate -= outrate;
++
++ val |= (integ << ASRC_RATIO_DECIMAL_DEPTH);
++
++ for (i = 1; i <= ASRC_RATIO_DECIMAL_DEPTH; i++) {
++ if ((inrate * 2) >= outrate) {
++ val |= (1 << (ASRC_RATIO_DECIMAL_DEPTH - i));
++ inrate = inrate * 2 - outrate;
++ } else
++ inrate = inrate << 1;
++
++ if (inrate == 0)
++ break;
++ }
++
++ regmap_write(asrc->regmap, REG_ASRIDRL(index), val);
++ regmap_write(asrc->regmap, REG_ASRIDRH(index), (val >> 24));
++
++ return 0;
++}
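The two loops above implement a restoring long division that yields floor(inrate * 2^26 / outrate): the first pass pulls out the integer part by repeated subtraction, the second produces one fraction bit per iteration and stops early once the remainder hits zero. On a host with 64-bit arithmetic the same value falls out of a single wide division; a sketch cross-checking the two forms, assuming the 26-bit depth used above:

#include <stdio.h>
#include <stdint.h>

#define DEPTH 26	/* ASRC_RATIO_DECIMAL_DEPTH */

/* same bitwise algorithm as asrc_set_clock_ratio() */
static uint32_t ratio_bitwise(uint32_t inrate, uint32_t outrate)
{
	uint32_t val = 0, integ;
	int i;

	for (integ = 0; inrate >= outrate; integ++)
		inrate -= outrate;
	val |= integ << DEPTH;

	for (i = 1; i <= DEPTH; i++) {
		if (inrate * 2 >= outrate) {
			val |= 1u << (DEPTH - i);
			inrate = inrate * 2 - outrate;
		} else {
			inrate <<= 1;
		}
		if (inrate == 0)
			break;
	}
	return val;
}

int main(void)
{
	uint32_t in = 44100, out = 48000;
	uint32_t a = ratio_bitwise(in, out);
	uint32_t b = (uint32_t)(((uint64_t)in << DEPTH) / out);

	/* both print 0x03accccc, i.e. 44100/48000 in 6.26 fixed point */
	printf("bitwise 0x%08x, wide division 0x%08x\n", a, b);
	return 0;
}

The driver then splits the result across the pair's ASRIDRL/ASRIDRH registers, writing the bits above 24 via val >> 24.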
++
++/* Corresponding to asrc_process_table */
++static int supported_input_rate[] = {
++ 5512, 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000, 88200,
++ 96000, 176400, 192000,
++};
++
++static int supported_output_rate[] = {
++ 32000, 44100, 48000, 64000, 88200, 96000, 176400, 192000,
++};
++
++static int asrc_set_process_configuration(enum asrc_pair_index index,
++ int inrate, int outrate)
++{
++ int in, out;
++
++ for (in = 0; in < ARRAY_SIZE(supported_input_rate); in++) {
++ if (inrate == supported_input_rate[in])
++ break;
++ }
++
++ if (in == ARRAY_SIZE(supported_input_rate)) {
++ dev_err(asrc->dev, "unsupported input sample rate: %d\n", in);
++ return -EINVAL;
++ }
++
++ for (out = 0; out < ARRAY_SIZE(supported_output_rate); out++) {
++ if (outrate == supported_output_rate[out])
++ break;
++ }
++
++ if (out == ARRAY_SIZE(supported_output_rate)) {
++ dev_err(asrc->dev, "unsupported output sample rate: %d\n", out);
++ return -EINVAL;
++ }
++
++ regmap_update_bits(asrc->regmap, REG_ASRCFG,
++ ASRCFG_PREMODx_MASK(index) | ASRCFG_POSTMODx_MASK(index),
++ ASRCFG_PREMOD(index, asrc_process_table[in][out][0]) |
++ ASRCFG_POSTMOD(index, asrc_process_table[in][out][1]));
++
++ return 0;
++}
++
++static int asrc_get_asrck_clock_divider(int samplerate)
++{
++ unsigned int prescaler, divider, ratio, ra, i;
++ unsigned long bitclk;
++
++ if (samplerate == 0) {
++ dev_err(asrc->dev, "invalid sample rate: %d\n", samplerate);
++ return -EINVAL;
++ }
++
++ bitclk = clk_get_rate(asrc->asrck_clk);
++
++ ra = bitclk / samplerate;
++ ratio = ra;
++
++ /* Calculate the prescaler */
++ for (i = 0; ratio > 8; i++)
++ ratio >>= 1;
++
++ prescaler = i;
++
++ /* Calculate the divider */
++ divider = i ? (((ra + (1 << (i - 1)) - 1) >> i) - 1) : (ra - 1);
++
++ /* The total divide factor is (2 ^ prescaler) * (divider + 1) */
++ return (divider << ASRCDRx_AxCPx_WIDTH) + prescaler;
++}
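The prescaler ends up as the number of right shifts needed to bring the clock-to-sample-rate ratio down to 8 or below, and the divider is that ratio rounded up at the prescaled granularity, minus one, so the effective divide factor is (divider + 1) * 2^prescaler. A worked sketch, assuming a hypothetical 24.576 MHz ASRCK clock and a 3-bit prescaler field width for ASRCDRx_AxCPx_WIDTH:

#include <stdio.h>

#define AxCPx_WIDTH 3	/* assumption: prescaler field width from mxc_asrc.h */

/* same arithmetic as asrc_get_asrck_clock_divider() */
static int clock_divider(unsigned long bitclk, int samplerate)
{
	unsigned int prescaler, divider, ratio, ra, i;

	ra = bitclk / samplerate;
	ratio = ra;

	for (i = 0; ratio > 8; i++)
		ratio >>= 1;
	prescaler = i;

	divider = i ? (((ra + (1 << (i - 1)) - 1) >> i) - 1) : (ra - 1);

	return (divider << AxCPx_WIDTH) + prescaler;
}

int main(void)
{
	/* 24576000 / 48000 = 512 -> prescaler 6, divider 7,
	 * since (7 + 1) * 2^6 = 512; prints 0x3e */
	printf("register field 0x%x\n", clock_divider(24576000UL, 48000));
	return 0;
}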
++
++int asrc_req_pair(int chn_num, enum asrc_pair_index *index)
++{
++ int imax = 0, busy = 0, i, ret = 0;
++ unsigned long lock_flags;
++ struct asrc_pair *pair;
++
++ spin_lock_irqsave(&data_lock, lock_flags);
++
++ for (i = ASRC_PAIR_A; i < ASRC_PAIR_MAX_NUM; i++) {
++ pair = &asrc->asrc_pair[i];
++ if (chn_num > pair->chn_max) {
++ imax++;
++ continue;
++ } else if (pair->active) {
++ busy++;
++ continue;
++ }
++ /* Save the current qualified pair */
++ *index = i;
++
++ /* Check if this pair is a perfect one */
++ if (chn_num == pair->chn_max)
++ break;
++ }
++
++ if (imax == ASRC_PAIR_MAX_NUM) {
++ dev_err(asrc->dev, "no pair could afford required channel number\n");
++ ret = -EINVAL;
++ } else if (busy == ASRC_PAIR_MAX_NUM) {
++ dev_err(asrc->dev, "all pairs are busy now\n");
++ ret = -EBUSY;
++ } else if (busy + imax >= ASRC_PAIR_MAX_NUM) {
++ dev_err(asrc->dev, "all affordable pairs are busy now\n");
++ ret = -EBUSY;
++ } else {
++ pair = &asrc->asrc_pair[*index];
++ pair->chn_num = chn_num;
++ pair->active = 1;
++ }
++
++ spin_unlock_irqrestore(&data_lock, lock_flags);
++
++ if (!ret) {
++ clk_prepare_enable(asrc->mem_clk);
++ clk_prepare_enable(asrc->ipg_clk);
++ clk_prepare_enable(asrc->asrck_clk);
++ clk_prepare_enable(asrc->dma_clk);
++ }
++
++ return ret;
++}
++EXPORT_SYMBOL(asrc_req_pair);
++
++void asrc_release_pair(enum asrc_pair_index index)
++{
++ struct asrc_pair *pair = &asrc->asrc_pair[index];
++ unsigned long lock_flags;
++
++ spin_lock_irqsave(&data_lock, lock_flags);
++
++ pair->active = 0;
++ pair->overload_error = 0;
++
++ spin_unlock_irqrestore(&data_lock, lock_flags);
++
++ /* Disable PAIR */
++ regmap_update_bits(asrc->regmap, REG_ASRCTR, ASRCTR_ASRCEx_MASK(index), 0);
++}
++EXPORT_SYMBOL(asrc_release_pair);
++
++int asrc_config_pair(struct asrc_config *config)
++{
++ u32 inrate = config->input_sample_rate, indiv;
++ u32 outrate = config->output_sample_rate, outdiv;
++ int ret, channels, index = config->pair;
++ unsigned long lock_flags;
++
++ /* Set the channel number */
++ spin_lock_irqsave(&data_lock, lock_flags);
++ asrc->asrc_pair[index].chn_num = config->channel_num;
++ spin_unlock_irqrestore(&data_lock, lock_flags);
++
++ if (asrc->channel_bits > 3)
++ channels = config->channel_num;
++ else
++ channels = (config->channel_num + 1) / 2;
++
++ /* Update channel number of current pair */
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(index, asrc->channel_bits),
++ ASRCNCR_ANCx_set(index, channels, asrc->channel_bits));
++
++ /* Set the clock source */
++ regmap_update_bits(asrc->regmap, REG_ASRCSR,
++ ASRCSR_AICSx_MASK(index) | ASRCSR_AOCSx_MASK(index),
++ ASRCSR_AICS(index, input_clk_map[config->inclk]) |
++ ASRCSR_AOCS(index, output_clk_map[config->outclk]));
++
++ /* Default setting: Automatic selection for processing mode */
++ regmap_update_bits(asrc->regmap, REG_ASRCTR,
++ ASRCTR_ATSx_MASK(index), ASRCTR_ATS(index));
++ regmap_update_bits(asrc->regmap, REG_ASRCTR, ASRCTR_USRx_MASK(index), 0);
++
++ /* Default Input Clock Divider Setting */
++ switch (config->inclk & ASRCSR_AxCSx_MASK) {
++ case INCLK_SPDIF_RX:
++ indiv = ASRC_PRESCALER_SPDIF_RX;
++ break;
++ case INCLK_SPDIF_TX:
++ indiv = ASRC_PRESCALER_SPDIF_TX;
++ break;
++ case INCLK_ASRCK1_CLK:
++ indiv = asrc_get_asrck_clock_divider(inrate);
++ break;
++ default:
++ switch (config->input_word_width) {
++ case ASRC_WIDTH_16_BIT:
++ indiv = ASRC_PRESCALER_I2S_16BIT;
++ break;
++ case ASRC_WIDTH_24_BIT:
++ indiv = ASRC_PRESCALER_I2S_24BIT;
++ break;
++ default:
++ pair_err("unsupported input word width %d\n",
++ config->input_word_width);
++ return -EINVAL;
++ }
++ break;
++ }
++
++ /* Default Output Clock Divider Setting */
++ switch (config->outclk & ASRCSR_AxCSx_MASK) {
++ case OUTCLK_SPDIF_RX:
++ outdiv = ASRC_PRESCALER_SPDIF_RX;
++ break;
++ case OUTCLK_SPDIF_TX:
++ outdiv = ASRC_PRESCALER_SPDIF_TX;
++ break;
++ case OUTCLK_ASRCK1_CLK:
++ if ((config->inclk & ASRCSR_AxCSx_MASK) == INCLK_NONE)
++ outdiv = ASRC_PRESCALER_IDEAL_RATIO;
++ else
++ outdiv = asrc_get_asrck_clock_divider(outrate);
++ break;
++ default:
++ switch (config->output_word_width) {
++ case ASRC_WIDTH_16_BIT:
++ outdiv = ASRC_PRESCALER_I2S_16BIT;
++ break;
++ case ASRC_WIDTH_24_BIT:
++ outdiv = ASRC_PRESCALER_I2S_24BIT;
++ break;
++ default:
++ pair_err("unsupported output word width %d\n",
++ config->output_word_width);
++ return -EINVAL;
++ }
++ break;
++ }
++
++ /* indiv and outdiv include the prescaler's value, so add its MASK too */
++ regmap_update_bits(asrc->regmap, REG_ASRCDR(index),
++ ASRCDRx_AOCPx_MASK(index) | ASRCDRx_AICPx_MASK(index) |
++ ASRCDRx_AOCDx_MASK(index) | ASRCDRx_AICDx_MASK(index),
++ ASRCDRx_AOCP(index, outdiv) | ASRCDRx_AICP(index, indiv));
++
++ /* Check whether ideal ratio is a must */
++ switch (config->inclk & ASRCSR_AxCSx_MASK) {
++ case INCLK_NONE:
++ /* Clear ASTSx bit to use ideal ratio */
++ regmap_update_bits(asrc->regmap, REG_ASRCTR,
++ ASRCTR_ATSx_MASK(index), 0);
++
++ regmap_update_bits(asrc->regmap, REG_ASRCTR,
++ ASRCTR_IDRx_MASK(index) | ASRCTR_USRx_MASK(index),
++ ASRCTR_IDR(index) | ASRCTR_USR(index));
++
++ ret = asrc_set_clock_ratio(index, inrate, outrate);
++ if (ret)
++ return ret;
++
++ ret = asrc_set_process_configuration(index, inrate, outrate);
++ if (ret)
++ return ret;
++
++ break;
++ case INCLK_ASRCK1_CLK:
++ /* This case and the default are both retained for v1 */
++ if (inrate == 44100 || inrate == 88200) {
++ pair_err("unsupported sample rate %d by selected clock\n",
++ inrate);
++ return -EINVAL;
++ }
++ break;
++ default:
++ if ((config->outclk & ASRCSR_AxCSx_MASK) != OUTCLK_ASRCK1_CLK)
++ break;
++
++ if (outrate == 44100 || outrate == 88200) {
++ pair_err("unsupported sample rate %d by selected clock\n",
++ outrate);
++ return -EINVAL;
++ }
++ break;
++ }
++
++ /* Configure input and output word widths */
++ if (config->output_word_width == ASRC_WIDTH_8_BIT) {
++ pair_err("unsupported wordwidth for output: 8bit\n");
++ pair_err("output only support: 16bit or 24bit\n");
++ return -EINVAL;
++ }
++
++ regmap_update_bits(asrc->regmap, REG_ASRMCR1(index),
++ ASRMCR1x_OW16_MASK | ASRMCR1x_IWD_MASK,
++ ASRMCR1x_OW16(config->output_word_width) |
++ ASRMCR1x_IWD(config->input_word_width));
++
++ /* Enable BUFFER STALL */
++ regmap_update_bits(asrc->regmap, REG_ASRMCR(index),
++ ASRMCRx_BUFSTALLx_MASK, ASRMCRx_BUFSTALLx);
++
++ /* Set Threshold for input and output FIFO */
++ return asrc_set_watermark(index, ASRC_INPUTFIFO_THRESHOLD,
++ ASRC_INPUTFIFO_THRESHOLD);
++}
++EXPORT_SYMBOL(asrc_config_pair);
++
++int asrc_set_watermark(enum asrc_pair_index index, u32 in_wm, u32 out_wm)
++{
++ if (in_wm > ASRC_FIFO_THRESHOLD_MAX || out_wm > ASRC_FIFO_THRESHOLD_MAX) {
++ pair_err("invalid watermark!\n");
++ return -EINVAL;
++ }
++
++ return regmap_update_bits(asrc->regmap, REG_ASRMCR(index),
++ ASRMCRx_EXTTHRSHx_MASK | ASRMCRx_INFIFO_THRESHOLD_MASK |
++ ASRMCRx_OUTFIFO_THRESHOLD_MASK,
++ ASRMCRx_EXTTHRSHx | ASRMCRx_INFIFO_THRESHOLD(in_wm) |
++ ASRMCRx_OUTFIFO_THRESHOLD(out_wm));
++}
++EXPORT_SYMBOL(asrc_set_watermark);
++
++void asrc_start_conv(enum asrc_pair_index index)
++{
++ int reg, retry, channels, i;
++
++ regmap_update_bits(asrc->regmap, REG_ASRCTR,
++ ASRCTR_ASRCEx_MASK(index), ASRCTR_ASRCE(index));
++
++ /* Wait for status of initialization */
++ for (retry = 10, reg = 0; !reg && retry; --retry) {
++ udelay(5);
++ regmap_read(asrc->regmap, REG_ASRCFG, &reg);
++ reg &= ASRCFG_INIRQx_MASK(index);
++ }
++
++ /* Set the input fifo to ASRC STALL level */
++ regmap_read(asrc->regmap, REG_ASRCNCR, &reg);
++ channels = ASRCNCR_ANCx_get(index, reg, asrc->channel_bits);
++ for (i = 0; i < channels * 4; i++)
++ regmap_write(asrc->regmap, REG_ASRDI(index), 0);
++
++ /* Overload Interrupt Enable */
++ regmap_write(asrc->regmap, REG_ASRIER, ASRIER_AOLIE);
++}
++EXPORT_SYMBOL(asrc_start_conv);
++
++void asrc_stop_conv(enum asrc_pair_index index)
++{
++ regmap_update_bits(asrc->regmap, REG_ASRCTR, ASRCTR_ASRCEx_MASK(index), 0);
++}
++EXPORT_SYMBOL(asrc_stop_conv);
++
++void asrc_finish_conv(enum asrc_pair_index index)
++{
++ clk_disable_unprepare(asrc->dma_clk);
++ clk_disable_unprepare(asrc->asrck_clk);
++ clk_disable_unprepare(asrc->ipg_clk);
++ clk_disable_unprepare(asrc->mem_clk);
++}
++EXPORT_SYMBOL(asrc_finish_conv);
++
++#define SET_OVERLOAD_ERR(index, err, msg) \
++ do { \
++ asrc->asrc_pair[index].overload_error |= err; \
++ pair_dbg(msg); \
++ } while (0)
++
++static irqreturn_t asrc_isr(int irq, void *dev_id)
++{
++ enum asrc_pair_index index;
++ u32 status;
++
++ regmap_read(asrc->regmap, REG_ASRSTR, &status);
++
++ for (index = ASRC_PAIR_A; index < ASRC_PAIR_MAX_NUM; index++) {
++ if (asrc->asrc_pair[index].active == 0)
++ continue;
++ if (status & ASRSTR_ATQOL)
++ SET_OVERLOAD_ERR(index, ASRC_TASK_Q_OVERLOAD,
++ "Task Queue FIFO overload");
++ if (status & ASRSTR_AOOL(index))
++ SET_OVERLOAD_ERR(index, ASRC_OUTPUT_TASK_OVERLOAD,
++ "Output Task Overload");
++ if (status & ASRSTR_AIOL(index))
++ SET_OVERLOAD_ERR(index, ASRC_INPUT_TASK_OVERLOAD,
++ "Input Task Overload");
++ if (status & ASRSTR_AODO(index))
++ SET_OVERLOAD_ERR(index, ASRC_OUTPUT_BUFFER_OVERFLOW,
++ "Output Data Buffer has overflowed");
++ if (status & ASRSTR_AIDU(index))
++ SET_OVERLOAD_ERR(index, ASRC_INPUT_BUFFER_UNDERRUN,
++ "Input Data Buffer has underflowed");
++ }
++
++ /* Clear overload error */
++ regmap_write(asrc->regmap, REG_ASRSTR, ASRSTR_AOLE);
++
++ return IRQ_HANDLED;
++}
++
++void asrc_get_status(struct asrc_status_flags *flags)
++{
++ enum asrc_pair_index index = flags->index;
++ unsigned long lock_flags;
++
++ spin_lock_irqsave(&data_lock, lock_flags);
++
++ flags->overload_error = asrc->asrc_pair[index].overload_error;
++
++ spin_unlock_irqrestore(&data_lock, lock_flags);
++}
++EXPORT_SYMBOL(asrc_get_status);
++
++u32 asrc_get_per_addr(enum asrc_pair_index index, bool in)
++{
++ return asrc->paddr + (in ? REG_ASRDI(index) : REG_ASRDO(index));
++}
++EXPORT_SYMBOL(asrc_get_per_addr);
++
++static int mxc_init_asrc(void)
++{
++ /* Halt ASRC internal FP when input FIFO needs data for pair A, B, C */
++ regmap_write(asrc->regmap, REG_ASRCTR, ASRCTR_ASRCEN);
++
++ /* Disable interrupt by default */
++ regmap_write(asrc->regmap, REG_ASRIER, 0x0);
++
++ /* Default 2:6:2 channel assignment */
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(ASRC_PAIR_A, asrc->channel_bits),
++ ASRCNCR_ANCx_set(ASRC_PAIR_A, 2, asrc->channel_bits));
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(ASRC_PAIR_B, asrc->channel_bits),
++ ASRCNCR_ANCx_set(ASRC_PAIR_B, 6, asrc->channel_bits));
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(ASRC_PAIR_C, asrc->channel_bits),
++ ASRCNCR_ANCx_set(ASRC_PAIR_C, 2, asrc->channel_bits));
++
++ /* Parameter Registers recommended settings */
++ regmap_write(asrc->regmap, REG_ASRPM1, 0x7fffff);
++ regmap_write(asrc->regmap, REG_ASRPM2, 0x255555);
++ regmap_write(asrc->regmap, REG_ASRPM3, 0xff7280);
++ regmap_write(asrc->regmap, REG_ASRPM4, 0xff7280);
++ regmap_write(asrc->regmap, REG_ASRPM5, 0xff7280);
++
++ /* Base address for task queue FIFO. Set to 0x7C */
++ regmap_update_bits(asrc->regmap, REG_ASRTFR1,
++ ASRTFR1_TF_BASE_MASK, ASRTFR1_TF_BASE(0xfc));
++
++ /* Set the processing clock for 76KHz, 133M */
++ regmap_write(asrc->regmap, REG_ASR76K, 0x06D6);
++
++ /* Set the processing clock for 56KHz, 133M */
++ return regmap_write(asrc->regmap, REG_ASR56K, 0x0947);
++}
++
++#define ASRC_xPUT_DMA_CALLBACK(in) \
++ ((in) ? asrc_input_dma_callback : asrc_output_dma_callback)
++
++static void asrc_input_dma_callback(void *data)
++{
++ struct asrc_pair_params *params = (struct asrc_pair_params *)data;
++
++ complete(&params->input_complete);
++}
++
++static void asrc_output_dma_callback(void *data)
++{
++ struct asrc_pair_params *params = (struct asrc_pair_params *)data;
++
++ complete(&params->output_complete);
++}
++
++static unsigned int asrc_get_output_FIFO_size(enum asrc_pair_index index)
++{
++ u32 val;
++
++ regmap_read(asrc->regmap, REG_ASRFST(index), &val);
++
++ val &= ASRFSTx_OUTPUT_FIFO_MASK;
++
++ return val >> ASRFSTx_OUTPUT_FIFO_SHIFT;
++}
++
++static u32 asrc_read_one_from_output_FIFO(enum asrc_pair_index index)
++{
++ u32 val;
++
++ regmap_read(asrc->regmap, REG_ASRDO(index), &val);
++
++ return val;
++}
++
++static void asrc_read_output_FIFO(struct asrc_pair_params *params)
++{
++ u32 *reg24 = params->output_last_period.dma_vaddr;
++ u16 *reg16 = params->output_last_period.dma_vaddr;
++ enum asrc_pair_index index = params->index;
++ u32 i, j, reg, size, t_size;
++ bool bit24 = false;
++
++ if (params->output_word_width == ASRC_WIDTH_24_BIT)
++ bit24 = true;
++
++ t_size = 0;
++ do {
++ size = asrc_get_output_FIFO_size(index);
++ for (i = 0; i < size; i++) {
++ for (j = 0; j < params->channel_nums; j++) {
++ reg = asrc_read_one_from_output_FIFO(index);
++ if (bit24) {
++ *(reg24) = reg;
++ reg24++;
++ } else {
++ *(reg16) = (u16)reg;
++ reg16++;
++ }
++ }
++ }
++ t_size += size;
++ } while (size);
++
++ if (t_size > params->last_period_sample)
++ t_size = params->last_period_sample;
++
++ params->output_last_period.length = t_size * params->channel_nums * 2;
++ if (bit24)
++ params->output_last_period.length *= 2;
++}
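The tail length works out to samples * channels * bytes-per-sample: 16-bit samples occupy 2 bytes each, while 24-bit samples travel as one 32-bit FIFO word apiece, hence the extra doubling at the end. In sketch form, with made-up sizes:

#include <stdio.h>
#include <stdbool.h>

static unsigned int tail_bytes(unsigned int samples, unsigned int channels,
			       bool bit24)
{
	unsigned int len = samples * channels * 2;	/* 16-bit: 2 bytes/sample */

	if (bit24)
		len *= 2;	/* 24-bit samples are stored as 32-bit words */
	return len;
}

int main(void)
{
	printf("%u\n", tail_bytes(32, 2, false));	/* 128 bytes */
	printf("%u\n", tail_bytes(32, 2, true));	/* 256 bytes */
	return 0;
}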
++
++static void mxc_free_dma_buf(struct asrc_pair_params *params)
++{
++ if (params->input_dma_total.dma_vaddr != NULL) {
++ kfree(params->input_dma_total.dma_vaddr);
++ params->input_dma_total.dma_vaddr = NULL;
++ }
++
++ if (params->output_dma_total.dma_vaddr != NULL) {
++ kfree(params->output_dma_total.dma_vaddr);
++ params->output_dma_total.dma_vaddr = NULL;
++ }
++
++ if (params->output_last_period.dma_vaddr) {
++ dma_free_coherent(asrc->dev, 1024 * params->last_period_sample,
++ params->output_last_period.dma_vaddr,
++ params->output_last_period.dma_paddr);
++ params->output_last_period.dma_vaddr = NULL;
++ }
++}
++
++static int mxc_allocate_dma_buf(struct asrc_pair_params *params)
++{
++ struct dma_block *input_a, *output_a, *last_period;
++ enum asrc_pair_index index = params->index;
++
++ input_a = &params->input_dma_total;
++ output_a = &params->output_dma_total;
++ last_period = &params->output_last_period;
++
++ input_a->dma_vaddr = kzalloc(input_a->length, GFP_KERNEL);
++ if (!input_a->dma_vaddr) {
++ pair_err("failed to allocate input dma buffer\n");
++ goto exit;
++ }
++ input_a->dma_paddr = virt_to_dma(NULL, input_a->dma_vaddr);
++
++ output_a->dma_vaddr = kzalloc(output_a->length, GFP_KERNEL);
++ if (!output_a->dma_vaddr) {
++ pair_err("failed to allocate output dma buffer\n");
++ goto exit;
++ }
++ output_a->dma_paddr = virt_to_dma(NULL, output_a->dma_vaddr);
++
++ last_period->dma_vaddr = dma_zalloc_coherent(asrc->dev,
++ 1024 * params->last_period_sample,
++ &last_period->dma_paddr, GFP_KERNEL);
++ if (!last_period->dma_vaddr) {
++ pair_err("failed to allocate last period buffer\n");
++ goto exit;
++ }
++
++ return 0;
++
++exit:
++ mxc_free_dma_buf(params);
++
++ return -ENOBUFS;
++}
++
++static struct dma_chan *imx_asrc_get_dma_channel(enum asrc_pair_index index, bool in)
++{
++ char name[4];
++
++ sprintf(name, "%cx%c", in ? 'r' : 't', index + 'a');
++
++ return dma_request_slave_channel(asrc->dev, name);
++}
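The slave-channel name packs direction and pair letter, so pair A uses "rxa" for its input task and "txa" for its output task; the names must match the dma-names entries of the ASRC node in the device tree. A quick sketch enumerating all six:

#include <stdio.h>

int main(void)
{
	char name[4];
	int index, in;

	for (index = 0; index < 3; index++)		/* pairs A..C */
		for (in = 1; in >= 0; in--) {
			snprintf(name, sizeof(name), "%cx%c",
				 in ? 'r' : 't', index + 'a');
			printf("%s\n", name);		/* rxa, txa, rxb, ... */
		}
	return 0;
}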
++
++static int imx_asrc_dma_config(struct asrc_pair_params *params,
++ struct dma_chan *chan, u32 dma_addr,
++ void *buf_addr, u32 buf_len, bool in,
++ enum asrc_word_width word_width)
++{
++ enum asrc_pair_index index = params->index;
++ struct dma_async_tx_descriptor *desc;
++ struct dma_slave_config slave_config;
++ enum dma_slave_buswidth buswidth;
++ struct scatterlist *sg;
++ unsigned int sg_nent, i;
++ int ret;
++
++ if (in) {
++ sg = params->input_sg;
++ sg_nent = params->input_sg_nodes;
++ desc = params->desc_in;
++ } else {
++ sg = params->output_sg;
++ sg_nent = params->output_sg_nodes;
++ desc = params->desc_out;
++ }
++
++ switch (word_width) {
++ case ASRC_WIDTH_16_BIT:
++ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
++ break;
++ case ASRC_WIDTH_24_BIT:
++ buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
++ break;
++ default:
++ pair_err("invalid word width\n");
++ return -EINVAL;
++ }
++
++// slave_config.dma_request0 = 0;
++// slave_config.dma_request1 = 0;
++
++ if (in) {
++ slave_config.direction = DMA_MEM_TO_DEV;
++ slave_config.dst_addr = dma_addr;
++ slave_config.dst_addr_width = buswidth;
++ slave_config.dst_maxburst =
++ params->input_wm * params->channel_nums;
++ } else {
++ slave_config.direction = DMA_DEV_TO_MEM;
++ slave_config.src_addr = dma_addr;
++ slave_config.src_addr_width = buswidth;
++ slave_config.src_maxburst =
++ params->output_wm * params->channel_nums;
++ }
++ ret = dmaengine_slave_config(chan, &slave_config);
++ if (ret) {
++ pair_err("failed to config dmaengine for %sput task: %d\n",
++ in ? "in" : "out", ret);
++ return -EINVAL;
++ }
++
++ sg_init_table(sg, sg_nent);
++ switch (sg_nent) {
++ case 1:
++ sg_init_one(sg, buf_addr, buf_len);
++ break;
++ case 2:
++ case 3:
++ case 4:
++ for (i = 0; i < (sg_nent - 1); i++)
++ sg_set_buf(&sg[i], buf_addr + i * ASRC_MAX_BUFFER_SIZE,
++ ASRC_MAX_BUFFER_SIZE);
++
++ sg_set_buf(&sg[i], buf_addr + i * ASRC_MAX_BUFFER_SIZE,
++ buf_len - ASRC_MAX_BUFFER_SIZE * i);
++ break;
++ default:
++ pair_err("invalid input DMA nodes number: %d\n", sg_nent);
++ return -EINVAL;
++ }
++
++ ret = dma_map_sg(NULL, sg, sg_nent, slave_config.direction);
++ if (ret != sg_nent) {
++ pair_err("failed to map dma sg for %sput task\n",
++ in ? "in" : "out");
++ return -EINVAL;
++ }
++
++ desc = dmaengine_prep_slave_sg(chan, sg, sg_nent,
++ slave_config.direction, DMA_PREP_INTERRUPT);
++ if (!desc) {
++ pair_err("failed to prepare slave sg for %sput task\n",
++ in ? "in" : "out");
++ return -EINVAL;
++ }
++
++ if (in) {
++ params->desc_in = desc;
++ params->desc_in->callback = asrc_input_dma_callback;
++ } else {
++ params->desc_out = desc;
++ params->desc_out->callback = asrc_output_dma_callback;
++ }
++
++ desc->callback = ASRC_xPUT_DMA_CALLBACK(in);
++ desc->callback_param = params;
++
++ return 0;
++}
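The scatterlist split gives every node a full ASRC_MAX_BUFFER_SIZE chunk except the last, which takes the remainder; the node count computed later in mxc_asrc_prepare_io_buffer() (length / ASRC_MAX_BUFFER_SIZE + 1) implicitly assumes the length is not an exact multiple of the chunk size. A sketch of the resulting layout, with a stand-in chunk size:

#include <stdio.h>

#define MAX_BUF (64 * 1024)	/* assumption: stand-in for ASRC_MAX_BUFFER_SIZE */

int main(void)
{
	unsigned int dma_len = 150 * 1024;		/* hypothetical transfer */
	unsigned int nodes = dma_len / MAX_BUF + 1;	/* as the driver computes it */
	unsigned int i;

	for (i = 0; i < nodes - 1; i++)
		printf("node %u: %u bytes\n", i, MAX_BUF);	/* full 64 KiB chunks */
	printf("node %u: %u bytes\n", i, dma_len - MAX_BUF * i);	/* 22 KiB remainder */
	return 0;
}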
++
++static int mxc_asrc_prepare_io_buffer(struct asrc_pair_params *params,
++ struct asrc_convert_buffer *pbuf, bool in)
++{
++ enum asrc_pair_index index = params->index;
++ struct dma_chan *dma_channel;
++ enum asrc_word_width width;
++ unsigned int *dma_len, *sg_nodes, buf_len, wm;
++ void __user *buf_vaddr;
++ void *dma_vaddr;
++ u32 word_size, fifo_addr;
++
++ if (in) {
++ dma_channel = params->input_dma_channel;
++ dma_vaddr = params->input_dma_total.dma_vaddr;
++ dma_len = &params->input_dma_total.length;
++ width = params->input_word_width;
++ sg_nodes = &params->input_sg_nodes;
++ wm = params->input_wm;
++ buf_vaddr = (void __user *)pbuf->input_buffer_vaddr;
++ buf_len = pbuf->input_buffer_length;
++ } else {
++ dma_channel = params->output_dma_channel;
++ dma_vaddr = params->output_dma_total.dma_vaddr;
++ dma_len = &params->output_dma_total.length;
++ width = params->output_word_width;
++ sg_nodes = &params->output_sg_nodes;
++ wm = params->last_period_sample;
++ buf_vaddr = (void __user *)pbuf->output_buffer_vaddr;
++ buf_len = pbuf->output_buffer_length;
++ }
++
++ switch (width) {
++ case ASRC_WIDTH_24_BIT:
++ word_size = 4;
++ break;
++ case ASRC_WIDTH_16_BIT:
++ case ASRC_WIDTH_8_BIT:
++ word_size = 2;
++ break;
++ default:
++ pair_err("invalid %sput word size!\n", in ? "in" : "out");
++ return -EINVAL;
++ }
++
++ if (buf_len < word_size * params->channel_nums * wm) {
++ pair_err("%sput buffer size[%d] is too small!\n",
++ in ? "in" : "out", buf_len);
++ return -EINVAL;
++ }
++
++ /* Copy origin data into input buffer */
++ if (in && copy_from_user(dma_vaddr, buf_vaddr, buf_len))
++ return -EFAULT;
++
++ *dma_len = buf_len;
++ if (!in)
++ *dma_len -= wm * word_size * params->channel_nums;
++
++ *sg_nodes = *dma_len / ASRC_MAX_BUFFER_SIZE + 1;
++
++ fifo_addr = asrc_get_per_addr(params->index, in);
++
++ return imx_asrc_dma_config(params, dma_channel, fifo_addr, dma_vaddr,
++ *dma_len, in, width);
++}
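For the output direction the DMA deliberately stops short of the full buffer: the last last_period_sample frames are subtracted from dma_len and are later drained from the output FIFO by the CPU in asrc_read_output_FIFO(), presumably because the trailing samples sit below the DMA watermark and would never raise a request on their own. The arithmetic, with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	unsigned int buf_len = 4096;	/* hypothetical output buffer */
	unsigned int word_size = 2;	/* 16-bit samples */
	unsigned int channels = 2;
	unsigned int last_period = 32;	/* last_period_sample */
	unsigned int dma_len = buf_len - last_period * word_size * channels;

	/* 3968 bytes move by DMA; the trailing 128 bytes are read back
	 * from the FIFO by the CPU once both DMA tasks complete */
	printf("dma %u bytes, cpu tail %u bytes\n", dma_len, buf_len - dma_len);
	return 0;
}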
++
++static int mxc_asrc_prepare_buffer(struct asrc_pair_params *params,
++ struct asrc_convert_buffer *pbuf)
++{
++ enum asrc_pair_index index = params->index;
++ int ret;
++
++ ret = mxc_asrc_prepare_io_buffer(params, pbuf, true);
++ if (ret) {
++ pair_err("failed to prepare input buffer: %d\n", ret);
++ return ret;
++ }
++
++ ret = mxc_asrc_prepare_io_buffer(params, pbuf, false);
++ if (ret) {
++ pair_err("failed to prepare output buffer: %d\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++int mxc_asrc_process_buffer_pre(struct completion *complete,
++ enum asrc_pair_index index, bool in)
++{
++ if (!wait_for_completion_interruptible_timeout(complete, 10 * HZ)) {
++ pair_err("%sput dma task timeout\n", in ? "in" : "out");
++ return -ETIME;
++ } else if (signal_pending(current)) {
++ pair_err("%sput task forcibly aborted\n", in ? "in" : "out");
++ return -EBUSY;
++ }
++
++ init_completion(complete);
++
++ return 0;
++}
++
++#define mxc_asrc_dma_umap(params) \
++ do { \
++ dma_unmap_sg(NULL, params->input_sg, params->input_sg_nodes, \
++ DMA_MEM_TO_DEV); \
++ dma_unmap_sg(NULL, params->output_sg, params->output_sg_nodes, \
++ DMA_DEV_TO_MEM); \
++ } while (0)
++
++int mxc_asrc_process_buffer(struct asrc_pair_params *params,
++ struct asrc_convert_buffer *pbuf)
++{
++ enum asrc_pair_index index = params->index;
++ unsigned long lock_flags;
++ int ret;
++
++ /* Check input task first */
++ ret = mxc_asrc_process_buffer_pre(&params->input_complete, index, true);
++ if (ret) {
++ mxc_asrc_dma_umap(params);
++ return ret;
++ }
++
++ /* ...then the output task */
++ ret = mxc_asrc_process_buffer_pre(&params->output_complete, index, false);
++ if (ret) {
++ mxc_asrc_dma_umap(params);
++ return ret;
++ }
++
++ mxc_asrc_dma_umap(params);
++
++ pbuf->input_buffer_length = params->input_dma_total.length;
++ pbuf->output_buffer_length = params->output_dma_total.length;
++
++ spin_lock_irqsave(&pair_lock, lock_flags);
++ if (!params->pair_hold) {
++ spin_unlock_irqrestore(&pair_lock, lock_flags);
++ return -EFAULT;
++ }
++ spin_unlock_irqrestore(&pair_lock, lock_flags);
++
++ asrc_read_output_FIFO(params);
++
++ if (copy_to_user((void __user *)pbuf->output_buffer_vaddr,
++ params->output_dma_total.dma_vaddr,
++ params->output_dma_total.length))
++ return -EFAULT;
++
++ pbuf->output_buffer_length += params->output_last_period.length;
++
++ if (copy_to_user((void __user *)pbuf->output_buffer_vaddr +
++ params->output_dma_total.length,
++ params->output_last_period.dma_vaddr,
++ params->output_last_period.length))
++ return -EFAULT;
++
++ return 0;
++}
++
++#ifdef ASRC_POLLING_WITHOUT_DMA
++static void asrc_write_one_to_input_FIFO(enum asrc_pair_index index, u32 val)
++{
++ regmap_write(asrc->regmap, REG_ASRDI(index), val);
++}
++
++/* THIS FUNCTION ONLY EXISTS FOR DEBUGGING AND ONLY SUPPORTS TWO CHANNELS */
++static void asrc_polling_debug(struct asrc_pair_params *params)
++{
++ enum asrc_pair_index index = params->index;
++ u32 *in24 = params->input_dma_total.dma_vaddr;
++ u32 dma_len = params->input_dma_total.length / (params->channel_nums * 4);
++ u32 size, i, j, t_size, reg;
++ u32 *reg24 = params->output_dma_total.dma_vaddr;
++
++ t_size = 0;
++
++ for (i = 0; i < dma_len; ) {
++ for (j = 0; j < 2; j++) {
++ asrc_write_one_to_input_FIFO(index, *in24);
++ in24++;
++ asrc_write_one_to_input_FIFO(index, *in24);
++ in24++;
++ i++;
++ }
++ udelay(50);
++ udelay(50 * params->output_sample_rate / params->input_sample_rate);
++
++ size = asrc_get_output_FIFO_size(index);
++ for (j = 0; j < size; j++) {
++ reg = asrc_read_one_from_output_FIFO(index);
++ *(reg24) = reg;
++ reg24++;
++ reg = asrc_read_one_from_output_FIFO(index);
++ *(reg24) = reg;
++ reg24++;
++ }
++ t_size += size;
++ }
++
++ mdelay(1);
++ size = asrc_get_output_FIFO_size(index);
++ for (j = 0; j < size; j++) {
++ reg = asrc_read_one_from_output_FIFO(index);
++ *(reg24) = reg;
++ reg24++;
++ reg = asrc_read_one_from_output_FIFO(index);
++ *(reg24) = reg;
++ reg24++;
++ }
++ t_size += size;
++
++ params->output_dma_total.length = t_size * params->channel_nums * 4;
++ params->output_last_period.length = 0;
++
++ complete(&params->input_complete);
++}
++#else
++static void mxc_asrc_submit_dma(struct asrc_pair_params *params)
++{
++ enum asrc_pair_index index = params->index;
++ u32 size = asrc_get_output_FIFO_size(params->index);
++ int i, j;
++
++ /* Read all data in OUTPUT FIFO */
++ while (size) {
++ for (j = 0; j < size; j++)
++ for (i = 0; i < params->channel_nums; i++)
++ asrc_read_one_from_output_FIFO(index);
++ /* Fetch the data every 100us */
++ udelay(100);
++
++ size = asrc_get_output_FIFO_size(index);
++ }
++
++ /* Submit dma request */
++ dmaengine_submit(params->desc_in);
++ dma_async_issue_pending(params->desc_in->chan);
++
++ dmaengine_submit(params->desc_out);
++ dma_async_issue_pending(params->desc_out->chan);
++
++ /*
++ * Clear the DMA request during the stall state of ASRC:
++ * during the STALL state, the data remaining in the input FIFO never
++ * drops below the input threshold, and the output FIFO never rises
++ * above the output threshold, so the DMA request gets cleared.
++ */
++ asrc_set_watermark(index, ASRC_FIFO_THRESHOLD_MIN, ASRC_FIFO_THRESHOLD_MAX);
++
++ /* Update the real input threshold to raise dma request */
++ asrc_set_watermark(index, params->input_wm, params->output_wm);
++}
++#endif
++
++static long asrc_ioctl_req_pair(struct asrc_pair_params *params,
++ void __user *user)
++{
++ unsigned long lock_flags;
++ struct asrc_req req;
++ long ret;
++
++ ret = copy_from_user(&req, user, sizeof(req));
++ if (ret) {
++ dev_err(asrc->dev, "failed to get req from user space: %ld\n", ret);
++ return ret;
++ }
++
++ ret = asrc_req_pair(req.chn_num, &req.index);
++ if (ret) {
++ dev_err(asrc->dev, "failed to request pair: %ld\n", ret);
++ return ret;
++ }
++
++ spin_lock_irqsave(&pair_lock, lock_flags);
++ params->pair_hold = 1;
++ spin_unlock_irqrestore(&pair_lock, lock_flags);
++ params->index = req.index;
++ params->channel_nums = req.chn_num;
++
++ ret = copy_to_user(user, &req, sizeof(req));
++ if (ret) {
++ dev_err(asrc->dev, "failed to send req to user space: %ld\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static long asrc_ioctl_config_pair(struct asrc_pair_params *params,
++ void __user *user)
++{
++ struct asrc_config config;
++ enum asrc_pair_index index;
++ long ret;
++
++ ret = copy_from_user(&config, user, sizeof(config));
++ if (ret) {
++ dev_err(asrc->dev, "failed to get config from user space: %ld\n", ret);
++ return ret;
++ }
++
++ index = config.pair;
++
++ ret = asrc_config_pair(&config);
++ if (ret) {
++ pair_err("failed to config pair: %ld\n", ret);
++ return ret;
++ }
++
++ params->input_wm = 4;
++ params->output_wm = 2;
++
++ ret = asrc_set_watermark(index, params->input_wm, params->output_wm);
++ if (ret)
++ return ret;
++
++ params->output_buffer_size = config.dma_buffer_size;
++ params->input_buffer_size = config.dma_buffer_size;
++ if (config.buffer_num > ASRC_DMA_BUFFER_NUM)
++ params->buffer_num = ASRC_DMA_BUFFER_NUM;
++ else
++ params->buffer_num = config.buffer_num;
++
++ params->input_dma_total.length = ASRC_DMA_BUFFER_SIZE;
++ params->output_dma_total.length = ASRC_DMA_BUFFER_SIZE;
++
++ params->input_word_width = config.input_word_width;
++ params->output_word_width = config.output_word_width;
++
++ params->input_sample_rate = config.input_sample_rate;
++ params->output_sample_rate = config.output_sample_rate;
++
++ if (params->output_sample_rate > params->input_sample_rate)
++ params->last_period_sample = ASRC_OUTPUT_LAST_SAMPLE_DEFAULT_MAX;
++ else
++ params->last_period_sample = ASRC_OUTPUT_LAST_SAMPLE_DEFAULT;
++
++ ret = mxc_allocate_dma_buf(params);
++ if (ret) {
++ pair_err("failed to allocate dma buffer: %ld\n", ret);
++ return ret;
++ }
++
++ /* Request DMA channel for both input and output */
++ params->input_dma_channel = imx_asrc_get_dma_channel(index, true);
++ if (params->input_dma_channel == NULL) {
++ pair_err("failed to request input task dma channel\n");
++ return -EBUSY;
++ }
++
++ params->output_dma_channel = imx_asrc_get_dma_channel(index, false);
++ if (params->output_dma_channel == NULL) {
++ pair_err("failed to request output task dma channel\n");
++ return -EBUSY;
++ }
++
++ ret = copy_to_user(user, &config, sizeof(config));
++ if (ret) {
++ pair_err("failed to send config to user space: %ld\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static long asrc_ioctl_release_pair(struct asrc_pair_params *params,
++ void __user *user)
++{
++ enum asrc_pair_index index;
++ unsigned long lock_flags;
++ long ret;
++
++ ret = copy_from_user(&index, user, sizeof(index));
++ if (ret) {
++ dev_err(asrc->dev, "failed to get index from user space: %ld\n", ret);
++ return ret;
++ }
++
++ /* index might not be valid due to an application failure. */
++ if (index < 0)
++ return -EINVAL;
++
++ params->asrc_active = 0;
++
++ spin_lock_irqsave(&pair_lock, lock_flags);
++ params->pair_hold = 0;
++ spin_unlock_irqrestore(&pair_lock, lock_flags);
++
++ if (params->input_dma_channel)
++ dma_release_channel(params->input_dma_channel);
++ if (params->output_dma_channel)
++ dma_release_channel(params->output_dma_channel);
++ mxc_free_dma_buf(params);
++ asrc_release_pair(index);
++ asrc_finish_conv(index);
++
++ return 0;
++}
++
++static long asrc_ioctl_convert(struct asrc_pair_params *params,
++ void __user *user)
++{
++ enum asrc_pair_index index = params->index;
++ struct asrc_convert_buffer buf;
++ long ret;
++
++ ret = copy_from_user(&buf, user, sizeof(buf));
++ if (ret) {
++ pair_err("failed to get buf from user space: %ld\n", ret);
++ return ret;
++ }
++
++ ret = mxc_asrc_prepare_buffer(params, &buf);
++ if (ret) {
++ pair_err("failed to prepare buffer: %ld\n", ret);
++ return ret;
++ }
++
++#ifdef ASRC_POLLING_WITHOUT_DMA
++ asrc_polling_debug(params);
++#else
++ mxc_asrc_submit_dma(params);
++#endif
++
++ ret = mxc_asrc_process_buffer(params, &buf);
++ if (ret) {
++ pair_err("failed to process buffer: %ld\n", ret);
++ return ret;
++ }
++
++ ret = copy_to_user(user, &buf, sizeof(buf));
++ if (ret) {
++ pair_err("failed to send buf to user space: %ld\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static long asrc_ioctl_start_conv(struct asrc_pair_params *params,
++ void __user *user)
++{
++ enum asrc_pair_index index;
++ long ret;
++
++ ret = copy_from_user(&index, user, sizeof(index));
++ if (ret) {
++ dev_err(asrc->dev, "failed to get index from user space: %ld\n", ret);
++ return ret;
++ }
++
++ params->asrc_active = 1;
++ asrc_start_conv(index);
++
++ return 0;
++}
++
++static long asrc_ioctl_stop_conv(struct asrc_pair_params *params,
++ void __user *user)
++{
++ enum asrc_pair_index index;
++ long ret;
++
++ ret = copy_from_user(&index, user, sizeof(index));
++ if (ret) {
++ dev_err(asrc->dev, "failed to get index from user space: %ld\n", ret);
++ return ret;
++ }
++
++ dmaengine_terminate_all(params->input_dma_channel);
++ dmaengine_terminate_all(params->output_dma_channel);
++
++ asrc_stop_conv(index);
++ params->asrc_active = 0;
++
++ return 0;
++}
++
++static long asrc_ioctl_status(struct asrc_pair_params *params,
++ void __user *user)
++{
++ enum asrc_pair_index index = params->index;
++ struct asrc_status_flags flags;
++ long ret;
++
++ ret = copy_from_user(&flags, user, sizeof(flags));
++ if (ret) {
++ pair_err("failed to get flags from user space: %ld\n", ret);
++ return ret;
++ }
++
++ asrc_get_status(&flags);
++
++ ret = copy_to_user(user, &flags, sizeof(flags));
++ if (ret) {
++ pair_err("failed to send flags to user space: %ld\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static long asrc_ioctl_flush(struct asrc_pair_params *params,
++ void __user *user)
++{
++ enum asrc_pair_index index = params->index;
++ init_completion(&params->input_complete);
++ init_completion(&params->output_complete);
++
++ /* Release DMA and request again */
++ dma_release_channel(params->input_dma_channel);
++ dma_release_channel(params->output_dma_channel);
++
++ params->input_dma_channel = imx_asrc_get_dma_channel(index, true);
++ if (params->input_dma_channel == NULL) {
++ pair_err("failed to request input task dma channel\n");
++ return -EBUSY;
++ }
++
++ params->output_dma_channel = imx_asrc_get_dma_channel(index, false);
++ if (params->output_dma_channel == NULL) {
++ pair_err("failed to request output task dma channel\n");
++ return -EBUSY;
++ }
++
++ return 0;
++}
++
++static long asrc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ struct asrc_pair_params *params = file->private_data;
++ void __user *user = (void __user *)arg;
++ long ret = 0;
++
++ switch (cmd) {
++ case ASRC_REQ_PAIR:
++ ret = asrc_ioctl_req_pair(params, user);
++ break;
++ case ASRC_CONFIG_PAIR:
++ ret = asrc_ioctl_config_pair(params, user);
++ break;
++ case ASRC_RELEASE_PAIR:
++ ret = asrc_ioctl_release_pair(params, user);
++ break;
++ case ASRC_CONVERT:
++ ret = asrc_ioctl_convert(params, user);
++ break;
++ case ASRC_START_CONV:
++ ret = asrc_ioctl_start_conv(params, user);
++ dump_regs();
++ break;
++ case ASRC_STOP_CONV:
++ ret = asrc_ioctl_stop_conv(params, user);
++ break;
++ case ASRC_STATUS:
++ ret = asrc_ioctl_status(params, user);
++ break;
++ case ASRC_FLUSH:
++ ret = asrc_ioctl_flush(params, user);
++ break;
++ default:
++ dev_err(asrc->dev, "invalid ioctl cmd!\n");
++ break;
++ }
++
++ return ret;
++}
++
++static int mxc_asrc_open(struct inode *inode, struct file *file)
++{
++ struct asrc_pair_params *params;
++ unsigned long lock_flags;
++ int i = 0, ret = 0;
++
++ ret = signal_pending(current);
++ if (ret) {
++ dev_err(asrc->dev, "current process has a signal pending\n");
++ return ret;
++ }
++
++ params = kzalloc(sizeof(struct asrc_pair_params), GFP_KERNEL);
++ if (params == NULL) {
++ dev_err(asrc->dev, "failed to allocate pair_params\n");
++ return -ENOBUFS;
++ }
++
++ file->private_data = params;
++
++ while (asrc->params[i])
++ i++;
++
++ if (i >= ASRC_PAIR_MAX_NUM) {
++ dev_err(asrc->dev, "All pairs are being occupied\n");
++ return -EBUSY;
++ }
++
++ init_completion(&params->input_complete);
++ init_completion(&params->output_complete);
++
++ spin_lock_irqsave(&pair_lock, lock_flags);
++ asrc->params[i] = params;
++ spin_unlock_irqrestore(&pair_lock, lock_flags);
++
++ return ret;
++}
++
++static int mxc_asrc_close(struct inode *inode, struct file *file)
++{
++ struct asrc_pair_params *params;
++ unsigned long lock_flags;
++ int i;
++
++ params = file->private_data;
++
++ spin_lock_irqsave(&pair_lock, lock_flags);
++ for (i = 0; i < ASRC_PAIR_MAX_NUM; i++)
++ if (asrc->params[i] == params)
++ asrc->params[i] = NULL;
++ spin_unlock_irqrestore(&pair_lock, lock_flags);
++
++ if (!params)
++ return 0;
++
++ if (params->asrc_active) {
++ params->asrc_active = 0;
++
++ dmaengine_terminate_all(params->input_dma_channel);
++ dmaengine_terminate_all(params->output_dma_channel);
++
++ asrc_stop_conv(params->index);
++
++ complete(&params->input_complete);
++ complete(&params->output_complete);
++ }
++
++ spin_lock_irqsave(&pair_lock, lock_flags);
++ if (params->pair_hold) {
++ params->pair_hold = 0;
++ spin_unlock_irqrestore(&pair_lock, lock_flags);
++
++ if (params->input_dma_channel)
++ dma_release_channel(params->input_dma_channel);
++ if (params->output_dma_channel)
++ dma_release_channel(params->output_dma_channel);
++
++ mxc_free_dma_buf(params);
++
++ asrc_release_pair(params->index);
++ asrc_finish_conv(params->index);
++ } else {
++ spin_unlock_irqrestore(&pair_lock, lock_flags);
++ }
++
++ spin_lock_irqsave(&pair_lock, lock_flags);
++ kfree(params);
++ spin_unlock_irqrestore(&pair_lock, lock_flags);
++ file->private_data = NULL;
++
++ return 0;
++}
++
++static int mxc_asrc_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ unsigned long size = vma->vm_end - vma->vm_start;
++ int ret;
++
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++
++ ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ size, vma->vm_page_prot);
++ if (ret) {
++ dev_err(asrc->dev, "failed to memory map!\n");
++ return ret;
++ }
++
++ vma->vm_flags &= ~VM_IO;
++
++ return ret;
++}
++
++static const struct file_operations asrc_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = asrc_ioctl,
++ .mmap = mxc_asrc_mmap,
++ .open = mxc_asrc_open,
++ .release = mxc_asrc_close,
++};
++
++static struct miscdevice asrc_miscdev = {
++ .name = "mxc_asrc",
++ .fops = &asrc_fops,
++ .minor = MISC_DYNAMIC_MINOR,
++};
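Userspace reaches the converter through /dev/mxc_asrc with the ioctls handled above. A minimal client sketch of the call order, assuming the request/config structures and ioctl numbers come from the linux/mxc_asrc.h header included above, with error handling trimmed:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mxc_asrc.h>	/* ASRC_REQ_PAIR, struct asrc_config, ... */

int main(void)
{
	struct asrc_req req = { .chn_num = 2 };
	struct asrc_config cfg = { 0 };
	int fd = open("/dev/mxc_asrc", O_RDWR);

	if (fd < 0 || ioctl(fd, ASRC_REQ_PAIR, &req) < 0)
		return 1;

	cfg.pair = req.index;			/* pair granted by the driver */
	cfg.channel_num = 2;
	cfg.input_sample_rate = 44100;
	cfg.output_sample_rate = 48000;
	cfg.input_word_width = ASRC_WIDTH_16_BIT;
	cfg.output_word_width = ASRC_WIDTH_16_BIT;
	cfg.inclk = INCLK_NONE;			/* ideal-ratio mode */
	cfg.outclk = OUTCLK_ASRCK1_CLK;
	cfg.dma_buffer_size = 4096;		/* hypothetical buffer size */
	ioctl(fd, ASRC_CONFIG_PAIR, &cfg);

	ioctl(fd, ASRC_START_CONV, &req.index);
	/* ... loop ASRC_CONVERT with a struct asrc_convert_buffer ... */
	ioctl(fd, ASRC_STOP_CONV, &req.index);
	ioctl(fd, ASRC_RELEASE_PAIR, &req.index);

	close(fd);
	return 0;
}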
++
++static int asrc_read_proc_attr(struct file *file, char __user *buf,
++ size_t count, loff_t *off)
++{
++ char tmpbuf[80];
++ int len = 0;
++ u32 reg;
++
++ if (*off)
++ return 0;
++
++ regmap_read(asrc->regmap, REG_ASRCNCR, &reg);
++
++ len += sprintf(tmpbuf, "ANCA: %d\nANCB: %d\nANCC: %d\n",
++ ASRCNCR_ANCx_get(ASRC_PAIR_A, reg, asrc->channel_bits),
++ ASRCNCR_ANCx_get(ASRC_PAIR_B, reg, asrc->channel_bits),
++ ASRCNCR_ANCx_get(ASRC_PAIR_C, reg, asrc->channel_bits));
++
++ if (len > count)
++ return 0;
++
++ if (copy_to_user(buf, &tmpbuf, len))
++ return -EFAULT;
++
++ *off += len;
++
++ return len;
++}
++
++#define ASRC_MAX_PROC_BUFFER_SIZE 63
++
++static int asrc_write_proc_attr(struct file *file, const char __user *buffer,
++ size_t count, loff_t *data)
++{
++ char buf[ASRC_MAX_PROC_BUFFER_SIZE];
++ int na, nb, nc;
++ int total;
++
++ if (count >= ASRC_MAX_PROC_BUFFER_SIZE) {
++ dev_err(asrc->dev, "proc write: the input string was too long\n");
++ return -EINVAL;
++ }
++
++ if (copy_from_user(buf, buffer, count)) {
++ dev_err(asrc->dev, "proc write: failed to copy buffer from user\n");
++ return -EFAULT;
++ }
++ buf[count] = '\0'; /* ensure sscanf() sees a NUL-terminated string */
++
++ sscanf(buf, "ANCA: %d\nANCB: %d\nANCC: %d", &na, &nb, &nc);
++
++ total = asrc->channel_bits > 3 ? 10 : 5;
++
++ if (na + nb + nc > total) {
++ dev_err(asrc->dev, "don't surpass %d for total\n", total);
++ return -EINVAL;
++ } else if (asrc->channel_bits < 4 &&
++ (na % 2 != 0 || nb % 2 != 0 || nc % 2 != 0)) {
++ dev_err(asrc->dev, "please set an even number for each pair\n");
++ return -EINVAL;
++ } else if (na < 0 || nb < 0 || nc < 0) {
++ dev_err(asrc->dev, "please set an positive number for each pair\n");
++ return -EINVAL;
++ }
++
++ asrc->asrc_pair[ASRC_PAIR_A].chn_max = na;
++ asrc->asrc_pair[ASRC_PAIR_B].chn_max = nb;
++ asrc->asrc_pair[ASRC_PAIR_C].chn_max = nc;
++
++ /* Update channel number settings */
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(ASRC_PAIR_A, asrc->channel_bits),
++ ASRCNCR_ANCx_set(ASRC_PAIR_A, na, asrc->channel_bits));
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(ASRC_PAIR_B, asrc->channel_bits),
++ ASRCNCR_ANCx_set(ASRC_PAIR_B, nb, asrc->channel_bits));
++ regmap_update_bits(asrc->regmap, REG_ASRCNCR,
++ ASRCNCR_ANCx_MASK(ASRC_PAIR_C, asrc->channel_bits),
++ ASRCNCR_ANCx_set(ASRC_PAIR_C, nc, asrc->channel_bits));
++
++ return count;
++}
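The handler enforces the channel budget used throughout the driver: parts with more than 3 channel bits (the i.MX53-class ASRC) can spread up to 10 channels across the three pairs, older parts get 5, and on the narrow parts every per-pair count must be even. A condensed sketch of the validation rule, mirroring the checks above:

#include <stdio.h>

static int validate(int channel_bits, int na, int nb, int nc)
{
	int total = channel_bits > 3 ? 10 : 5;

	if (na + nb + nc > total)
		return -1;	/* over the total channel budget */
	if (channel_bits < 4 && ((na | nb | nc) & 1))
		return -1;	/* narrow parts need even pair counts */
	if (na < 0 || nb < 0 || nc < 0)
		return -1;	/* no negative counts */
	return 0;
}

int main(void)
{
	printf("%d\n", validate(4, 2, 6, 2));	/* 0: the default 2:6:2 split */
	printf("%d\n", validate(4, 4, 6, 2));	/* -1: 12 exceeds the budget */
	return 0;
}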
++
++static const struct file_operations asrc_proc_fops = {
++ .read = asrc_read_proc_attr,
++ .write = asrc_write_proc_attr,
++};
++
++static void asrc_proc_create(void)
++{
++ struct proc_dir_entry *proc_attr;
++
++ asrc->proc_asrc = proc_mkdir(ASRC_PROC_PATH, NULL);
++ if (!asrc->proc_asrc) {
++ dev_err(asrc->dev, "failed to create proc entry %s\n", ASRC_PROC_PATH);
++ return;
++ }
++
++ proc_attr = proc_create("ChSettings", S_IFREG | S_IRUGO | S_IWUSR,
++ asrc->proc_asrc, &asrc_proc_fops);
++ if (!proc_attr) {
++ remove_proc_entry(ASRC_PROC_PATH, NULL);
++ dev_err(asrc->dev, "failed to create proc attribute entry\n");
++ }
++}
++
++static void asrc_proc_remove(void)
++{
++ remove_proc_entry("ChSettings", asrc->proc_asrc);
++ remove_proc_entry(ASRC_PROC_PATH, NULL);
++}
++
++static bool asrc_readable_reg(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case REG_ASRCTR:
++ case REG_ASRIER:
++ case REG_ASRCNCR:
++ case REG_ASRCFG:
++ case REG_ASRCSR:
++ case REG_ASRCDR1:
++ case REG_ASRCDR2:
++ case REG_ASRSTR:
++ case REG_ASRPM1:
++ case REG_ASRPM2:
++ case REG_ASRPM3:
++ case REG_ASRPM4:
++ case REG_ASRPM5:
++ case REG_ASRTFR1:
++ case REG_ASRCCR:
++ case REG_ASRDOA:
++ case REG_ASRDOB:
++ case REG_ASRDOC:
++ case REG_ASRIDRHA:
++ case REG_ASRIDRLA:
++ case REG_ASRIDRHB:
++ case REG_ASRIDRLB:
++ case REG_ASRIDRHC:
++ case REG_ASRIDRLC:
++ case REG_ASR76K:
++ case REG_ASR56K:
++ case REG_ASRMCRA:
++ case REG_ASRFSTA:
++ case REG_ASRMCRB:
++ case REG_ASRFSTB:
++ case REG_ASRMCRC:
++ case REG_ASRFSTC:
++ case REG_ASRMCR1A:
++ case REG_ASRMCR1B:
++ case REG_ASRMCR1C:
++ return true;
++ default:
++ return false;
++ }
++}
++
++static bool asrc_volatile_reg(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case REG_ASRSTR:
++ case REG_ASRDIA:
++ case REG_ASRDIB:
++ case REG_ASRDIC:
++ case REG_ASRDOA:
++ case REG_ASRDOB:
++ case REG_ASRDOC:
++ case REG_ASRFSTA:
++ case REG_ASRFSTB:
++ case REG_ASRFSTC:
++ case REG_ASRCFG:
++ return true;
++ default:
++ return false;
++ }
++}
++
++static bool asrc_writeable_reg(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case REG_ASRCTR:
++ case REG_ASRIER:
++ case REG_ASRCNCR:
++ case REG_ASRCFG:
++ case REG_ASRCSR:
++ case REG_ASRCDR1:
++ case REG_ASRCDR2:
++ case REG_ASRSTR:
++ case REG_ASRPM1:
++ case REG_ASRPM2:
++ case REG_ASRPM3:
++ case REG_ASRPM4:
++ case REG_ASRPM5:
++ case REG_ASRTFR1:
++ case REG_ASRCCR:
++ case REG_ASRDIA:
++ case REG_ASRDIB:
++ case REG_ASRDIC:
++ case REG_ASRIDRHA:
++ case REG_ASRIDRLA:
++ case REG_ASRIDRHB:
++ case REG_ASRIDRLB:
++ case REG_ASRIDRHC:
++ case REG_ASRIDRLC:
++ case REG_ASR76K:
++ case REG_ASR56K:
++ case REG_ASRMCRA:
++ case REG_ASRMCRB:
++ case REG_ASRMCRC:
++ case REG_ASRMCR1A:
++ case REG_ASRMCR1B:
++ case REG_ASRMCR1C:
++ return true;
++ default:
++ return false;
++ }
++}
++
++static struct regmap_config asrc_regmap_config = {
++ .reg_bits = 32,
++ .reg_stride = 4,
++ .val_bits = 32,
++
++ .max_register = REG_ASRMCR1C,
++ .readable_reg = asrc_readable_reg,
++ .volatile_reg = asrc_volatile_reg,
++ .writeable_reg = asrc_writeable_reg,
++ .cache_type = REGCACHE_RBTREE,
++};
++
++static int mxc_asrc_probe(struct platform_device *pdev)
++{
++ const struct of_device_id *of_id = of_match_device(fsl_asrc_ids, &pdev->dev);
++ struct device_node *np = pdev->dev.of_node;
++ enum mxc_asrc_type devtype;
++ struct resource *res;
++ void __iomem *regs;
++ int ret;
++
++ /* Check whether the device node exists */
++ if (!np)
++ return -ENODEV;
++
++ asrc = devm_kzalloc(&pdev->dev, sizeof(struct asrc_data), GFP_KERNEL);
++ if (!asrc)
++ return -ENOMEM;
++
++ if (of_id) {
++ const struct platform_device_id *id_entry = of_id->data;
++ devtype = id_entry->driver_data;
++ } else {
++ devtype = pdev->id_entry->driver_data;
++ }
++
++ asrc->dev = &pdev->dev;
++ asrc->dev->coherent_dma_mask = DMA_BIT_MASK(32);
++
++ asrc->asrc_pair[ASRC_PAIR_A].chn_max = 2;
++ asrc->asrc_pair[ASRC_PAIR_B].chn_max = 6;
++ asrc->asrc_pair[ASRC_PAIR_C].chn_max = 2;
++ asrc->asrc_pair[ASRC_PAIR_A].overload_error = 0;
++ asrc->asrc_pair[ASRC_PAIR_B].overload_error = 0;
++ asrc->asrc_pair[ASRC_PAIR_C].overload_error = 0;
++
++ /* Map the address */
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(&pdev->dev, "could not determine device resources\n");
++ return -ENODEV;
++ }
++
++ regs = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(regs)) {
++ dev_err(&pdev->dev, "could not map device resources\n");
++ return PTR_ERR(regs);
++ }
++ asrc->paddr = res->start;
++
++ /* Register regmap and let it prepare core clock */
++ asrc->regmap = devm_regmap_init_mmio_clk(&pdev->dev,
++ "mem", regs, &asrc_regmap_config);
++ if (IS_ERR(asrc->regmap)) {
++ dev_err(&pdev->dev, "regmap init failed\n");
++ return PTR_ERR(asrc->regmap);
++ }
++
++ asrc->irq = platform_get_irq(pdev, 0);
++ if (asrc->irq < 0) {
++ dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
++ return asrc->irq;
++ }
++
++ ret = devm_request_irq(&pdev->dev, asrc->irq, asrc_isr, 0, np->name, NULL);
++ if (ret) {
++ dev_err(&pdev->dev, "could not claim irq %u: %d\n", asrc->irq, ret);
++ return ret;
++ }
++
++ asrc->mem_clk = devm_clk_get(&pdev->dev, "mem");
++ if (IS_ERR(asrc->mem_clk)) {
++ dev_err(&pdev->dev, "failed to get mem clock\n");
++ return PTR_ERR(asrc->mem_clk);
++ }
++
++ asrc->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
++ if (IS_ERR(asrc->ipg_clk)) {
++ dev_err(&pdev->dev, "failed to get ipg clock\n");
++ return PTR_ERR(asrc->ipg_clk);
++ }
++
++ asrc->asrck_clk = devm_clk_get(&pdev->dev, "asrck");
++ if (IS_ERR(asrc->asrck_clk)) {
++ dev_err(&pdev->dev, "failed to get asrck clock\n");
++ return PTR_ERR(asrc->asrck_clk);
++ }
++
++ asrc->dma_clk = devm_clk_get(&pdev->dev, "dma");
++ if (IS_ERR(asrc->dma_clk)) {
++ dev_err(&pdev->dev, "failed to get dma script clock\n");
++ return PTR_ERR(asrc->dma_clk);
++ }
++
++ switch (devtype) {
++ case IMX35_ASRC:
++ asrc->channel_bits = 3;
++ input_clk_map = input_clk_map_v1;
++ output_clk_map = output_clk_map_v1;
++ break;
++ case IMX53_ASRC:
++ asrc->channel_bits = 4;
++ input_clk_map = input_clk_map_v2;
++ output_clk_map = output_clk_map_v2;
++ break;
++ default:
++ dev_err(&pdev->dev, "unsupported device type\n");
++ return -EINVAL;
++ }
++
++ ret = misc_register(&asrc_miscdev);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to register char device %d\n", ret);
++ return ret;
++ }
++
++ asrc_proc_create();
++
++ ret = mxc_init_asrc();
++ if (ret) {
++ dev_err(&pdev->dev, "failed to init asrc %d\n", ret);
++ goto err_misc;
++ }
++
++ dev_info(&pdev->dev, "mxc_asrc registered\n");
++
++ return ret;
++
++err_misc:
++ misc_deregister(&asrc_miscdev);
++
++ return ret;
++}
++
++static int mxc_asrc_remove(struct platform_device *pdev)
++{
++ asrc_proc_remove();
++ misc_deregister(&asrc_miscdev);
++
++ return 0;
++}
++
+#ifdef CONFIG_PM_SLEEP
++static int mxc_asrc_suspend(struct device *dev)
++{
++ struct asrc_pair_params *params;
++ unsigned long lock_flags;
++ int i;
++
++ for (i = 0; i < ASRC_PAIR_MAX_NUM; i++) {
++ spin_lock_irqsave(&pair_lock, lock_flags);
++
++ params = asrc->params[i];
++ if (!params || !params->pair_hold) {
++ spin_unlock_irqrestore(&pair_lock, lock_flags);
++ continue;
++ }
++
++ if (!completion_done(&params->input_complete)) {
++ if (params->input_dma_channel)
++ dmaengine_terminate_all(params->input_dma_channel);
++ asrc_input_dma_callback((void *)params);
++ }
++ if (!completion_done(&params->output_complete)) {
++ if (params->output_dma_channel)
++ dmaengine_terminate_all(params->output_dma_channel);
++ asrc_output_dma_callback((void *)params);
++ }
++
++ spin_unlock_irqrestore(&pair_lock, lock_flags);
++ }
++
++ regcache_cache_only(asrc->regmap, true);
++ regcache_mark_dirty(asrc->regmap);
++
++ return 0;
++}
++
++static int mxc_asrc_resume(struct device *dev)
++{
++ u32 asrctr;
++
++ /* Stop all pairs provisionally */
++ regmap_read(asrc->regmap, REG_ASRCTR, &asrctr);
++ regmap_update_bits(asrc->regmap, REG_ASRCTR, ASRCTR_ASRCEx_ALL_MASK, 0);
++
++ regcache_cache_only(asrc->regmap, false);
++ regcache_sync(asrc->regmap);
++
++ /* Restart enabled pairs */
++ regmap_update_bits(asrc->regmap, REG_ASRCTR,
++ ASRCTR_ASRCEx_ALL_MASK, asrctr);
++
++ return 0;
++}
++#endif /* CONFIG_PM_SLEEP */
++
++static const struct dev_pm_ops mxc_asrc_pm_ops = {
++ SET_SYSTEM_SLEEP_PM_OPS(mxc_asrc_suspend, mxc_asrc_resume)
++};
++
++static struct platform_driver mxc_asrc_driver = {
++ .driver = {
++ .name = "mxc_asrc",
++ .of_match_table = fsl_asrc_ids,
++ .pm = &mxc_asrc_pm_ops,
++ },
++ .probe = mxc_asrc_probe,
++ .remove = mxc_asrc_remove,
++};
++
++module_platform_driver(mxc_asrc_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("Asynchronous Sample Rate Converter");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:mxc_asrc");
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.c linux-xbian-imx6/drivers/mxc/gpu-viv/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.c 2015-07-27 23:13:06.166979215 +0200
+@@ -0,0 +1,932 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++
++#if gcdENABLE_VG
++
++#include "gc_hal_kernel_hardware_command_vg.h"
++
++#define _GC_OBJ_ZONE gcvZONE_COMMAND
++
++/******************************************************************************\
++****************************** gckVGCOMMAND API code *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_InitializeInfo
++**
++** Initialize architecture dependent command buffer information.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to the Command object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGCOMMAND_InitializeInfo(
++ IN gckVGCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ do
++ {
++ /* Reset interrupts. */
++ Command->info.feBufferInt = -1;
++ Command->info.tsOverflowInt = -1;
++
++ /* Set command buffer attributes. */
++ Command->info.addressAlignment = 64;
++ Command->info.commandAlignment = 8;
++
++ /* Determine command alignment address mask. */
++ Command->info.addressMask = ((((gctUINT32) (Command->info.addressAlignment - 1)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) ((gctUINT32) (0 ) & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ /* Query the number of bytes needed by the STATE command. */
++ gcmkERR_BREAK(gckVGCOMMAND_StateCommand(
++ Command, 0x0, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.stateCommandSize
++ ));
++
++ /* Query the number of bytes needed by the RESTART command. */
++ gcmkERR_BREAK(gckVGCOMMAND_RestartCommand(
++ Command, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.restartCommandSize
++ ));
++
++ /* Query the number of bytes needed by the FETCH command. */
++ gcmkERR_BREAK(gckVGCOMMAND_FetchCommand(
++ Command, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.fetchCommandSize
++ ));
++
++ /* Query the number of bytes needed by the CALL command. */
++ gcmkERR_BREAK(gckVGCOMMAND_CallCommand(
++ Command, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.callCommandSize
++ ));
++
++ /* Query the number of bytes needed by the RETURN command. */
++ gcmkERR_BREAK(gckVGCOMMAND_ReturnCommand(
++ Command, gcvNULL,
++ &Command->info.returnCommandSize
++ ));
++
++ /* Query the number of bytes needed by the EVENT command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EventCommand(
++ Command, gcvNULL, gcvBLOCK_PIXEL, -1,
++ &Command->info.eventCommandSize
++ ));
++
++ /* Query the number of bytes needed by the END command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EndCommand(
++ Command, gcvNULL, -1,
++ &Command->info.endCommandSize
++ ));
++
++ /* Determine the tail reserve size. */
++ Command->info.staticTailSize = gcmMAX(
++ Command->info.fetchCommandSize,
++ gcmMAX(
++ Command->info.returnCommandSize,
++ Command->info.endCommandSize
++ )
++ );
++
++ /* Determine the maximum tail size. */
++ Command->info.dynamicTailSize
++ = Command->info.staticTailSize
++ + Command->info.eventCommandSize * gcvBLOCK_COUNT;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
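++/*
++ * Worked example of the tail reserve computed above (the byte counts
++ * are illustrative assumptions, not queried values): with FETCH = 8,
++ * RETURN = 8 and END = 16 bytes, staticTailSize = max(8, max(8, 16))
++ * = 16 bytes.  With EVENT = 8 bytes and gcvBLOCK_COUNT = 9, matching
++ * the event state table in gckVGCOMMAND_EventCommand, the worst-case
++ * dynamic tail is 16 + 8 * 9 = 88 bytes per command buffer.
++ */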
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_StateCommand
++**
++** Append a STATE command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++**      Pointer to a gckVGCOMMAND object.
++**
++** gctUINT32 Pipe
++**      Hardware destination pipe.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++**      the STATE command at, or gcvNULL to query the size of the command.
++**
++** gctUINT32 Address
++** Starting register address of the state buffer.
++** If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT32 Count
++** Number of states in state buffer.
++** If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the STATE command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the STATE command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_StateCommand(
++ IN gckVGCOMMAND Command,
++ IN gctUINT32 Pipe,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Address,
++ IN gctSIZE_T Count,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Pipe=0x%x Logical=0x%x Address=0x%x Count=0x%x Bytes = 0x%x",
++ Command, Pipe, Logical, Address, Count, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append STATE. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12))) | (((gctUINT32) ((gctUINT32) (Pipe) & ((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the STATE command. */
++ *Bytes = 4 * (Count + 1);
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append LOAD_STATE. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the STATE command. */
++ *Bytes = 4 * (Count + 1);
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
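++/*
++ * Typical two-pass use of the emitters in this file, mirroring
++ * gckVGCOMMAND_InitializeInfo: call once with Logical == gcvNULL to
++ * query the size, reserve that much space, then call again with the
++ * real pointer.  A minimal sketch, inside the usual do/while error
++ * scope; 'command', 'pipe', 'address', 'count' and the AllocateSpace()
++ * helper are assumed to be supplied by the caller (the helper is
++ * hypothetical):
++ *
++ *     gctSIZE_T bytes;
++ *     gctPOINTER logical;
++ *
++ *     gcmkERR_BREAK(gckVGCOMMAND_StateCommand(
++ *         command, pipe, gcvNULL, address, count, &bytes
++ *         ));
++ *
++ *     logical = AllocateSpace(bytes);
++ *
++ *     gcmkERR_BREAK(gckVGCOMMAND_StateCommand(
++ *         command, pipe, logical, address, count, gcvNULL
++ *         ));
++ *
++ * The returned size, 4 * (Count + 1) bytes, covers the 32-bit header
++ * plus the Count state words the caller copies in after it.
++ */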
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_RestartCommand
++**
++** Form a RESTART command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++**      Pointer to a gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++**      the RESTART command at, or gcvNULL to query the size of the command.
++**
++** gctUINT32 FetchAddress
++** The address of another command buffer to be executed by this RESTART
++** command. If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT FetchCount
++** The number of 64-bit data quantities in another command buffer to
++** be executed by this RESTART command. If 'Logical' is gcvNULL, this
++** argument is ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the RESTART command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the RESTART command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_RestartCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x FetchAddress=0x%x FetchCount=0x%x Bytes = 0x%x",
++ Command, Logical, FetchAddress, FetchCount, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++ gctUINT32 beginEndMark;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Determine Begin/End flag. */
++ beginEndMark = (FetchCount > 0)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24)))
++ : ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24)));
++
++ /* Append RESTART. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x9 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0)))
++ | beginEndMark;
++
++ buffer[1]
++ = FetchAddress;
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the RESTART command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_FetchCommand
++**
++** Form a FETCH command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++**      Pointer to a gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++**      the FETCH command at, or gcvNULL to query the size of the command.
++**
++** gctUINT32 FetchAddress
++** The address of another command buffer to be executed by this FETCH
++** command. If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT FetchCount
++** The number of 64-bit data quantities in another command buffer to
++** be executed by this FETCH command. If 'Logical' is gcvNULL, this
++** argument is ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the FETCH command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the FETCH command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_FetchCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x FetchAddress=0x%x FetchCount=0x%x Bytes = 0x%x",
++ Command, Logical, FetchAddress, FetchCount, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append FETCH. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x5 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0)));
++
++ buffer[1]
++ = gcmkFIXADDRESS(FetchAddress);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the FETCH command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append LINK. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[1]
++ = gcmkFIXADDRESS(FetchAddress);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the LINK command. */
++ *Bytes = 8;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
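++/*
++ * A chaining sketch: FETCH (emitted as LINK on pre-FE2.0 cores)
++ * transfers execution to another command buffer, and FetchCount is
++ * measured in 64-bit quantities, so a byte size converts with a shift
++ * by 3.  The 'tail', 'nextAddress' and 'nextSize' names are assumed
++ * to come from the caller's buffer bookkeeping:
++ *
++ *     gcmkERR_BREAK(gckVGCOMMAND_FetchCommand(
++ *         command, tail, nextAddress, nextSize >> 3, gcvNULL
++ *         ));
++ */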
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_CallCommand
++**
++** Append a CALL command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++**      Pointer to a gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++**      the CALL command at, or gcvNULL to query the size of the command.
++**
++** gctUINT32 FetchAddress
++** The address of another command buffer to be executed by this CALL
++** command. If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT FetchCount
++** The number of 64-bit data quantities in another command buffer to
++** be executed by this CALL command. If 'Logical' is gcvNULL, this
++** argument is ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the CALL command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the CALL command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_CallCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x FetchAddress=0x%x FetchCount=0x%x Bytes = 0x%x",
++ Command, Logical, FetchAddress, FetchCount, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append CALL. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x6 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0)));
++
++ buffer[1]
++ = gcmkFIXADDRESS(FetchAddress);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the CALL command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_ReturnCommand
++**
++** Append a RETURN command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++**      Pointer to a gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++**      the RETURN command at, or gcvNULL to query the size of the command.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the RETURN command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the RETURN command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_ReturnCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x Bytes = 0x%x",
++ Command, Logical, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append RETURN. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x7 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the RETURN command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_EventCommand
++**
++** Form an EVENT command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to the Command object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++**      the EVENT command at, or gcvNULL to query the size of the command.
++**
++** gctINT32 InterruptId
++** The ID of the interrupt to generate.
++** If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gceBLOCK Block
++** Block that will generate the interrupt.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the EVENT command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++**      for the EVENT command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_EventCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gceBLOCK Block,
++ IN gctINT32 InterruptId,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x Block=0x%x InterruptId=0x%x Bytes = 0x%x",
++ Command, Logical, Block, InterruptId, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ typedef struct _gcsEVENTSTATES
++ {
++ /* Chips before VG21 use these values. */
++ gctUINT eventFromFE;
++ gctUINT eventFromPE;
++
++ /* VG21 chips and later use SOURCE field. */
++ gctUINT eventSource;
++ }
++ gcsEVENTSTATES;
++
++ static gcsEVENTSTATES states[] =
++ {
++ /* gcvBLOCK_COMMAND */
++ {
++ (gctUINT)~0,
++ (gctUINT)~0,
++ (gctUINT)~0
++ },
++
++ /* gcvBLOCK_TESSELLATOR */
++ {
++ 0x0,
++ 0x1,
++ 0x10
++ },
++
++ /* gcvBLOCK_TESSELLATOR2 */
++ {
++ 0x0,
++ 0x1,
++ 0x12
++ },
++
++ /* gcvBLOCK_TESSELLATOR3 */
++ {
++ 0x0,
++ 0x1,
++ 0x14
++ },
++
++ /* gcvBLOCK_RASTER */
++ {
++ 0x0,
++ 0x1,
++ 0x07,
++ },
++
++ /* gcvBLOCK_VG */
++ {
++ 0x0,
++ 0x1,
++ 0x0F
++ },
++
++ /* gcvBLOCK_VG2 */
++ {
++ 0x0,
++ 0x1,
++ 0x11
++ },
++
++ /* gcvBLOCK_VG3 */
++ {
++ 0x0,
++ 0x1,
++ 0x13
++ },
++
++ /* gcvBLOCK_PIXEL */
++ {
++ 0x0,
++ 0x1,
++ 0x07
++ },
++ };
++
++ /* Verify block ID. */
++ gcmkVERIFY_ARGUMENT(gcmIS_VALID_INDEX(Block, states));
++
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++ gcmkVERIFY_ARGUMENT(InterruptId <= ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))));
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append EVENT. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12)));
++
++ /* Determine chip version. */
++ if (Command->vg21)
++ {
++ /* Get the event source for the block. */
++ gctUINT eventSource = states[Block].eventSource;
++
++ /* Supported? */
++ if (eventSource == ~0)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) ((gctUINT32) (eventSource) & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++ else
++ {
++ /* Get the event source for the block. */
++ gctUINT eventFromFE = states[Block].eventFromFE;
++ gctUINT eventFromPE = states[Block].eventFromPE;
++
++ /* Supported? */
++ if (eventFromFE == ~0)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (eventFromFE) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) ((gctUINT32) (eventFromPE) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Make sure the events are directly supported for the block. */
++ if (states[Block].eventSource == ~0)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++            /* Return number of bytes required by the EVENT command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++ gcmkVERIFY_ARGUMENT(InterruptId <= ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))));
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append EVENT. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ /* Determine event source. */
++ if (Block == gcvBLOCK_COMMAND)
++ {
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ }
++ else
++ {
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++            /* Return number of bytes required by the EVENT command. */
++ *Bytes = 8;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
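++/*
++ * A sketch of raising interrupt 'id' from the pixel back-end once the
++ * preceding commands have drained ('id' and 'tail' are assumed to
++ * come from the caller's interrupt and buffer bookkeeping):
++ *
++ *     gcmkERR_BREAK(gckVGCOMMAND_EventCommand(
++ *         command, tail, gcvBLOCK_PIXEL, id, gcvNULL
++ *         ));
++ *
++ * As the state table above encodes, VG21 and newer cores select the
++ * originating block through the SOURCE field, while older cores can
++ * only distinguish an FE event from a PE event.
++ */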
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_EndCommand
++**
++** Form an END command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to the Command object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++**      the END command at, or gcvNULL to query the size of the command.
++**
++** gctINT32 InterruptId
++** The ID of the interrupt to generate.
++** If 'Logical' is gcvNULL, this argument will be ignored.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the END command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the END command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_EndCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctINT32 InterruptId,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x InterruptId=0x%x Bytes = 0x%x",
++ Command, Logical, InterruptId, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append END. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the END command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR memory;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++
++ /* Cast the buffer pointer. */
++ memory = (gctUINT32_PTR) Logical;
++
++ /* Append EVENT. */
++ memory[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ memory[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ /* Append END. */
++ memory[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x02 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the EVENT and END commands. */
++ *Bytes = 16;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
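++/*
++ * Note the size asymmetry between the two paths above: an FE2.0 END
++ * is a single 8-byte command, while older cores emit an EVENT (to
++ * deliver InterruptId) followed by END, 16 bytes in total.  This is
++ * why gckVGCOMMAND_InitializeInfo sizes the tail reserve with gcmMAX
++ * over the individual command sizes instead of assuming one fixed
++ * width.
++ */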
++
++#endif /* gcdENABLE_VG */
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.h linux-xbian-imx6/drivers/mxc/gpu-viv/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/arch/GC350/hal/kernel/gc_hal_kernel_hardware_command_vg.h 2015-07-27 23:13:06.166979215 +0200
+@@ -0,0 +1,319 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_hardware_command_vg_h_
++#define __gc_hal_kernel_hardware_command_vg_h_
++
++/******************************************************************************\
++******************* Task and Interrupt Management Structures. ******************
++\******************************************************************************/
++
++/* Task storage header. */
++typedef struct _gcsTASK_STORAGE * gcsTASK_STORAGE_PTR;
++typedef struct _gcsTASK_STORAGE
++{
++ /* Next allocated storage buffer. */
++ gcsTASK_STORAGE_PTR next;
++}
++gcsTASK_STORAGE;
++
++/* Task container header. */
++typedef struct _gcsTASK_CONTAINER * gcsTASK_CONTAINER_PTR;
++typedef struct _gcsTASK_CONTAINER
++{
++ /* The number of tasks left to be processed in the container. */
++ gctINT referenceCount;
++
++ /* Size of the buffer. */
++ gctUINT size;
++
++ /* Link to the previous and the next allocated containers. */
++ gcsTASK_CONTAINER_PTR allocPrev;
++ gcsTASK_CONTAINER_PTR allocNext;
++
++ /* Link to the previous and the next containers in the free list. */
++ gcsTASK_CONTAINER_PTR freePrev;
++ gcsTASK_CONTAINER_PTR freeNext;
++}
++gcsTASK_CONTAINER;
++
++/* Kernel space task master table entry. */
++typedef struct _gcsBLOCK_TASK_ENTRY * gcsBLOCK_TASK_ENTRY_PTR;
++typedef struct _gcsBLOCK_TASK_ENTRY
++{
++ /* Pointer to the current task container for the block. */
++ gcsTASK_CONTAINER_PTR container;
++
++ /* Pointer to the current task data within the container. */
++ gcsTASK_HEADER_PTR task;
++
++ /* Pointer to the last link task within the container. */
++ gcsTASK_LINK_PTR link;
++
++ /* Number of interrupts allocated for this block. */
++ gctUINT interruptCount;
++
++ /* The index of the current interrupt. */
++ gctUINT interruptIndex;
++
++ /* Interrupt semaphore. */
++ gctSEMAPHORE interruptSemaphore;
++
++ /* Interrupt value array. */
++ gctINT32 interruptArray[32];
++}
++gcsBLOCK_TASK_ENTRY;
++
++
++/******************************************************************************\
++********************* Command Queue Management Structures. *********************
++\******************************************************************************/
++
++/* Command queue kernel element pointer. */
++typedef struct _gcsKERNEL_CMDQUEUE * gcsKERNEL_CMDQUEUE_PTR;
++
++/* Command queue object handler function type. */
++typedef gceSTATUS (* gctOBJECT_HANDLER) (
++ gckVGKERNEL Kernel,
++ gcsKERNEL_CMDQUEUE_PTR Entry
++ );
++
++/* Command queue kernel element. */
++typedef struct _gcsKERNEL_CMDQUEUE
++{
++ /* The number of buffers in the queue. */
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* Pointer to the object handler function. */
++ gctOBJECT_HANDLER handler;
++}
++gcsKERNEL_CMDQUEUE;
++
++/* Command queue header. */
++typedef struct _gcsKERNEL_QUEUE_HEADER * gcsKERNEL_QUEUE_HEADER_PTR;
++typedef struct _gcsKERNEL_QUEUE_HEADER
++{
++ /* The size of the buffer in bytes. */
++ gctUINT size;
++
++ /* The number of pending entries to be processed. */
++ volatile gctUINT pending;
++
++ /* The current command queue entry. */
++ gcsKERNEL_CMDQUEUE_PTR currentEntry;
++
++ /* Next buffer. */
++ gcsKERNEL_QUEUE_HEADER_PTR next;
++}
++gcsKERNEL_QUEUE_HEADER;
++
++
++/******************************************************************************\
++******************************* gckVGCOMMAND Object *******************************
++\******************************************************************************/
++
++/* gckVGCOMMAND object. */
++struct _gckVGCOMMAND
++{
++ /***************************************************************************
++ ** Object data and pointers.
++ */
++
++ gcsOBJECT object;
++ gckVGKERNEL kernel;
++ gckOS os;
++ gckVGHARDWARE hardware;
++
++ /* Features. */
++ gctBOOL fe20;
++ gctBOOL vg20;
++ gctBOOL vg21;
++
++
++ /***************************************************************************
++ ** Enable command queue dumping.
++ */
++
++ gctBOOL enableDumping;
++
++
++ /***************************************************************************
++ ** Bus Error interrupt.
++ */
++
++ gctINT32 busErrorInt;
++
++
++ /***************************************************************************
++ ** Command buffer information.
++ */
++
++ gcsCOMMAND_BUFFER_INFO info;
++
++
++ /***************************************************************************
++ ** Synchronization objects.
++ */
++
++ gctPOINTER queueMutex;
++ gctPOINTER taskMutex;
++ gctPOINTER commitMutex;
++
++
++ /***************************************************************************
++ ** Task management.
++ */
++
++ /* The head of the storage buffer linked list. */
++ gcsTASK_STORAGE_PTR taskStorage;
++
++ /* Allocation size. */
++ gctUINT taskStorageGranularity;
++ gctUINT taskStorageUsable;
++
++ /* The free container list. */
++ gcsTASK_CONTAINER_PTR taskFreeHead;
++ gcsTASK_CONTAINER_PTR taskFreeTail;
++
++ /* Task table */
++ gcsBLOCK_TASK_ENTRY taskTable[gcvBLOCK_COUNT];
++
++
++ /***************************************************************************
++ ** Command queue.
++ */
++
++ /* Pointer to the allocated queue memory. */
++ gcsKERNEL_QUEUE_HEADER_PTR queue;
++
++ /* Pointer to the current available queue from which new queue entries
++ will be allocated. */
++ gcsKERNEL_QUEUE_HEADER_PTR queueHead;
++
++ /* If different from queueHead, points to the command queue which is
++ currently being executed by the hardware. */
++ gcsKERNEL_QUEUE_HEADER_PTR queueTail;
++
++ /* Points to the queue to merge the tail with when the tail is processed. */
++ gcsKERNEL_QUEUE_HEADER_PTR mergeQueue;
++
++ /* Queue overflow counter. */
++ gctUINT queueOverflow;
++
++
++ /***************************************************************************
++ ** Context.
++ */
++
++ /* Context counter used for unique ID. */
++ gctUINT64 contextCounter;
++
++ /* Current context ID. */
++ gctUINT64 currentContext;
++
++ /* Command queue power semaphore. */
++ gctPOINTER powerSemaphore;
++ gctINT32 powerStallInt;
++ gcsCMDBUFFER_PTR powerStallBuffer;
++ gctSIGNAL powerStallSignal;
++
++};
++
++/******************************************************************************\
++************************ gckVGCOMMAND Object Internal API. ***********************
++\******************************************************************************/
++
++/* Initialize architecture dependent command buffer information. */
++gceSTATUS
++gckVGCOMMAND_InitializeInfo(
++ IN gckVGCOMMAND Command
++ );
++
++/* Form a STATE command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_StateCommand(
++ IN gckVGCOMMAND Command,
++ IN gctUINT32 Pipe,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Address,
++ IN gctSIZE_T Count,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Form a RESTART command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_RestartCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Form a FETCH command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_FetchCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Form a CALL command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_CallCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Form a RETURN command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_ReturnCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Form an EVENT command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_EventCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gceBLOCK Block,
++ IN gctINT32 InterruptId,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Form an END command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_EndCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctINT32 InterruptId,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++#endif /* __gc_hal_kernel_hardware_command_vg_h_ */
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.c linux-xbian-imx6/drivers/mxc/gpu-viv/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.c 2015-07-27 23:13:06.166979215 +0200
+@@ -0,0 +1,2114 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#include "gc_hal_kernel_hardware_command_vg.h"
++
++#if gcdENABLE_VG
++
++#define _GC_OBJ_ZONE gcvZONE_HARDWARE
++
++typedef enum
++{
++ gcvPOWER_FLAG_INITIALIZE = 1 << 0,
++ gcvPOWER_FLAG_STALL = 1 << 1,
++ gcvPOWER_FLAG_STOP = 1 << 2,
++ gcvPOWER_FLAG_START = 1 << 3,
++ gcvPOWER_FLAG_RELEASE = 1 << 4,
++ gcvPOWER_FLAG_DELAY = 1 << 5,
++ gcvPOWER_FLAG_SAVE = 1 << 6,
++ gcvPOWER_FLAG_ACQUIRE = 1 << 7,
++ gcvPOWER_FLAG_POWER_OFF = 1 << 8,
++ gcvPOWER_FLAG_CLOCK_OFF = 1 << 9,
++ gcvPOWER_FLAG_CLOCK_ON = 1 << 10,
++ gcvPOWER_FLAG_NOP = 1 << 11,
++}
++gcePOWER_FLAGS;
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++static gceSTATUS
++_ResetGPU(
++ IN gckOS Os
++ )
++{
++ gctUINT32 control, idle;
++ gceSTATUS status;
++
++ /* Read register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ &control));
++
++ for (;;)
++ {
++ /* Disable clock gating. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00104,
++ 0x00000000));
++
++ /* Wait for clock being stable. */
++        /* Wait for the clock to stabilize. */
++
++ /* Isolate the GPU. */
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ control));
++
++ /* Set soft reset. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Wait for reset. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Reset soft reset bit. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Reset GPU isolation. */
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ control));
++
++ /* Read idle register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00004,
++ &idle));
++
++ if ((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) == 0)
++ {
++ continue;
++ }
++
++ /* Read reset register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ &control));
++
++ if (((((((gctUINT32) (control)) >> (0 ? 16:16)) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) ) == 0)
++ || ((((((gctUINT32) (control)) >> (0 ? 17:17)) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1)))))) ) == 0)
++ )
++ {
++ continue;
++ }
++
++ /* GPU is idle. */
++ break;
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the error. */
++ return status;
++}
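++/*
++ * Reading the expanded field macros above, the reset sequence is:
++ * disable clock gating (register 0x00104 = 0), isolate the core
++ * (control register bit 19), pulse soft reset (bit 12, set then
++ * cleared after a 1 ms delay), drop the isolation, then poll until
++ * the idle register reports bit 0 set and control bits 16 and 17
++ * both read back as one.  The register names are not spelled out in
++ * the code, only offsets and bit positions, so treat the naming here
++ * as an interpretation.
++ */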
++
++
++static gceSTATUS
++_IdentifyHardware(
++ IN gckOS Os,
++ OUT gceCHIPMODEL * ChipModel,
++ OUT gctUINT32 * ChipRevision,
++ OUT gctUINT32 * ChipFeatures,
++ OUT gctUINT32 * ChipMinorFeatures,
++ OUT gctUINT32 * ChipMinorFeatures2
++ )
++{
++ gceSTATUS status;
++ gctUINT32 chipIdentity;
++
++ do
++ {
++ /* Read chip identity register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Os, gcvCORE_VG, 0x00018, &chipIdentity));
++
++        /* Special case for older graphics cores. */
++ if (((((gctUINT32) (chipIdentity)) >> (0 ? 31:24) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))))
++ {
++ *ChipModel = gcv500;
++ *ChipRevision = (((((gctUINT32) (chipIdentity)) >> (0 ? 15:12)) & ((gctUINT32) ((((1 ? 15:12) - (0 ? 15:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:12) - (0 ? 15:12) + 1)))))) );
++ }
++
++ else
++ {
++ /* Read chip identity register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Os, gcvCORE_VG,
++ 0x00020,
++ (gctUINT32 *) ChipModel));
++
++ /* Read CHIP_REV register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Os, gcvCORE_VG,
++ 0x00024,
++ ChipRevision));
++ }
++
++ /* Read chip feature register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(
++ Os, gcvCORE_VG, 0x0001C, ChipFeatures
++ ));
++
++ /* Read chip minor feature register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(
++ Os, gcvCORE_VG, 0x00034, ChipMinorFeatures
++ ));
++
++ /* Read chip minor feature register #2. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(
++ Os, gcvCORE_VG, 0x00074, ChipMinorFeatures2
++ ));
++
++ gcmkTRACE(
++ gcvLEVEL_VERBOSE,
++ "ChipModel=0x%08X\n"
++ "ChipRevision=0x%08X\n"
++ "ChipFeatures=0x%08X\n"
++ "ChipMinorFeatures=0x%08X\n"
++ "ChipMinorFeatures2=0x%08X\n",
++ *ChipModel,
++ *ChipRevision,
++ *ChipFeatures,
++ *ChipMinorFeatures,
++ *ChipMinorFeatures2
++ );
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return the status. */
++ return status;
++}
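++/*
++ * Worked decode of the special case above, with an assumed example
++ * value: a chipIdentity of 0x01103000 has bits 31:24 == 0x01, so the
++ * core identifies as gcv500, and the revision comes from bits 15:12,
++ * i.e. (0x01103000 >> 12) & 0xF == 0x3.  Newer cores skip this path
++ * and report model and revision through registers 0x00020 and
++ * 0x00024.
++ */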
++
++#if gcdPOWEROFF_TIMEOUT
++void
++_VGPowerTimerFunction(
++ gctPOINTER Data
++ )
++{
++ gckVGHARDWARE hardware = (gckVGHARDWARE)Data;
++ gcmkVERIFY_OK(
++ gckVGHARDWARE_SetPowerManagementState(hardware, gcvPOWER_OFF_TIMEOUT));
++}
++#endif
++
++/******************************************************************************\
++****************************** gckVGHARDWARE API code *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_Construct
++**
++** Construct a new gckVGHARDWARE object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an initialized gckOS object.
++**
++** OUTPUT:
++**
++** gckVGHARDWARE * Hardware
++** Pointer to a variable that will hold the pointer to the gckVGHARDWARE
++** object.
++*/
++gceSTATUS
++gckVGHARDWARE_Construct(
++ IN gckOS Os,
++ OUT gckVGHARDWARE * Hardware
++ )
++{
++ gckVGHARDWARE hardware = gcvNULL;
++ gceSTATUS status;
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 chipFeatures;
++ gctUINT32 chipMinorFeatures;
++ gctUINT32 chipMinorFeatures2;
++
++ gcmkHEADER_ARG("Os=0x%x Hardware=0x%x ", Os, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Hardware != gcvNULL);
++
++ do
++ {
++ gcmkERR_BREAK(gckOS_SetGPUPower(Os, gcvCORE_VG, gcvTRUE, gcvTRUE));
++
++ status = _ResetGPU(Os);
++
++ if (status != gcvSTATUS_OK)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "_ResetGPU failed: status=%d\n", status);
++ }
++
++ /* Identify the hardware. */
++ gcmkERR_BREAK(_IdentifyHardware(Os,
++ &chipModel, &chipRevision,
++ &chipFeatures, &chipMinorFeatures, &chipMinorFeatures2
++ ));
++
++ /* Allocate the gckVGHARDWARE object. */
++ gcmkERR_BREAK(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckVGHARDWARE), (gctPOINTER *) &hardware
++ ));
++
++ /* Initialize the gckVGHARDWARE object. */
++ hardware->object.type = gcvOBJ_HARDWARE;
++ hardware->os = Os;
++
++ /* Set chip identity. */
++ hardware->chipModel = chipModel;
++ hardware->chipRevision = chipRevision;
++ hardware->chipFeatures = chipFeatures;
++ hardware->chipMinorFeatures = chipMinorFeatures;
++ hardware->chipMinorFeatures2 = chipMinorFeatures2;
++
++        hardware->powerMutex = gcvNULL;
++        hardware->pageTableDirty = gcvNULL;
++ hardware->chipPowerState = gcvPOWER_ON;
++ hardware->chipPowerStateGlobal = gcvPOWER_ON;
++ hardware->clockState = gcvTRUE;
++ hardware->powerState = gcvTRUE;
++
++#if gcdPOWEROFF_TIMEOUT
++ hardware->powerOffTime = 0;
++ hardware->powerOffTimeout = gcdPOWEROFF_TIMEOUT;
++
++ gcmkVERIFY_OK(gckOS_CreateTimer(Os,
++ _VGPowerTimerFunction,
++ (gctPOINTER)hardware,
++ &hardware->powerOffTimer));
++#endif
++
++ /* Determine whether FE 2.0 is present. */
++ hardware->fe20 = ((((gctUINT32) (hardware->chipFeatures)) >> (0 ? 28:28) & ((gctUINT32) ((((1 ? 28:28) - (0 ? 28:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 28:28) - (0 ? 28:28) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 28:28) - (0 ? 28:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 28:28) - (0 ? 28:28) + 1)))))));
++
++ /* Determine whether VG 2.0 is present. */
++ hardware->vg20 = ((((gctUINT32) (hardware->chipMinorFeatures)) >> (0 ? 13:13) & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1)))))));
++
++ /* Determine whether VG 2.1 is present. */
++ hardware->vg21 = ((((gctUINT32) (hardware->chipMinorFeatures)) >> (0 ? 18:18) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))));
++
++ /* Set default event mask. */
++ hardware->eventMask = 0xFFFFFFFF;
++
++ gcmkERR_BREAK(gckOS_AtomConstruct(Os, &hardware->pageTableDirty));
++
++ /* Set fast clear to auto. */
++ gcmkVERIFY_OK(gckVGHARDWARE_SetFastClear(hardware, -1));
++
++ gcmkERR_BREAK(gckOS_CreateMutex(Os, &hardware->powerMutex));
++
++ /* Enable power management by default. */
++ hardware->powerManagement = gcvTRUE;
++
++ /* Return pointer to the gckVGHARDWARE object. */
++ *Hardware = hardware;
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++#if gcdPOWEROFF_TIMEOUT
++    if ((hardware != gcvNULL) && (hardware->powerOffTimer != gcvNULL))
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Os, hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Os, hardware->powerOffTimer));
++ }
++#endif
++
++    if ((hardware != gcvNULL) && (hardware->pageTableDirty != gcvNULL))
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, hardware->pageTableDirty));
++ }
++
++ if (hardware != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_Free(Os, hardware));
++ }
++
++ gcmkVERIFY_OK(gckOS_SetGPUPower(Os, gcvCORE_VG, gcvFALSE, gcvFALSE));
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_Destroy
++**
++**  Destroy a gckVGHARDWARE object.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object that needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGHARDWARE_Destroy(
++ IN gckVGHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%x ", Hardware);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Mark the object as unknown. */
++ Hardware->object.type = gcvOBJ_UNKNOWN;
++
++ if (Hardware->powerMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(
++ Hardware->os, Hardware->powerMutex));
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ gcmkVERIFY_OK(gckOS_StopTimer(Hardware->os, Hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Hardware->os, Hardware->powerOffTimer));
++#endif
++
++ if (Hardware->pageTableDirty != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Hardware->os, Hardware->pageTableDirty));
++ }
++
++ /* Free the object. */
++ status = gckOS_Free(Hardware->os, Hardware);
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_QueryMemory
++**
++** Query the amount of memory available on the hardware.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * InternalSize
++** Pointer to a variable that will hold the size of the internal video
++** memory in bytes. If 'InternalSize' is gcvNULL, no information of the
++** internal memory will be returned.
++**
++** gctUINT32 * InternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * InternalAlignment
++**          Pointer to a variable that will hold the hardware's alignment
++**          requirement for the internal video memory. This pointer cannot
++**          be gcvNULL if 'InternalSize' is also non-gcvNULL.
++**
++** gctSIZE_T * ExternalSize
++** Pointer to a variable that will hold the size of the external video
++** memory in bytes. If 'ExternalSize' is gcvNULL, no information of the
++** external memory will be returned.
++**
++** gctUINT32 * ExternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * ExternalAlignment
++**          Pointer to a variable that will hold the hardware's alignment
++**          requirement for the external video memory. This pointer cannot
++**          be gcvNULL if 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * HorizontalTileSize
++** Number of horizontal pixels per tile. If 'HorizontalTileSize' is
++** gcvNULL, no horizontal pixel per tile will be returned.
++**
++** gctUINT32 * VerticalTileSize
++** Number of vertical pixels per tile. If 'VerticalTileSize' is
++** gcvNULL, no vertical pixel per tile will be returned.
++*/
++gceSTATUS
++gckVGHARDWARE_QueryMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ )
++{
++    gcmkHEADER_ARG("Hardware=0x%x InternalSize=0x%x InternalBaseAddress=0x%x InternalAlignment=0x%x "
++ "ExternalSize=0x%x ExternalBaseAddress=0x%x ExternalAlignment=0x%x HorizontalTileSize=0x%x VerticalTileSize=0x%x",
++ Hardware, InternalSize, InternalBaseAddress, InternalAlignment,
++ ExternalSize, ExternalBaseAddress, ExternalAlignment, HorizontalTileSize, VerticalTileSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (InternalSize != gcvNULL)
++ {
++ /* No internal memory. */
++ *InternalSize = 0;
++ }
++
++ if (ExternalSize != gcvNULL)
++ {
++ /* No external memory. */
++ *ExternalSize = 0;
++ }
++
++ if (HorizontalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *HorizontalTileSize = 4;
++ }
++
++ if (VerticalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *VerticalTileSize = 4;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_QueryChipIdentity
++**
++** Query the identity of the hardware.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** gceCHIPMODEL * ChipModel
++** If 'ChipModel' is not gcvNULL, the variable it points to will
++** receive the model of the chip.
++**
++** gctUINT32 * ChipRevision
++** If 'ChipRevision' is not gcvNULL, the variable it points to will
++** receive the revision of the chip.
++**
++** gctUINT32 * ChipFeatures
++** If 'ChipFeatures' is not gcvNULL, the variable it points to will
++** receive the feature set of the chip.
++**
++** gctUINT32 * ChipMinorFeatures
++** If 'ChipMinorFeatures' is not gcvNULL, the variable it points to
++** will receive the minor feature set of the chip.
++**
++** gctUINT32 * ChipMinorFeatures2
++** If 'ChipMinorFeatures2' is not gcvNULL, the variable it points to
++** will receive the minor feature set of the chip.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_QueryChipIdentity(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPMODEL * ChipModel,
++ OUT gctUINT32 * ChipRevision,
++ OUT gctUINT32* ChipFeatures,
++ OUT gctUINT32* ChipMinorFeatures,
++ OUT gctUINT32* ChipMinorFeatures2
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x ChipModel=0x%x ChipRevision=0x%x ChipFeatures = 0x%x ChipMinorFeatures = 0x%x ChipMinorFeatures2 = 0x%x",
++ Hardware, ChipModel, ChipRevision, ChipFeatures, ChipMinorFeatures, ChipMinorFeatures2);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Return chip model. */
++ if (ChipModel != gcvNULL)
++ {
++ *ChipModel = Hardware->chipModel;
++ }
++
++ /* Return revision number. */
++ if (ChipRevision != gcvNULL)
++ {
++ *ChipRevision = Hardware->chipRevision;
++ }
++
++ /* Return feature set. */
++ if (ChipFeatures != gcvNULL)
++ {
++ gctUINT32 features = Hardware->chipFeatures;
++
++ if ((((((gctUINT32) (features)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (Hardware->allowFastClear) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++
++        /* Mark 2D pipe as available for GC500.0 since it did not have
++        ** this bit. */
++ if ((Hardware->chipModel == gcv500)
++ && (Hardware->chipRevision == 0)
++ )
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++ }
++
++        /* Mark 2D pipe as available for GC300 since it did not have
++        ** this bit. */
++ if (Hardware->chipModel == gcv300)
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++ }
++
++ *ChipFeatures = features;
++ }
++
++ /* Return minor feature set. */
++ if (ChipMinorFeatures != gcvNULL)
++ {
++ *ChipMinorFeatures = Hardware->chipMinorFeatures;
++ }
++
++ /* Return minor feature set #2. */
++ if (ChipMinorFeatures2 != gcvNULL)
++ {
++ *ChipMinorFeatures2 = Hardware->chipMinorFeatures2;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_ConvertFormat
++**
++** Convert an API format to hardware parameters.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** gceSURF_FORMAT Format
++** API format to convert.
++**
++** OUTPUT:
++**
++** gctUINT32 * BitsPerPixel
++** Pointer to a variable that will hold the number of bits per pixel.
++**
++** gctUINT32 * BytesPerTile
++** Pointer to a variable that will hold the number of bytes per tile.
++*/
++gceSTATUS
++gckVGHARDWARE_ConvertFormat(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_FORMAT Format,
++ OUT gctUINT32 * BitsPerPixel,
++ OUT gctUINT32 * BytesPerTile
++ )
++{
++ gctUINT32 bitsPerPixel;
++ gctUINT32 bytesPerTile;
++
++ gcmkHEADER_ARG("Hardware=0x%x Format=0x%x BitsPerPixel=0x%x BytesPerTile = 0x%x",
++ Hardware, Format, BitsPerPixel, BytesPerTile);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Dispatch on format. */
++ switch (Format)
++ {
++ case gcvSURF_A1:
++ case gcvSURF_L1:
++ /* 1-bpp format. */
++ bitsPerPixel = 1;
++ bytesPerTile = (1 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_A4:
++ /* 4-bpp format. */
++ bitsPerPixel = 4;
++ bytesPerTile = (4 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_INDEX8:
++ case gcvSURF_A8:
++ case gcvSURF_L8:
++ /* 8-bpp format. */
++ bitsPerPixel = 8;
++ bytesPerTile = (8 * 4 * 4) / 8;
++ break;
++
++    case gcvSURF_YV12:
++    case gcvSURF_NV12:
++        /* 12-bpp planar YUV formats. */
++        bitsPerPixel = 12;
++        bytesPerTile = (12 * 4 * 4) / 8;
++        break;
++
++ /* 4444 variations. */
++ case gcvSURF_X4R4G4B4:
++ case gcvSURF_A4R4G4B4:
++ case gcvSURF_R4G4B4X4:
++ case gcvSURF_R4G4B4A4:
++ case gcvSURF_B4G4R4X4:
++ case gcvSURF_B4G4R4A4:
++ case gcvSURF_X4B4G4R4:
++ case gcvSURF_A4B4G4R4:
++
++ /* 1555 variations. */
++ case gcvSURF_X1R5G5B5:
++ case gcvSURF_A1R5G5B5:
++ case gcvSURF_R5G5B5X1:
++ case gcvSURF_R5G5B5A1:
++ case gcvSURF_X1B5G5R5:
++ case gcvSURF_A1B5G5R5:
++ case gcvSURF_B5G5R5X1:
++ case gcvSURF_B5G5R5A1:
++
++ /* 565 variations. */
++ case gcvSURF_R5G6B5:
++ case gcvSURF_B5G6R5:
++
++ case gcvSURF_A8L8:
++ case gcvSURF_YUY2:
++ case gcvSURF_UYVY:
++ case gcvSURF_D16:
++ /* 16-bpp format. */
++ bitsPerPixel = 16;
++ bytesPerTile = (16 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_X8R8G8B8:
++ case gcvSURF_A8R8G8B8:
++ case gcvSURF_X8B8G8R8:
++ case gcvSURF_A8B8G8R8:
++ case gcvSURF_R8G8B8X8:
++ case gcvSURF_R8G8B8A8:
++ case gcvSURF_B8G8R8X8:
++ case gcvSURF_B8G8R8A8:
++ case gcvSURF_D32:
++ /* 32-bpp format. */
++ bitsPerPixel = 32;
++ bytesPerTile = (32 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_D24S8:
++ /* 24-bpp format. */
++ bitsPerPixel = 32;
++ bytesPerTile = (32 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_DXT1:
++ case gcvSURF_ETC1:
++ bitsPerPixel = 4;
++ bytesPerTile = (4 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_DXT2:
++ case gcvSURF_DXT3:
++ case gcvSURF_DXT4:
++ case gcvSURF_DXT5:
++ bitsPerPixel = 8;
++ bytesPerTile = (8 * 4 * 4) / 8;
++ break;
++
++ default:
++ /* Invalid format. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Set the result. */
++ if (BitsPerPixel != gcvNULL)
++ {
++ * BitsPerPixel = bitsPerPixel;
++ }
++
++ if (BytesPerTile != gcvNULL)
++ {
++ * BytesPerTile = bytesPerTile;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
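++
++/* Illustrative example (not part of the original source): for a 16-bpp
++** format such as gcvSURF_R5G6B5 the function reports bitsPerPixel = 16
++** and, since a tile is 4x4 pixels, bytesPerTile = (16 * 4 * 4) / 8 = 32
++** bytes. */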
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_SplitMemory
++**
++** Split a hardware specific memory address into a pool and offset.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** gctUINT32 Address
++** Address in hardware specific format.
++**
++** OUTPUT:
++**
++** gcePOOL * Pool
++** Pointer to a variable that will hold the pool type for the address.
++**
++** gctUINT32 * Offset
++** Pointer to a variable that will hold the offset for the address.
++*/
++gceSTATUS
++gckVGHARDWARE_SplitMemory(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Address=0x%x Pool=0x%x Offset = 0x%x",
++ Hardware, Address, Pool, Offset);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Pool != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Offset != gcvNULL);
++
++ /* Dispatch on memory type. */
++ switch ((((((gctUINT32) (Address)) >> (0 ? 1:0)) & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1)))))) ))
++ {
++ case 0x0:
++ /* System memory. */
++ *Pool = gcvPOOL_SYSTEM;
++ break;
++
++ case 0x2:
++ /* Virtual memory. */
++ *Pool = gcvPOOL_VIRTUAL;
++ break;
++
++ default:
++ /* Invalid memory type. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Return offset of address. */
++ *Offset = ((((gctUINT32) (Address)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
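++
++    /* Illustrative note (not part of the original source): bits 1:0 of a
++    ** hardware address encode the pool (0x0 = system, 0x2 = virtual) and
++    ** the statement above merely clears them. For example, an Address of
++    ** 0x00001002 splits into gcvPOOL_VIRTUAL with an Offset of
++    ** 0x00001000. */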
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_Execute
++**
++** Kickstart the hardware's command processor with an initialized command
++** buffer.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** gctUINT32 Address
++** Address of the command buffer.
++**
++** gctSIZE_T Count
++** Number of command-sized data units to be executed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGHARDWARE_Execute(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ IN gctSIZE_T Count
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Address=0x%x Count=0x%x",
++ Hardware, Address, Count);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ do
++ {
++ /* Enable all events. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00014,
++ Hardware->eventMask
++ ));
++
++ if (Hardware->fe20)
++ {
++ /* Write address register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00500,
++ gcmkFIXADDRESS(Address)
++ ));
++
++ /* Write control register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00504,
++ Count
++ ));
++ }
++ else
++ {
++ /* Write address register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00654,
++ gcmkFIXADDRESS(Address)
++ ));
++
++ /* Write control register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00658,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ ));
++ }
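++
++        /* Illustrative note (not part of the original source): in this
++        ** pre-FE2.0 path the control word written to 0x00658 packs an
++        ** enable bit at bit 16 and the command count in bits 15:0, which
++        ** is what the macro expansion above builds. */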
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_AlignToTile
++**
++** Align the specified width and height to tile boundaries.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++**          Pointer to a gckVGHARDWARE object.
++**
++** gceSURF_TYPE Type
++** Type of alignment.
++**
++** gctUINT32 * Width
++** Pointer to the width to be aligned. If 'Width' is gcvNULL, no width
++** will be aligned.
++**
++** gctUINT32 * Height
++** Pointer to the height to be aligned. If 'Height' is gcvNULL, no height
++** will be aligned.
++**
++** OUTPUT:
++**
++** gctUINT32 * Width
++** Pointer to a variable that will receive the aligned width.
++**
++** gctUINT32 * Height
++** Pointer to a variable that will receive the aligned height.
++*/
++gceSTATUS
++gckVGHARDWARE_AlignToTile(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32 * Width,
++ IN OUT gctUINT32 * Height
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Type=0x%x Width=0x%x Height=0x%x",
++ Hardware, Type, Width, Height);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Width != gcvNULL)
++ {
++ /* Align the width. */
++ *Width = gcmALIGN(*Width, (Type == gcvSURF_TEXTURE) ? 4 : 16);
++ }
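++
++    /* Illustrative example (not part of the original source): a width of
++    ** 33 aligns to 36 for gcvSURF_TEXTURE (4-pixel boundary) and to 48
++    ** for any other type (16-pixel boundary). */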
++
++ if (Height != gcvNULL)
++ {
++ /* Special case for VG images. */
++ if ((*Height == 0) && (Type == gcvSURF_IMAGE))
++ {
++ *Height = 4;
++ }
++ else
++ {
++ /* Align the height. */
++ *Height = gcmALIGN(*Height, 4);
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_ConvertLogical
++**
++** Convert a logical system address into a hardware specific address.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++**          Pointer to a gckVGHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address to convert.
++**
++** gctUINT32* Address
++** Return hardware specific address.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGHARDWARE_ConvertLogical(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Address=0x%x",
++ Hardware, Logical, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ do
++ {
++ /* Convert logical address into a physical address. */
++ gcmkERR_BREAK(gckOS_GetPhysicalAddress(
++ Hardware->os, Logical, &address
++ ));
++
++ /* Return hardware specific address. */
++ *Address = ((((gctUINT32) (address)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_QuerySystemMemory
++**
++** Query the command buffer alignment and number of reserved bytes.
++**
++** INPUT:
++**
++**      gckVGHARDWARE Hardware
++**          Pointer to a gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * SystemSize
++** Pointer to a variable that receives the maximum size of the system
++** memory.
++**
++** gctUINT32 * SystemBaseAddress
++**          Pointer to a variable that receives the base address for system
++** memory.
++*/
++gceSTATUS gckVGHARDWARE_QuerySystemMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x SystemSize=0x%x SystemBaseAddress=0x%x",
++ Hardware, SystemSize, SystemBaseAddress);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (SystemSize != gcvNULL)
++ {
++ /* Maximum system memory can be 2GB. */
++        *SystemSize = (gctSIZE_T)(1U << 31);
++ }
++
++ if (SystemBaseAddress != gcvNULL)
++ {
++ /* Set system memory base address. */
++ *SystemBaseAddress = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++ }
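++
++    /* Illustrative note (not part of the original source): 2^31 bytes is
++    ** 2 GB, and the base-address expression above evaluates to 0 with
++    ** pool bits 1:0 set to 0x0, i.e. the system pool. */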
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_SetMMU
++**
++** Set the page table base address.
++**
++** INPUT:
++**
++**      gckVGHARDWARE Hardware
++**          Pointer to a gckVGHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of the page table.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGHARDWARE_SetMMU(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical
++ )
++{
++ gceSTATUS status;
++ gctUINT32 address = 0;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x",
++ Hardware, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ do
++ {
++        /* Convert the logical address into a hardware address. */
++ gcmkERR_BREAK(gckVGHARDWARE_ConvertLogical(Hardware, Logical, &address) );
++
++ /* Write the AQMemoryFePageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00400,
++ gcmkFIXADDRESS(address)) );
++
++ /* Write the AQMemoryTxPageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00404,
++ gcmkFIXADDRESS(address)) );
++
++ /* Write the AQMemoryPePageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00408,
++ gcmkFIXADDRESS(address)) );
++
++ /* Write the AQMemoryPezPageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x0040C,
++ gcmkFIXADDRESS(address)) );
++
++ /* Write the AQMemoryRaPageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00410,
++ gcmkFIXADDRESS(address)) );
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_FlushMMU
++**
++** Flush the page table.
++**
++** INPUT:
++**
++**      gckVGHARDWARE Hardware
++**          Pointer to a gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGHARDWARE_FlushMMU(
++ IN gckVGHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gckVGCOMMAND command;
++
++ gcmkHEADER_ARG("Hardware=0x%x ", Hardware);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ do
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++ gctUINT32_PTR buffer;
++
++ /* Create a shortcut to the command buffer object. */
++ command = Hardware->kernel->command;
++
++ /* Allocate command buffer space. */
++ gcmkERR_BREAK(gckVGCOMMAND_Allocate(
++ command, 8, &commandBuffer, (gctPOINTER *) &buffer
++ ));
++
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E04) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
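++
++        /* Illustrative note (not part of the original source): the two
++        ** words built above form a LOAD_STATE command (opcode 0x01 in
++        ** bits 31:27) that writes one word to state address 0x0E04,
++        ** setting the flush bits (bits 0..4) for the individual MMU
++        ** clients. */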
++ }
++    while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_BuildVirtualAddress
++**
++** Build a virtual address.
++**
++** INPUT:
++**
++**      gckVGHARDWARE Hardware
++**          Pointer to a gckVGHARDWARE object.
++**
++** gctUINT32 Index
++** Index into page table.
++**
++** gctUINT32 Offset
++** Offset into page.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++**          Pointer to a variable receiving the hardware address.
++*/
++gceSTATUS gckVGHARDWARE_BuildVirtualAddress(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++
++ gcmkHEADER_ARG("Hardware=0x%x Index=0x%x Offset=0x%x Address=0x%x",
++ Hardware, Index, Offset, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Build virtual address. */
++ address = (Index << 12) | Offset;
++
++ /* Set virtual type. */
++ address = ((((gctUINT32) (address)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x2 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
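++
++    /* Illustrative example (not part of the original source): the
++    ** statement above stamps 0x2 (the virtual pool) into bits 1:0, so for
++    ** Index = 5 and Offset = 0x120 the address (5 << 12) | 0x120 = 0x5120
++    ** becomes 0x5122; the offset is assumed 4-byte aligned so no bits are
++    ** lost. */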
++
++ /* Set the result. */
++ *Address = address;
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGHARDWARE_GetIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32 * Data
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%x Data=0x%x", Hardware, Data);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++ /* Read register and return. */
++ status = gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG, 0x00004, Data);
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckVGHARDWARE_SetFastClear(
++ IN gckVGHARDWARE Hardware,
++ IN gctINT Enable
++ )
++{
++ gctUINT32 debug;
++ gceSTATUS status;
++
++ if (!(((((gctUINT32) (Hardware->chipFeatures)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ return gcvSTATUS_OK;
++ }
++
++ do
++ {
++ if (Enable == -1)
++ {
++ Enable = (Hardware->chipModel > gcv500) ||
++ ((Hardware->chipModel == gcv500) && (Hardware->chipRevision >= 3));
++ }
++
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00414,
++ &debug));
++
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))) | (((gctUINT32) ((gctUINT32) (Enable == 0) & ((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20)));
++
++#ifdef AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1) == 32) ? ~0 : (~(~0 << ((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1))))))) << (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION))) | (((gctUINT32) ((gctUINT32) (Enable == 0) & ((gctUINT32) ((((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1) == 32) ? ~0 : (~(~0 << ((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1))))))) << (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION)));
++#endif
++
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00414,
++ debug));
++
++ Hardware->allowFastClear = Enable;
++
++        status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ return status;
++}
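++
++/* Usage note (illustrative, not part of the original source): passing -1
++** as Enable selects the automatic policy above; this is how
++** gckVGHARDWARE_Construct initializes fast clear via
++** gckVGHARDWARE_SetFastClear(hardware, -1) earlier in this file. */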
++
++gceSTATUS
++gckVGHARDWARE_ReadInterrupt(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32_PTR IDs
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%x IDs=0x%x", Hardware, IDs);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(IDs != gcvNULL);
++
++ /* Read AQIntrAcknowledge register. */
++ status = gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00010,
++ IDs);
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS _CommandStall(
++ gckVGHARDWARE Hardware)
++{
++ gceSTATUS status;
++ gckVGCOMMAND command;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ do
++ {
++ gctUINT32_PTR buffer;
++ command = Hardware->kernel->command;
++
++ /* Allocate command buffer space. */
++ gcmkERR_BREAK(gckVGCOMMAND_Allocate(
++ command, 8, &command->powerStallBuffer,
++ (gctPOINTER *) &buffer
++ ));
++
++ gcmkERR_BREAK(gckVGCOMMAND_EventCommand(
++ command, buffer, gcvBLOCK_PIXEL,
++ command->powerStallInt, gcvNULL));
++
++ gcmkERR_BREAK(gckVGCOMMAND_Execute(
++ command,
++ command->powerStallBuffer
++ ));
++
++ /* Wait the signal. */
++ gcmkERR_BREAK(gckOS_WaitSignal(
++ command->os,
++ command->powerStallSignal,
++ gcdGPU_TIMEOUT));
++
++
++ }
++    while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++**  gckVGHARDWARE_SetPowerManagementState
++**
++** Set GPU to a specified power state.
++**
++** INPUT:
++**
++**      gckVGHARDWARE Hardware
++**          Pointer to a gckVGHARDWARE object.
++**
++** gceCHIPPOWERSTATE State
++** Power State.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_SetPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ )
++{
++ gceSTATUS status;
++ gckVGCOMMAND command = gcvNULL;
++ gckOS os;
++ gctUINT flag/*, clock*/;
++
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL stall = gcvTRUE;
++ gctBOOL commitMutex = gcvFALSE;
++ gctBOOL mutexAcquired = gcvFALSE;
++
++#if gcdPOWEROFF_TIMEOUT
++ gctBOOL timeout = gcvFALSE;
++ gctBOOL isAfter = gcvFALSE;
++ gctUINT32 currentTime;
++#endif
++
++ gctBOOL broadcast = gcvFALSE;
++ gctUINT32 process, thread;
++ gctBOOL global = gcvFALSE;
++
++#if gcdENABLE_PROFILING
++ gctUINT64 time, freq, mutexTime, onTime, stallTime, stopTime, delayTime,
++ initTime, offTime, startTime, totalTime;
++#endif
++
++ /* State transition flags. */
++ static const gctUINT flags[4][4] =
++ {
++ /* gcvPOWER_ON */
++ { /* ON */ 0,
++ /* OFF */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_NOP,
++ /* SUSPEND */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_OFF */
++ { /* ON */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY,
++ /* OFF */ 0,
++ /* IDLE */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY,
++ /* SUSPEND */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_IDLE */
++ { /* ON */ gcvPOWER_FLAG_NOP,
++ /* OFF */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ 0,
++ /* SUSPEND */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_SUSPEND */
++ { /* ON */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* OFF */ gcvPOWER_FLAG_SAVE |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* SUSPEND */ 0,
++ },
++ };
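++
++    /* Illustrative example (not part of the original source): the table
++    ** is indexed as flags[currentState][targetState], so a transition
++    ** from gcvPOWER_ON to gcvPOWER_OFF yields ACQUIRE | STALL | STOP |
++    ** POWER_OFF | CLOCK_OFF, i.e. the full shutdown sequence executed
++    ** below. */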
++
++ gcmkHEADER_ARG("Hardware=0x%x State=%d", Hardware, State);
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Switching to power state %d",
++ State);
++#endif
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Get the gckOS object pointer. */
++ os = Hardware->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Get the gckCOMMAND object pointer. */
++ gcmkVERIFY_OBJECT(Hardware->kernel, gcvOBJ_KERNEL);
++ command = Hardware->kernel->command;
++ gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
++
++ if (Hardware->powerManagement == gcvFALSE)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Start profiler. */
++ gcmkPROFILE_INIT(freq, time);
++
++ /* Convert the broadcast power state. */
++ switch (State)
++ {
++ case gcvPOWER_SUSPEND_ATPOWERON:
++ /* Convert to SUSPEND and don't wait for STALL. */
++ State = gcvPOWER_SUSPEND;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_OFF_ATPOWERON:
++ /* Convert to OFF and don't wait for STALL. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_IDLE_BROADCAST:
++ /* Convert to IDLE and note we are inside broadcast. */
++ State = gcvPOWER_IDLE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_SUSPEND_BROADCAST:
++ /* Convert to SUSPEND and note we are inside broadcast. */
++ State = gcvPOWER_SUSPEND;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_BROADCAST:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_RECOVERY:
++ /* Convert to OFF and note we are inside recovery. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_ON_AUTO:
++ /* Convert to ON and note we are inside recovery. */
++ State = gcvPOWER_ON;
++ break;
++
++ case gcvPOWER_ON:
++ case gcvPOWER_IDLE:
++ case gcvPOWER_SUSPEND:
++ case gcvPOWER_OFF:
++ /* Mark as global power management. */
++ global = gcvTRUE;
++ break;
++
++#if gcdPOWEROFF_TIMEOUT
++ case gcvPOWER_OFF_TIMEOUT:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ /* Check time out */
++ timeout = gcvTRUE;
++ break;
++#endif
++
++ default:
++ break;
++ }
++
++ /* Get current process and thread IDs. */
++ gcmkONERROR(gckOS_GetProcessID(&process));
++ gcmkONERROR(gckOS_GetThreadID(&thread));
++
++ /* Acquire the power mutex. */
++ if (broadcast)
++ {
++ /* Try to acquire the power mutex. */
++ status = gckOS_AcquireMutex(os, Hardware->powerMutex, 0);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ /* Check if we already own this mutex. */
++ if ((Hardware->powerProcess == process)
++ && (Hardware->powerThread == thread)
++ )
++ {
++ /* Bail out on recursive power management. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ else if (State == gcvPOWER_IDLE)
++ {
++            /* gcvPOWER_IDLE_BROADCAST comes from the IST, so waiting
++            ** here would deadlock if the lock holder calls
++            ** gckCOMMAND_Stall(). */
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os,
++ Hardware->powerMutex,
++ gcvINFINITE));
++ }
++ }
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os, Hardware->powerMutex, gcvINFINITE));
++ }
++
++    /* Get time until mutex acquired. */
++ gcmkPROFILE_QUERY(time, mutexTime);
++
++ Hardware->powerProcess = process;
++ Hardware->powerThread = thread;
++ mutexAcquired = gcvTRUE;
++
++ /* Grab control flags and clock. */
++ flag = flags[Hardware->chipPowerState][State];
++ /*clock = clocks[State];*/
++
++#if gcdPOWEROFF_TIMEOUT
++ if (timeout)
++ {
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ gcmkONERROR(
++ gckOS_TicksAfter(Hardware->powerOffTime, currentTime, &isAfter));
++
++        /* powerOffTime was pushed forward, so give up. */
++ if (isAfter
++ /* Expect a transition start from IDLE. */
++ || (Hardware->chipPowerState == gcvPOWER_ON)
++ || (Hardware->chipPowerState == gcvPOWER_OFF)
++ )
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++#endif
++
++ if (flag == 0)
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* internal power control */
++ if (!global)
++ {
++ if (Hardware->chipPowerStateGlobal == gcvPOWER_OFF)
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++ acquired = gcvTRUE;
++
++ /* avoid acquiring again. */
++ flag &= ~gcvPOWER_FLAG_ACQUIRE;
++ }
++ }
++
++ if (flag & (gcvPOWER_FLAG_INITIALIZE | gcvPOWER_FLAG_CLOCK_ON))
++ {
++ /* Turn on the power. */
++ gcmkONERROR(gckOS_SetGPUPower(os, gcvCORE_VG, gcvTRUE, gcvTRUE));
++
++ /* Mark clock and power as enabled. */
++ Hardware->clockState = gcvTRUE;
++ Hardware->powerState = gcvTRUE;
++ }
++
++ /* Get time until powered on. */
++ gcmkPROFILE_QUERY(time, onTime);
++
++ if ((flag & gcvPOWER_FLAG_STALL) && stall)
++ {
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ command->os,
++ command->commitMutex,
++ gcvINFINITE
++ ));
++
++ commitMutex = gcvTRUE;
++
++ gcmkONERROR(_CommandStall(Hardware));
++ }
++
++ /* Get time until stalled. */
++ gcmkPROFILE_QUERY(time, stallTime);
++
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++
++ acquired = gcvTRUE;
++ }
++
++ if (flag & gcvPOWER_FLAG_STOP)
++ {
++ }
++
++ /* Get time until stopped. */
++ gcmkPROFILE_QUERY(time, stopTime);
++
++ /* Only process this when hardware is enabled. */
++ if (Hardware->clockState && Hardware->powerState)
++ {
++ }
++
++ if (flag & gcvPOWER_FLAG_DELAY)
++ {
++        /* Wait for the specified amount of time to let the hardware
++        ** settle when coming back from the power-off or suspend state. */
++ gcmkONERROR(gckOS_Delay(os, gcdPOWER_CONTROL_DELAY));
++ }
++
++ /* Get time until delayed. */
++ gcmkPROFILE_QUERY(time, delayTime);
++
++ if (flag & gcvPOWER_FLAG_INITIALIZE)
++ {
++ gcmkONERROR(gckVGHARDWARE_SetMMU(Hardware, Hardware->kernel->mmu->pageTableLogical));
++
++ /* Force the command queue to reload the next context. */
++ command->currentContext = 0;
++ }
++
++ /* Get time until initialized. */
++ gcmkPROFILE_QUERY(time, initTime);
++
++ if (flag & (gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF))
++ {
++ /* Turn off the GPU power. */
++ gcmkONERROR(
++ gckOS_SetGPUPower(os,
++ gcvCORE_VG,
++ (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE,
++ (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE));
++
++ /* Save current hardware power and clock states. */
++ Hardware->clockState = (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE;
++ Hardware->powerState = (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE;
++ }
++
++ /* Get time until off. */
++ gcmkPROFILE_QUERY(time, offTime);
++
++ if (flag & gcvPOWER_FLAG_START)
++ {
++ }
++
++ /* Get time until started. */
++ gcmkPROFILE_QUERY(time, startTime);
++
++ if (flag & gcvPOWER_FLAG_RELEASE)
++ {
++ /* Release the power management semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, command->powerSemaphore));
++ acquired = gcvFALSE;
++ }
++
++ /* Save the new power state. */
++ Hardware->chipPowerState = State;
++
++ if (global)
++ {
++ /* Save the new power state. */
++ Hardware->chipPowerStateGlobal = State;
++ }
++
++ if (commitMutex)
++ {
++        /* Release the commit mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ command->os,
++ command->commitMutex
++ ));
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ /* Reset power off time */
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ Hardware->powerOffTime = currentTime + Hardware->powerOffTimeout;
++
++ if (State == gcvPOWER_IDLE)
++ {
++ /* Start a timer to power off GPU when GPU enters IDLE or SUSPEND. */
++ gcmkVERIFY_OK(gckOS_StartTimer(os,
++ Hardware->powerOffTimer,
++ Hardware->powerOffTimeout));
++ }
++ else
++ {
++        gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "Cancel powerOffTimer");
++
++ /* Cancel running timer when GPU enters ON or OFF. */
++ gcmkVERIFY_OK(gckOS_StopTimer(os, Hardware->powerOffTimer));
++ }
++#endif
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* Get total time. */
++ gcmkPROFILE_QUERY(time, totalTime);
++#if gcdENABLE_PROFILING
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "PROF(%llu): mutex:%llu on:%llu stall:%llu stop:%llu",
++ freq, mutexTime, onTime, stallTime, stopTime);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ " delay:%llu init:%llu off:%llu start:%llu total:%llu",
++ delayTime, initTime, offTime, startTime, totalTime);
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ if (acquired)
++ {
++ /* Release semaphore. */
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Hardware->os,
++ command->powerSemaphore));
++ }
++
++ if (mutexAcquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++
++ if (commitMutex)
++ {
++        /* Release the commit mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ command->os,
++ command->commitMutex
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++**  gckVGHARDWARE_QueryPowerManagementState
++**
++** Get GPU power state.
++**
++** INPUT:
++**
++**      gckVGHARDWARE Hardware
++**          Pointer to a gckVGHARDWARE object.
++**
++** gceCHIPPOWERSTATE* State
++** Power State.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_QueryPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(State != gcvNULL);
++
++    /* Return the state. */
++ *State = Hardware->chipPowerState;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*State=%d", *State);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_SetPowerManagement
++**
++**  Enable or disable the GPU power management function.
++**  Only used during the driver initialization stage.
++**
++** INPUT:
++**
++**      gckVGHARDWARE Hardware
++**          Pointer to a gckVGHARDWARE object.
++**
++** gctBOOL PowerManagement
++**          Power management state.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_SetPowerManagement(
++ IN gckVGHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ Hardware->powerManagement = PowerManagement;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGHARDWARE_SetPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Timeout
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Timeout=%d", Hardware, Timeout);
++
++#if gcdPOWEROFF_TIMEOUT
++ Hardware->powerOffTimeout = Timeout;
++#endif
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++gceSTATUS
++gckVGHARDWARE_QueryPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++#if gcdPOWEROFF_TIMEOUT
++    *Timeout = Hardware->powerOffTimeout;
++#else
++    /* Timeout support is compiled out; report zero rather than leaving
++    ** the output uninitialized for the footer below. */
++    *Timeout = 0;
++#endif
++
++ gcmkFOOTER_ARG("*Timeout=%d", *Timeout);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGHARDWARE_QueryIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ )
++{
++ gceSTATUS status;
++ gctUINT32 idle;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(IsIdle != gcvNULL);
++
++ /* We are idle when the power is not ON. */
++ if (Hardware->chipPowerState != gcvPOWER_ON)
++ {
++ *IsIdle = gcvTRUE;
++ }
++
++ else
++ {
++ /* Read idle register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG, 0x00004, &idle));
++
++ /* Pipe must be idle. */
++ if (((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 8:8)) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 10:10)) & ((gctUINT32) ((((1 ? 10:10) - (0 ? 10:10) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 10:10) - (0 ? 10:10) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 11:11)) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1)))))) ) != 1)
++ )
++ {
++ /* Something is busy. */
++ *IsIdle = gcvFALSE;
++ }
++
++ else
++ {
++ *IsIdle = gcvTRUE;
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif /* gcdENABLE_VG */
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.h linux-xbian-imx6/drivers/mxc/gpu-viv/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/arch/GC350/hal/kernel/gc_hal_kernel_hardware_vg.h 2015-07-27 23:13:06.166979215 +0200
+@@ -0,0 +1,75 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_hardware_vg_h_
++#define __gc_hal_kernel_hardware_vg_h_
++
++/* gckVGHARDWARE object. */
++struct _gckVGHARDWARE
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckKERNEL object. */
++ gckVGKERNEL kernel;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Chip characteristics. */
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 chipFeatures;
++ gctUINT32 chipMinorFeatures;
++ gctUINT32 chipMinorFeatures2;
++ gctBOOL allowFastClear;
++
++ /* Features. */
++ gctBOOL fe20;
++ gctBOOL vg20;
++ gctBOOL vg21;
++
++ /* Event mask. */
++ gctUINT32 eventMask;
++
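++    /* Power management state (descriptive comment added for clarity; not
++    ** part of the original source). */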
++ gctBOOL clockState;
++ gctBOOL powerState;
++ gctPOINTER powerMutex;
++ gctUINT32 powerProcess;
++ gctUINT32 powerThread;
++ gceCHIPPOWERSTATE chipPowerState;
++ gceCHIPPOWERSTATE chipPowerStateGlobal;
++ gctISRMANAGERFUNC startIsr;
++ gctISRMANAGERFUNC stopIsr;
++ gctPOINTER isrContext;
++ gctPOINTER pageTableDirty;
++
++#if gcdPOWEROFF_TIMEOUT
++ gctUINT32 powerOffTime;
++ gctUINT32 powerOffTimeout;
++ gctPOINTER powerOffTimer;
++#endif
++
++ gctBOOL powerManagement;
++};
++
++#endif /* __gc_hal_kernel_hardware_vg_h_ */
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/arch/XAQ2/hal/kernel/gc_hal_kernel_context.c linux-xbian-imx6/drivers/mxc/gpu-viv/arch/XAQ2/hal/kernel/gc_hal_kernel_context.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/arch/XAQ2/hal/kernel/gc_hal_kernel_context.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/arch/XAQ2/hal/kernel/gc_hal_kernel_context.c 2015-07-27 23:13:06.170964992 +0200
+@@ -0,0 +1,1735 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#include "gc_hal_kernel_context.h"
++#include "gc_hal_kernel_buffer.h"
++
++/******************************************************************************\
++******************************** Debugging Macro *******************************
++\******************************************************************************/
++
++/* Zone used for header/footer. */
++#define _GC_OBJ_ZONE gcvZONE_HARDWARE
++
++
++/******************************************************************************\
++************************** Context State Buffer Helpers ************************
++\******************************************************************************/
++
++#define _STATE(reg) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ reg ## _Count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _STATE_COUNT(reg, count) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _STATE_COUNT_OFFSET(reg, offset, count) \
++ _State(\
++ Context, index, \
++ (reg ## _Address >> 2) + offset, \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _STATE_MIRROR_COUNT(reg, mirror, count) \
++ _StateMirror(\
++ Context, \
++ reg ## _Address >> 2, \
++ count, \
++ mirror ## _Address >> 2 \
++ )
++
++#define _STATE_HINT(reg) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ reg ## _Count, \
++ gcvFALSE, gcvTRUE \
++ )
++
++#define _STATE_HINT_BLOCK(reg, block, count) \
++ _State(\
++ Context, index, \
++ (reg ## _Address >> 2) + (block << reg ## _BLK), \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvTRUE \
++ )
++
++#define _STATE_X(reg) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ reg ## _Count, \
++ gcvTRUE, gcvFALSE \
++ )
++
++#define _CLOSE_RANGE() \
++ _TerminateStateBlock(Context, index)
++
++#define _ENABLE(reg, field) \
++ do \
++ { \
++ if (gcmVERIFYFIELDVALUE(data, reg, MASK_ ## field, ENABLED)) \
++ { \
++ enable |= gcmFIELDMASK(reg, field); \
++ } \
++ } \
++ while (gcvFALSE)
++
++#define _BLOCK_COUNT(reg) \
++ ((reg ## _Count) >> (reg ## _BLK))
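++
++/* Illustrative note (not part of the original source): the _STATE* macro
++** family above wraps the _State() helper, recording a register range
++** (address, reset value, count) in the context state buffer; the variants
++** differ only in the count, the block offset, and the two trailing
++** boolean flags (_STATE_X and _STATE_HINT each set one of them). */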
++
++
++/******************************************************************************\
++*********************** Support Functions and Definitions **********************
++\******************************************************************************/
++
++#define gcdSTATE_MASK \
++ (((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x03 | 0xC0FFEE & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))))
++
++#if !defined(VIVANTE_NO_3D)
++static gctSIZE_T
++_TerminateStateBlock(
++ IN gckCONTEXT Context,
++ IN gctSIZE_T Index
++ )
++{
++ gctUINT32_PTR buffer;
++ gctSIZE_T align;
++
++ /* Determine if we need alignment. */
++ align = (Index & 1) ? 1 : 0;
++
++ /* Address correct index. */
++ buffer = (Context->buffer == gcvNULL)
++ ? gcvNULL
++ : Context->buffer->logical;
++
++ /* Flush the current state block; make sure no pairing with the states
++ to follow happens. */
++ if (align && (buffer != gcvNULL))
++ {
++ buffer[Index] = 0xDEADDEAD;
++ }
++
++ /* Reset last address. */
++ Context->lastAddress = ~0U;
++
++ /* Return alignment requirement. */
++ return align;
++}
++#endif
++
++
++static gctSIZE_T
++_FlushPipe(
++ IN gckCONTEXT Context,
++ IN gctSIZE_T Index,
++ IN gcePIPE_SELECT Pipe
++ )
++{
++ if (Context->buffer != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Address correct index. */
++ buffer = Context->buffer->logical + Index;
++
++ /* Flush the current pipe. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = (Pipe == gcvPIPE_2D)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ : ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++
++ /* Flushing the pipe takes 6 slots. */
++ return 6;
++}
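++
++/* Note on the literal expressions above: each is an expanded set-field
++ macro that masks a bit range of a command word and inserts a value into
++ it. Bits 31:27 of the first word of a command carry the FE opcode
++ (0x01 = LOAD_STATE, 0x09 = STALL, 0x08 = LINK, 0x03 = NOP, as the
++ surrounding comments confirm), bits 25:16 the state count and bits 15:0
++ the state address. */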
++
++#if !defined(VIVANTE_NO_3D)
++static gctSIZE_T
++_SemaphoreStall(
++ IN gckCONTEXT Context,
++ IN gctSIZE_T Index
++ )
++{
++ if (Context->buffer != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Address correct index. */
++ buffer = Context->buffer->logical + Index;
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++
++ /* Semaphore/stall takes 4 slots. */
++ return 4;
++}
++#endif
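++
++/* The semaphore/stall pair above is the driver's standard FE-to-PE
++ barrier: the LOAD_STATE to 0x0E02 issues a semaphore token from the
++ front end to the pixel engine, and the STALL command (opcode 0x09) then
++ holds the front end until the pixel engine has consumed the token, so
++ everything queued before the barrier has drained. */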
++
++static gctSIZE_T
++_SwitchPipe(
++ IN gckCONTEXT Context,
++ IN gctSIZE_T Index,
++ IN gcePIPE_SELECT Pipe
++ )
++{
++ if (Context->buffer != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Address correct index. */
++ buffer = Context->buffer->logical + Index;
++
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer
++ = (Pipe == gcvPIPE_2D)
++ ? 0x1
++ : 0x0;
++ }
++
++ return 2;
++}
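++
++/* The two slots above are LoadState(AQPipeSelect, 1) plus the pipe value
++ (0x1 selects the 2D pipe, 0x0 the 3D pipe); the size is constant, which
++ is why 2 is returned even on the sizing pass when no buffer exists. */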
++
++#if !defined(VIVANTE_NO_3D)
++static gctSIZE_T
++_State(
++ IN gckCONTEXT Context,
++ IN gctSIZE_T Index,
++ IN gctUINT32 Address,
++ IN gctUINT32 Value,
++ IN gctSIZE_T Size,
++ IN gctBOOL FixedPoint,
++ IN gctBOOL Hinted
++ )
++{
++ gctUINT32_PTR buffer;
++ gctSIZE_T align, i;
++
++ /* Determine if we need alignment. */
++ align = (Index & 1) ? 1 : 0;
++
++ /* Address correct index. */
++ buffer = (Context->buffer == gcvNULL)
++ ? gcvNULL
++ : Context->buffer->logical;
++
++ if ((buffer == gcvNULL) && (Address + Size > Context->stateCount))
++ {
++ /* Determine maximum state. */
++ Context->stateCount = Address + Size;
++ }
++
++ /* Do we need a new entry? */
++ if ((Address != Context->lastAddress) || (FixedPoint != Context->lastFixed))
++ {
++ if (buffer != gcvNULL)
++ {
++ if (align)
++ {
++ /* Add filler. */
++ buffer[Index++] = 0xDEADDEAD;
++ }
++
++ /* LoadState(Address, Count). */
++ gcmkASSERT((Index & 1) == 0);
++
++ if (FixedPoint)
++ {
++ buffer[Index]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Size) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++ }
++ else
++ {
++ buffer[Index]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Size) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++ }
++
++ /* Walk all the states. */
++ for (i = 0; i < Size; i += 1)
++ {
++ /* Set state to uninitialized value. */
++ buffer[Index + 1 + i] = Value;
++
++ /* Set index in state mapping table. */
++ Context->map[Address + i].index = Index + 1 + i;
++
++#if gcdSECURE_USER
++ /* Save hint. */
++ if (Context->hint != gcvNULL)
++ {
++ Context->hint[Address + i] = Hinted;
++ }
++#endif
++ }
++ }
++
++ /* Save information for this LoadState. */
++ Context->lastIndex = Index;
++ Context->lastAddress = Address + Size;
++ Context->lastSize = Size;
++ Context->lastFixed = FixedPoint;
++
++ /* Return size for load state. */
++ return align + 1 + Size;
++ }
++
++ /* Append this state to the previous one. */
++ if (buffer != gcvNULL)
++ {
++ /* Update last load state. */
++ buffer[Context->lastIndex] =
++ ((((gctUINT32) (buffer[Context->lastIndex])) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Context->lastSize + Size) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ /* Walk all the states. */
++ for (i = 0; i < Size; i += 1)
++ {
++ /* Set state to uninitialized value. */
++ buffer[Index + i] = Value;
++
++ /* Set index in state mapping table. */
++ Context->map[Address + i].index = Index + i;
++
++#if gcdSECURE_USER
++ /* Save hint. */
++ if (Context->hint != gcvNULL)
++ {
++ Context->hint[Address + i] = Hinted;
++ }
++#endif
++ }
++ }
++
++ /* Update last address and size. */
++ Context->lastAddress += Size;
++ Context->lastSize += Size;
++
++ /* Return number of slots required. */
++ return Size;
++}
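++
++/* _State() works in two passes. On the sizing pass (Context->buffer is
++ gcvNULL) it only tracks the highest state address in stateCount and
++ returns how many slots the load would take. On the fill pass it emits a
++ LOAD_STATE header plus default values and records each state's slot in
++ Context->map; a state contiguous with the previous one (with the same
++ fixed-point mode) is folded into the previous LOAD_STATE by patching
++ its count field. */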
++
++static gctSIZE_T
++_StateMirror(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Address,
++ IN gctSIZE_T Size,
++ IN gctUINT32 AddressMirror
++ )
++{
++ gctSIZE_T i;
++
++ /* Process when buffer is set. */
++ if (Context->buffer != gcvNULL)
++ {
++ /* Walk all states. */
++ for (i = 0; i < Size; i++)
++ {
++ /* Copy the mapping address. */
++ Context->map[Address + i].index =
++ Context->map[AddressMirror + i].index;
++ }
++ }
++
++ /* Return the number of required maps. */
++ return Size;
++}
++#endif
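++
++/* _StateMirror() makes an address range reuse the map slots of another
++ range, so updates to either address land in the same context buffer
++ location; it is used below to alias the 0x08000 shader instruction
++ range onto the 0x0C000 (VX) instruction memory. */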
++
++static gceSTATUS
++_InitializeContextBuffer(
++ IN gckCONTEXT Context
++ )
++{
++ gctUINT32_PTR buffer;
++ gctSIZE_T index;
++
++#if !defined(VIVANTE_NO_3D)
++ gctUINT i;
++ gctUINT vertexUniforms, fragmentUniforms;
++ gctUINT fe2vsCount;
++ gctBOOL halti0;
++#endif
++
++ /* Reset the buffer index. */
++ index = 0;
++
++ /* Reset the last state address. */
++ Context->lastAddress = ~0U;
++
++ /* Get the buffer pointer. */
++ buffer = (Context->buffer == gcvNULL)
++ ? gcvNULL
++ : Context->buffer->logical;
++
++
++ /**************************************************************************/
++ /* Build 2D states. *******************************************************/
++
++
++#if !defined(VIVANTE_NO_3D)
++ /**************************************************************************/
++ /* Build 3D states. *******************************************************/
++ halti0 = (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures1)) >> (0 ? 23:23)) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) );
++
++ /* Query shader support. */
++ gcmkVERIFY_OK(gckHARDWARE_QueryShaderCaps(
++ Context->hardware, &vertexUniforms, &fragmentUniforms, gcvNULL));
++
++ /* Store the 3D entry index. */
++ Context->entryOffset3D = index * gcmSIZEOF(gctUINT32);
++
++ /* Flush 2D pipe. */
++ index += _FlushPipe(Context, index, gcvPIPE_2D);
++
++ /* Switch to 3D pipe. */
++ index += _SwitchPipe(Context, index, gcvPIPE_3D);
++
++ /* Current context pointer. */
++#if gcdDEBUG
++ index += _State(Context, index, 0x03850 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++#endif
++
++ index += _FlushPipe(Context, index, gcvPIPE_3D);
++
++ /* Global states. */
++ index += _State(Context, index, 0x03814 >> 2, 0x00000001, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03818 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0381C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03820 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03828 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0382C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03834 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03838 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0384C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ /* Front End states. */
++ fe2vsCount = 12;
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures1)) >> (0 ? 23:23)) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) ))
++ {
++ fe2vsCount = 16;
++ }
++ index += _State(Context, index, 0x00600 >> 2, 0x00000000, fe2vsCount, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ index += _State(Context, index, 0x00644 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x00648 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0064C >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x00650 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00680 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x006A0 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00670 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00678 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0067C >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x006C0 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00700 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00740 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00780 >> 2, 0x3F800000, 16, gcvFALSE, gcvFALSE);
++
++ /* Vertex Shader states. */
++ index += _State(Context, index, 0x00800 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00804 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00808 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0080C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00810 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00820 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00830 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00838 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ if (Context->hardware->identity.instructionCount <= 256)
++ {
++ index += _State(Context, index, 0x04000 >> 2, 0x00000000, 1024, gcvFALSE, gcvFALSE);
++ }
++
++ index += _CLOSE_RANGE();
++ index += _State(Context, index, 0x05000 >> 2, 0x00000000, vertexUniforms * 4, gcvFALSE, gcvFALSE);
++
++ /* Primitive Assembly states. */
++ index += _State(Context, index, 0x00A00 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A04 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A08 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A0C >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A10 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A14 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A18 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A1C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A28 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A2C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A30 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A40 >> 2, 0x00000000, 10, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A34 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A38 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A3C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A80 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A84 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A8C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ /* Setup states. */
++ index += _State(Context, index, 0x00C00 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C04 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C08 >> 2, 0x45000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C0C >> 2, 0x45000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C10 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C14 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C18 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C1C >> 2, 0x42000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C20 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C24 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++
++ /* Raster states. */
++ index += _State(Context, index, 0x00E00 >> 2, 0x00000001, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E10 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E04 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E40 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E08 >> 2, 0x00000031, 1, gcvFALSE, gcvFALSE);
++
++ /* Pixel Shader states. */
++ index += _State(Context, index, 0x01000 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01004 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01008 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0100C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01010 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01018 >> 2, 0x01000000, 1, gcvFALSE, gcvFALSE);
++ if (Context->hardware->identity.instructionCount <= 256)
++ {
++ index += _State(Context, index, 0x06000 >> 2, 0x00000000, 1024, gcvFALSE, gcvFALSE);
++ }
++
++ index += _CLOSE_RANGE();
++ index += _State(Context, index, 0x07000 >> 2, 0x00000000, fragmentUniforms * 4, gcvFALSE, gcvFALSE);
++
++ /* Texture states. */
++ index += _State(Context, index, 0x02000 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02040 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02080 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x020C0 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02100 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02140 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02180 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x021C0 >> 2, 0x00321000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02200 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02240 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x02400 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02440 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02480 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x024C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02500 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02540 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02580 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x025C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02600 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02640 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02680 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x026C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02700 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02740 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _CLOSE_RANGE();
++
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures2)) >> (0 ? 11:11)) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1)))))) ))
++ {
++ gctUINT texBlockCount;
++
++ /* New texture block. */
++ index += _State(Context, index, 0x10000 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10080 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10100 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10180 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10200 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10280 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ for (i = 0; i < 256 / 16; i += 1)
++ {
++ index += _State(Context, index, (0x02C00 >> 2) + i * 16, 0x00000000, 14, gcvFALSE, gcvFALSE);
++ }
++ index += _State(Context, index, 0x10300 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10380 >> 2, 0x00321000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10400 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10480 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures2)) >> (0 ? 15:15)) & ((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1)))))) ))
++ {
++ index += _State(Context, index, 0x12000 >> 2, 0x00000000, 256, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x12400 >> 2, 0x00000000, 256, gcvFALSE, gcvFALSE);
++ }
++
++ if ((Context->hardware->identity.chipModel == gcv2000)
++ && (Context->hardware->identity.chipRevision == 0x5108))
++ {
++ texBlockCount = 12;
++ }
++ else
++ {
++ texBlockCount = ((512) >> (4));
++ }
++ for (i = 0; i < texBlockCount; i += 1)
++ {
++ index += _State(Context, index, (0x10800 >> 2) + (i << 4), 0x00000000, 14, gcvFALSE, gcvTRUE);
++ }
++ }
++
++ /* YUV. */
++ index += _State(Context, index, 0x01678 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0167C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01680 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01684 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01688 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0168C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01690 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01694 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01698 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0169C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ /* Thread walker states. */
++ index += _State(Context, index, 0x00900 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00904 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00908 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0090C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00910 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00914 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00918 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0091C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00924 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ if (Context->hardware->identity.instructionCount > 1024)
++ {
++ /* New Shader instruction memory. */
++ index += _State(Context, index, 0x0085C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0101C >> 2, 0x00000100, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00860 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ for (i = 0;
++ i < Context->hardware->identity.instructionCount << 2;
++ i += 256 << 2
++ )
++ {
++ index += _State(Context, index, (0x20000 >> 2) + i, 0x00000000, 256 << 2, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++ }
++ else if (Context->hardware->identity.instructionCount > 256)
++ {
++ /* New Shader instruction memory. */
++ index += _State(Context, index, 0x0085C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0101C >> 2, 0x00000100, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ /* VX instruction memory. */
++ for (i = 0;
++ i < Context->hardware->identity.instructionCount << 2;
++ i += 256 << 2
++ )
++ {
++ index += _State(Context, index, (0x0C000 >> 2) + i, 0x00000000, 256 << 2, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++
++ _StateMirror(Context, 0x08000 >> 2, Context->hardware->identity.instructionCount << 2, 0x0C000 >> 2);
++ }
++
++ /* Store the index of the "XD" entry. */
++ Context->entryOffsetXDFrom3D = index * gcmSIZEOF(gctUINT32);
++
++
++ /* Pixel Engine states. */
++ index += _State(Context, index, 0x01400 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01404 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01408 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0140C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01414 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01418 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0141C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01420 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01424 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01428 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0142C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01434 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01454 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01458 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0145C >> 2, 0x00000010, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014A0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014A8 >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014AC >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014B0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014B4 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014A4 >> 2, 0x000E400C, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01580 >> 2, 0x00000000, 3, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014B8 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ /* Composition states. */
++ index += _State(Context, index, 0x03008 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ if (Context->hardware->identity.pixelPipes == 1)
++ {
++ index += _State(Context, index, 0x01460 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++
++ index += _State(Context, index, 0x01430 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01410 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ }
++ else
++ {
++ index += _State(Context, index, (0x01460 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++
++ for (i = 0; i < 2; i++)
++ {
++ index += _State(Context, index, (0x01500 >> 2) + (i << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++ }
++ }
++
++ if (Context->hardware->identity.pixelPipes > 1 || halti0)
++ {
++ index += _State(Context, index, (0x01480 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++ }
++
++ /* Resolve states. */
++ index += _State(Context, index, 0x01604 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01608 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0160C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01610 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01614 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01620 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01630 >> 2, 0x00000000, 2, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01640 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0163C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016A0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016B4 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ if (Context->hardware->identity.pixelPipes > 1)
++ {
++ index += _State(Context, index, (0x016C0 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++
++ index += _State(Context, index, (0x016E0 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++
++ index += _State(Context, index, 0x01700 >> 2, 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvFALSE);
++ }
++
++ /* Tile status. */
++ index += _State(Context, index, 0x01654 >> 2, 0x00200000, 1, gcvFALSE, gcvFALSE);
++
++ index += _CLOSE_RANGE();
++ index += _State(Context, index, 0x01658 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0165C >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01660 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01664 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01668 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0166C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01670 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01674 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016A4 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x016AC >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016A8 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01720 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01740 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01760 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ /* Semaphore/stall. */
++ index += _SemaphoreStall(Context, index);
++#endif
++
++ /**************************************************************************/
++ /* Link to another address. ***********************************************/
++
++ Context->linkIndex3D = index;
++
++ if (buffer != gcvNULL)
++ {
++ buffer[index + 0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[index + 1]
++ = 0;
++ }
++
++ index += 2;
++
++ /* Store the end of the context buffer. */
++ Context->bufferSize = index * gcmSIZEOF(gctUINT32);
++
++
++ /**************************************************************************/
++ /* Pipe switch for the case where neither 2D nor 3D are used. *************/
++
++ /* Store the 3D entry index. */
++ Context->entryOffsetXDFrom2D = index * gcmSIZEOF(gctUINT32);
++
++ /* Flush 2D pipe. */
++ index += _FlushPipe(Context, index, gcvPIPE_2D);
++
++ /* Switch to 3D pipe. */
++ index += _SwitchPipe(Context, index, gcvPIPE_3D);
++
++ /* Store the location of the link. */
++ Context->linkIndexXD = index;
++
++ if (buffer != gcvNULL)
++ {
++ buffer[index + 0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[index + 1]
++ = 0;
++ }
++
++ index += 2;
++
++
++ /**************************************************************************/
++ /* Save size for buffer. **************************************************/
++
++ Context->totalSize = index * gcmSIZEOF(gctUINT32);
++
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
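++
++/* Resulting layout: entryOffset3D starts the full 3D section, which ends
++ in a two-slot LINK at linkIndex3D (bufferSize marks that point);
++ entryOffsetXDFrom2D starts a minimal flush-plus-pipe-switch section
++ ending in the LINK at linkIndexXD; totalSize covers both. The LINK
++ targets are written as zero here; gckCONTEXT_Construct later points the
++ linkIndexXD LINK at the entryOffsetXDFrom3D tail of the 3D section and
++ saves pointers to the others, presumably for patching when the context
++ is scheduled. */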
++
++static gceSTATUS
++_DestroyContext(
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ if (Context != gcvNULL)
++ {
++ gcsCONTEXT_PTR bufferHead;
++
++ /* Free context buffers. */
++ for (bufferHead = Context->buffer; Context->buffer != gcvNULL;)
++ {
++ /* Get a shortcut to the current buffer. */
++ gcsCONTEXT_PTR buffer = Context->buffer;
++
++ /* Get the next buffer. */
++ gcsCONTEXT_PTR next = buffer->next;
++
++ /* Last item? */
++ if (next == bufferHead)
++ {
++ next = gcvNULL;
++ }
++
++ /* Destroy the signal. */
++ if (buffer->signal != gcvNULL)
++ {
++ gcmkONERROR(gckOS_DestroySignal(
++ Context->os, buffer->signal
++ ));
++
++ buffer->signal = gcvNULL;
++ }
++
++ /* Free state delta map. */
++ if (buffer->logical != gcvNULL)
++ {
++#if gcdVIRTUAL_COMMAND_BUFFER
++ gcmkONERROR(gckEVENT_DestroyVirtualCommandBuffer(
++ Context->hardware->kernel->eventObj,
++ Context->totalSize,
++ buffer->physical,
++ buffer->logical,
++ gcvKERNEL_PIXEL
++ ));
++
++#else
++ gcmkONERROR(gckEVENT_FreeContiguousMemory(
++ Context->hardware->kernel->eventObj,
++ Context->totalSize,
++ buffer->physical,
++ buffer->logical,
++ gcvKERNEL_PIXEL
++ ));
++#endif
++
++ buffer->logical = gcvNULL;
++ }
++
++ /* Free context buffer. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, buffer));
++
++ /* Remove from the list. */
++ Context->buffer = next;
++ }
++
++#if gcdSECURE_USER
++ /* Free the hint array. */
++ if (Context->hint != gcvNULL)
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->hint));
++ }
++#endif
++ /* Free record array copy. */
++ if (Context->recordArray != gcvNULL)
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->recordArray));
++ }
++
++ /* Free the state mapping. */
++ if (Context->map != gcvNULL)
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->map));
++ }
++
++ /* Mark the gckCONTEXT object as unknown. */
++ Context->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckCONTEXT object. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context));
++ }
++
++OnError:
++ return status;
++}
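++
++/* Context buffers form a circular singly linked list (see the append
++ logic in gckCONTEXT_Construct), so the loop above remembers the head
++ and stops once the chain wraps back around to it. */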
++
++
++/******************************************************************************\
++**************************** Context Management API ****************************
++\******************************************************************************/
++
++/******************************************************************************\
++**
++** gckCONTEXT_Construct
++**
++** Construct a new gckCONTEXT object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** gckHARDWARE Hardware
++** Pointer to gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gckCONTEXT * Context
++** Pointer to a variable that will receive the gckCONTEXT object
++** pointer.
++*/
++gceSTATUS
++gckCONTEXT_Construct(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ OUT gckCONTEXT * Context
++ )
++{
++ gceSTATUS status;
++ gckCONTEXT context = gcvNULL;
++ gctSIZE_T allocationSize;
++ gctUINT i;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%08X Hardware=0x%08X", Os, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Context != gcvNULL);
++
++
++ /**************************************************************************/
++ /* Allocate and initialize basic fields of gckCONTEXT. ********************/
++
++ /* The context object size. */
++ allocationSize = gcmSIZEOF(struct _gckCONTEXT);
++
++ /* Allocate the object. */
++ gcmkONERROR(gckOS_Allocate(
++ Os, allocationSize, &pointer
++ ));
++
++ context = pointer;
++
++ /* Reset the entire object. */
++ gcmkONERROR(gckOS_ZeroMemory(context, allocationSize));
++
++ /* Initialize the gckCONTEXT object. */
++ context->object.type = gcvOBJ_CONTEXT;
++ context->os = Os;
++ context->hardware = Hardware;
++
++
++#if defined(VIVANTE_NO_3D)
++ context->entryPipe = gcvPIPE_2D;
++ context->exitPipe = gcvPIPE_2D;
++#elif gcdCMD_NO_2D_CONTEXT
++ context->entryPipe = gcvPIPE_3D;
++ context->exitPipe = gcvPIPE_3D;
++#else
++ context->entryPipe
++ = (((((gctUINT32) (context->hardware->identity.chipFeatures)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) )
++ ? gcvPIPE_2D
++ : gcvPIPE_3D;
++ context->exitPipe = gcvPIPE_3D;
++#endif
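++
++ /* The entry pipe is derived from a chipFeatures bit (presumably the
++ 2D-pipe capability flag): cores with a 2D pipe enter the context
++ buffer through it, while the exit pipe is always 3D here. */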
++
++ /* Get the command buffer requirements. */
++ gcmkONERROR(gckHARDWARE_QueryCommandBuffer(
++ Hardware,
++ &context->alignment,
++ &context->reservedHead,
++ &context->reservedTail
++ ));
++
++ /* Mark the context as dirty to force loading of the entire state table
++ the first time. */
++ context->dirty = gcvTRUE;
++
++
++ /**************************************************************************/
++ /* Get the size of the context buffer. ************************************/
++
++ gcmkONERROR(_InitializeContextBuffer(context));
++
++
++ /**************************************************************************/
++ /* Compute the size of the record array. **********************************/
++
++ context->recordArraySize
++ = gcmSIZEOF(gcsSTATE_DELTA_RECORD) * context->stateCount;
++
++
++ if (context->stateCount > 0)
++ {
++ /**************************************************************************/
++ /* Allocate and reset the state mapping table. ****************************/
++
++ /* Allocate the state mapping table. */
++ gcmkONERROR(gckOS_Allocate(
++ Os,
++ gcmSIZEOF(gcsSTATE_MAP) * context->stateCount,
++ &pointer
++ ));
++
++ context->map = pointer;
++
++ /* Zero the state mapping table. */
++ gcmkONERROR(gckOS_ZeroMemory(
++ context->map, gcmSIZEOF(gcsSTATE_MAP) * context->stateCount
++ ));
++
++
++ /**************************************************************************/
++ /* Allocate the hint array. ***********************************************/
++
++#if gcdSECURE_USER
++ /* Allocate hints. */
++ gcmkONERROR(gckOS_Allocate(
++ Os,
++ gcmSIZEOF(gctBOOL) * context->stateCount,
++ &pointer
++ ));
++
++ context->hint = pointer;
++#endif
++ }
++
++ /**************************************************************************/
++ /* Allocate the context and state delta buffers. **************************/
++
++ for (i = 0; i < gcdCONTEXT_BUFFER_COUNT; i += 1)
++ {
++ /* Allocate a context buffer. */
++ gcsCONTEXT_PTR buffer;
++
++ /* Allocate the context buffer structure. */
++ gcmkONERROR(gckOS_Allocate(
++ Os,
++ gcmSIZEOF(gcsCONTEXT),
++ &pointer
++ ));
++
++ buffer = pointer;
++
++ /* Reset the context buffer structure. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ buffer, gcmSIZEOF(gcsCONTEXT)
++ ));
++
++ /* Append to the list. */
++ if (context->buffer == gcvNULL)
++ {
++ buffer->next = buffer;
++ context->buffer = buffer;
++ }
++ else
++ {
++ buffer->next = context->buffer->next;
++ context->buffer->next = buffer;
++ }
++
++ /* Record the context buffer number in the order of creation. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ buffer->num = i;
++#endif
++
++ /* Create the busy signal. */
++ gcmkONERROR(gckOS_CreateSignal(
++ Os, gcvFALSE, &buffer->signal
++ ));
++
++ /* Set the signal, buffer is currently not busy. */
++ gcmkONERROR(gckOS_Signal(
++ Os, buffer->signal, gcvTRUE
++ ));
++
++ /* Create a new physical context buffer. */
++#if gcdVIRTUAL_COMMAND_BUFFER
++ gcmkONERROR(gckKERNEL_AllocateVirtualCommandBuffer(
++ context->hardware->kernel,
++ gcvFALSE,
++ &context->totalSize,
++ &buffer->physical,
++ &pointer
++ ));
++
++#else
++ gcmkONERROR(gckOS_AllocateContiguous(
++ Os,
++ gcvFALSE,
++ &context->totalSize,
++ &buffer->physical,
++ &pointer
++ ));
++#endif
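++
++ /* Depending on gcdVIRTUAL_COMMAND_BUFFER the context buffer is backed
++ either by a GPU virtual command buffer or by physically contiguous
++ memory; _DestroyContext() frees it through the matching gckEVENT
++ path. */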
++
++ buffer->logical = pointer;
++
++ /* Set gckEVENT object pointer. */
++ buffer->eventObj = Hardware->kernel->eventObj;
++
++ /* Set the pointers to the LINK commands. */
++ if (context->linkIndex2D != 0)
++ {
++ buffer->link2D = &buffer->logical[context->linkIndex2D];
++ }
++
++ if (context->linkIndex3D != 0)
++ {
++ buffer->link3D = &buffer->logical[context->linkIndex3D];
++ }
++
++ if (context->linkIndexXD != 0)
++ {
++ gctPOINTER xdLink;
++ gctUINT8_PTR xdEntryLogical;
++ gctSIZE_T xdEntrySize;
++ gctSIZE_T linkBytes;
++
++ /* Determine LINK parameters. */
++ xdLink
++ = &buffer->logical[context->linkIndexXD];
++
++ xdEntryLogical
++ = (gctUINT8_PTR) buffer->logical
++ + context->entryOffsetXDFrom3D;
++
++ xdEntrySize
++ = context->bufferSize
++ - context->entryOffsetXDFrom3D;
++
++ /* Query LINK size. */
++ gcmkONERROR(gckHARDWARE_Link(
++ Hardware, gcvNULL, gcvNULL, 0, &linkBytes
++ ));
++
++ /* Generate a LINK. */
++ gcmkONERROR(gckHARDWARE_Link(
++ Hardware,
++ xdLink,
++ xdEntryLogical,
++ xdEntrySize,
++ &linkBytes
++ ));
++ }
++ }
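++
++ /* gckHARDWARE_Link() above follows the driver's query/emit convention:
++ a first call with a gcvNULL buffer only reports the LINK size in
++ bytes, and the second call actually writes the command. */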
++
++
++ /**************************************************************************/
++ /* Initialize the context buffers. ****************************************/
++
++ /* Initialize the current context buffer. */
++ gcmkONERROR(_InitializeContextBuffer(context));
++
++ /* Make all created context buffers identical. */
++ {
++ gcsCONTEXT_PTR currContext, tempContext;
++
++ /* Set the current context buffer. */
++ currContext = context->buffer;
++
++ /* Get the next context buffer. */
++ tempContext = currContext->next;
++
++ /* Loop through all buffers. */
++ while (tempContext != currContext)
++ {
++ if (tempContext == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ /* Copy the current context. */
++ gckOS_MemCopy(
++ tempContext->logical,
++ currContext->logical,
++ context->totalSize
++ );
++
++ /* Get the next context buffer. */
++ tempContext = tempContext->next;
++ }
++ }
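++
++ /* From here on every buffer in the ring holds the same image of the
++ state table; gckCONTEXT_Update() keeps them coherent by attaching
++ each new state delta to all of them. */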
++
++ /* Return pointer to the gckCONTEXT object. */
++ *Context = context;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Context=0x%08X", *Context);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back on error. */
++ gcmkVERIFY_OK(_DestroyContext(context));
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/******************************************************************************\
++**
++** gckCONTEXT_Destroy
++**
++** Destroy a gckCONTEXT object.
++**
++** INPUT:
++**
++** gckCONTEXT Context
++** Pointer to a gckCONTEXT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCONTEXT_Destroy(
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Context=0x%08X", Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ /* Destroy the context and all related objects. */
++ status = _DestroyContext(Context);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return status;
++}
++
++/******************************************************************************\
++**
++** gckCONTEXT_Update
++**
++** Merge all pending state delta buffers into the current context buffer.
++**
++** INPUT:
++**
++** gckCONTEXT Context
++** Pointer to an gckCONTEXT object.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** gcsSTATE_DELTA_PTR StateDelta
++** Pointer to the state delta.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCONTEXT_Update(
++ IN gckCONTEXT Context,
++ IN gctUINT32 ProcessID,
++ IN gcsSTATE_DELTA_PTR StateDelta
++ )
++{
++#ifndef VIVANTE_NO_3D
++ gceSTATUS status = gcvSTATUS_OK;
++ gcsSTATE_DELTA _stateDelta;
++ gckKERNEL kernel;
++ gcsCONTEXT_PTR buffer;
++ gcsSTATE_MAP_PTR map;
++ gctBOOL needCopy = gcvFALSE;
++ gcsSTATE_DELTA_PTR nDelta;
++ gcsSTATE_DELTA_PTR uDelta = gcvNULL;
++ gcsSTATE_DELTA_PTR kDelta = gcvNULL;
++ gcsSTATE_DELTA_RECORD_PTR record;
++ gcsSTATE_DELTA_RECORD_PTR recordArray = gcvNULL;
++ gctUINT elementCount;
++ gctUINT address;
++ gctUINT32 mask;
++ gctUINT32 data;
++ gctUINT index;
++ gctUINT i, j;
++
++#if gcdSECURE_USER
++ gcskSECURE_CACHE_PTR cache;
++#endif
++
++ gcmkHEADER_ARG(
++ "Context=0x%08X ProcessID=%d StateDelta=0x%08X",
++ Context, ProcessID, StateDelta
++ );
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ /* Get a shortcut to the kernel object. */
++ kernel = Context->hardware->kernel;
++
++ /* Check whether we need to copy the structures or not. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Context->os, ProcessID, &needCopy));
++
++ /* Allocate the copy buffer for the user record array. */
++ if (needCopy && (Context->recordArray == gcvNULL))
++ {
++ /* Allocate the buffer. */
++ gcmkONERROR(gckOS_Allocate(
++ Context->os,
++ Context->recordArraySize,
++ (gctPOINTER *) &Context->recordArray
++ ));
++ }
++
++ /* Get the current context buffer. */
++ buffer = Context->buffer;
++
++ /* Wait until the context buffer becomes available; this will
++ also reset the signal and mark the buffer as busy. */
++ gcmkONERROR(gckOS_WaitSignal(
++ Context->os, buffer->signal, gcvINFINITE
++ ));
++
++#if gcdSECURE_USER
++ /* Get the cache from the database. */
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(kernel, ProcessID, &cache));
++#endif
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE) && 1 && !defined(VIVANTE_NO_3D)
++ /* Update current context token. */
++ buffer->logical[Context->map[0x0E14].index]
++ = gcmPTR2INT(Context);
++#endif
++
++ /* Are there any pending deltas? */
++ if (buffer->deltaCount != 0)
++ {
++ /* Get the state map. */
++ map = Context->map;
++
++ /* Get the first delta item. */
++ uDelta = buffer->delta;
++
++ /* Reset the vertex stream count. */
++ elementCount = 0;
++
++ /* Merge all pending deltas. */
++ for (i = 0; i < buffer->deltaCount; i += 1)
++ {
++ /* Get access to the state delta. */
++ gcmkONERROR(gckKERNEL_OpenUserData(
++ kernel, needCopy,
++ &_stateDelta,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Get access to the state records. */
++ gcmkONERROR(gckKERNEL_OpenUserData(
++ kernel, needCopy,
++ Context->recordArray,
++ gcmUINT64_TO_PTR(kDelta->recordArray), Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++
++ /* Merge all pending states. */
++ for (j = 0; j < kDelta->recordCount; j += 1)
++ {
++ if (j >= Context->stateCount)
++ {
++ break;
++ }
++
++ /* Get the current state record. */
++ record = &recordArray[j];
++
++ /* Get the state address. */
++ address = record->address;
++
++ /* Make sure the state is a part of the mapping table. */
++ if (address >= Context->stateCount)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): State 0x%04X is not mapped.\n",
++ __FUNCTION__, __LINE__,
++ address
++ );
++
++ continue;
++ }
++
++ /* Get the state index. */
++ index = map[address].index;
++
++ /* Skip the state if not mapped. */
++ if (index == 0)
++ {
++#if gcdDEBUG
++ if ((address != 0x0594)
++ && (address != 0x0E00)
++ && (address != 0x0E03)
++ )
++ {
++#endif
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): State 0x%04X is not mapped.\n",
++ __FUNCTION__, __LINE__,
++ address
++ );
++#if gcdDEBUG
++ }
++#endif
++ continue;
++ }
++
++ /* Get the data mask. */
++ mask = record->mask;
++
++ /* Masked states that are being completely reset, or regular states. */
++ if ((mask == 0) || (mask == ~0U))
++ {
++ /* Get the new data value. */
++ data = record->data;
++
++ /* Process special states. */
++ if (address == 0x0595)
++ {
++ /* Force auto-disable to be disabled. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1))))))) << (0 ? 13:13))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1))))))) << (0 ? 13:13)));
++ }
++
++#if gcdSECURE_USER
++ /* Do we need to convert the logical address? */
++ if (Context->hint[address])
++ {
++ /* Map handle into physical address. */
++ gcmkONERROR(gckKERNEL_MapLogicalToPhysical(
++ kernel, cache, (gctPOINTER) &data
++ ));
++ }
++#endif
++
++ /* Set new data. */
++ buffer->logical[index] = data;
++ }
++
++ /* Masked states that are being set partially. */
++ else
++ {
++ buffer->logical[index]
++ = (~mask & buffer->logical[index])
++ | (mask & record->data);
++ }
++ }
++
++ /* Get the element count. */
++ if (kDelta->elementCount != 0)
++ {
++ elementCount = kDelta->elementCount;
++ }
++
++ /* Dereference delta. */
++ kDelta->refCount -= 1;
++ gcmkASSERT(kDelta->refCount >= 0);
++
++ /* Get the next state delta. */
++ nDelta = gcmUINT64_TO_PTR(kDelta->next);
++
++ /* Close access to the state records. */
++ gcmkONERROR(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvFALSE,
++ gcmUINT64_TO_PTR(kDelta->recordArray), Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++
++ /* Close access to the current state delta. */
++ gcmkONERROR(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvTRUE,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Update the user delta pointer. */
++ uDelta = nDelta;
++ }
++
++ /* The hardware disables all input streams when stream 0 is programmed;
++ it then re-enables those streams that were explicitly programmed by
++ the software. Because of this we cannot program the entire array of
++ values, otherwise all streams would be re-enabled; instead we program
++ only those that are actually needed by the software. */
++ if (elementCount != 0)
++ {
++ gctUINT base;
++ gctUINT nopCount;
++ gctUINT32_PTR nop;
++ gctUINT fe2vsCount = 12;
++
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures1)) >> (0 ? 23:23)) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) ))
++ {
++ fe2vsCount = 16;
++ }
++
++ /* Determine the base index of the vertex stream array. */
++ base = map[0x0180].index;
++
++ /* Set the proper state count. */
++ buffer->logical[base - 1]
++ = ((((gctUINT32) (buffer->logical[base - 1])) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (elementCount ) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ /* Determine the number of NOP commands. */
++ nopCount
++ = (fe2vsCount / 2)
++ - (elementCount / 2);
++
++ /* Determine the location of the first NOP. */
++ nop = &buffer->logical[base + (elementCount | 1)];
++
++ /* Fill the unused space with NOPs. */
++ for (i = 0; i < nopCount; i += 1)
++ {
++ if (nop >= buffer->logical + Context->totalSize)
++ {
++ break;
++ }
++
++ /* Generate a NOP command. */
++ *nop = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x03 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ /* Advance. */
++ nop += 2;
++ }
++ }
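++
++ /* Each FE command occupies a 64-bit slot (see the alignment filler in
++ _State()), so (elementCount | 1) rounds past the LOAD_STATE payload
++ to the next command-aligned word, and each NOP advances by two
++ words. */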
++
++ /* Reset pending deltas. */
++ buffer->deltaCount = 0;
++ buffer->delta = gcvNULL;
++ }
++
++ /* Set state delta user pointer. */
++ uDelta = StateDelta;
++
++ /* Get access to the state delta. */
++ gcmkONERROR(gckKERNEL_OpenUserData(
++ kernel, needCopy,
++ &_stateDelta,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* State delta cannot be attached to anything yet. */
++ if (kDelta->refCount != 0)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): kDelta->refCount = %d (has to be 0).\n",
++ __FUNCTION__, __LINE__,
++ kDelta->refCount
++ );
++ }
++
++ /* Attach to all contexts. */
++ buffer = Context->buffer;
++
++ do
++ {
++ /* Attach to the context if nothing is attached yet. If a delta
++ is already attached, all we need to do is increment
++ the number of deltas in the context. */
++ if (buffer->delta == gcvNULL)
++ {
++ buffer->delta = uDelta;
++ }
++
++ /* Update reference count. */
++ kDelta->refCount += 1;
++
++ /* Update counters. */
++ buffer->deltaCount += 1;
++
++ /* Get the next context buffer. */
++ buffer = buffer->next;
++
++ if (buffer == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++ }
++ while (Context->buffer != buffer);
++
++ /* Close access to the current state delta. */
++ gcmkONERROR(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvTRUE,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Schedule an event to mark the context buffer as available. */
++ gcmkONERROR(gckEVENT_Signal(
++ buffer->eventObj, buffer->signal, gcvKERNEL_PIXEL
++ ));
++
++ /* Advance to the next context buffer. */
++ Context->buffer = buffer->next;
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Get access to the state records. */
++ if (kDelta != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvFALSE,
++ gcmUINT64_TO_PTR(kDelta->recordArray), Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++ }
++
++ /* Close access to the current state delta. */
++ gcmkVERIFY_OK(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvTRUE,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ return gcvSTATUS_OK;
++#endif
++}
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/arch/XAQ2/hal/kernel/gc_hal_kernel_context.h linux-xbian-imx6/drivers/mxc/gpu-viv/arch/XAQ2/hal/kernel/gc_hal_kernel_context.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/arch/XAQ2/hal/kernel/gc_hal_kernel_context.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/arch/XAQ2/hal/kernel/gc_hal_kernel_context.h 2015-07-27 23:13:06.170964992 +0200
+@@ -0,0 +1,157 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the License, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_context_h_
++#define __gc_hal_kernel_context_h_
++
++#include "gc_hal_kernel_buffer.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* Maps state locations within the context buffer. */
++typedef struct _gcsSTATE_MAP * gcsSTATE_MAP_PTR;
++typedef struct _gcsSTATE_MAP
++{
++ /* Index of the state in the context buffer. */
++ gctUINT index;
++
++ /* State mask. */
++ gctUINT32 mask;
++}
++gcsSTATE_MAP;
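++
++/* Usage sketch (illustrative, not part of the original driver): the map is
++   indexed by state address; gc_hal_kernel_context.c, for example, locates
++   the vertex stream states with "base = map[0x0180].index;". */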
++
++/* Context buffer. */
++typedef struct _gcsCONTEXT * gcsCONTEXT_PTR;
++typedef struct _gcsCONTEXT
++{
++    /* For debugging: the sequence number of the context buffer, in order of creation. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT num;
++#endif
++
++ /* Pointer to gckEVENT object. */
++ gckEVENT eventObj;
++
++ /* Context busy signal. */
++ gctSIGNAL signal;
++
++ /* Physical address of the context buffer. */
++ gctPHYS_ADDR physical;
++
++ /* Logical address of the context buffer. */
++ gctUINT32_PTR logical;
++
++ /* Pointer to the LINK commands. */
++ gctPOINTER link2D;
++ gctPOINTER link3D;
++
++ /* The number of pending state deltas. */
++ gctUINT deltaCount;
++
++ /* Pointer to the first delta to be applied. */
++ gcsSTATE_DELTA_PTR delta;
++
++ /* Next context buffer. */
++ gcsCONTEXT_PTR next;
++}
++gcsCONTEXT;
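++
++/* Note (illustrative): context buffers form a circular list through 'next';
++   the kernel walks the ring with a do/while loop of the form:
++
++       gcsCONTEXT_PTR buffer = Context->buffer;
++       do { buffer = buffer->next; } while (buffer != Context->buffer);
++*/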
++
++/* gckCONTEXT structure that holds the current context. */
++struct _gckCONTEXT
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckHARDWARE hardware;
++
++ /* Command buffer alignment. */
++ gctSIZE_T alignment;
++ gctSIZE_T reservedHead;
++ gctSIZE_T reservedTail;
++
++ /* Context buffer metrics. */
++ gctSIZE_T stateCount;
++ gctSIZE_T totalSize;
++ gctSIZE_T bufferSize;
++ gctUINT32 linkIndex2D;
++ gctUINT32 linkIndex3D;
++ gctUINT32 linkIndexXD;
++ gctUINT32 entryOffset3D;
++ gctUINT32 entryOffsetXDFrom2D;
++ gctUINT32 entryOffsetXDFrom3D;
++
++ /* Dirty flags. */
++ gctBOOL dirty;
++ gctBOOL dirty2D;
++ gctBOOL dirty3D;
++ gcsCONTEXT_PTR dirtyBuffer;
++
++ /* State mapping. */
++ gcsSTATE_MAP_PTR map;
++
++ /* List of context buffers. */
++ gcsCONTEXT_PTR buffer;
++
++ /* A copy of the user record array. */
++ gctUINT recordArraySize;
++ gcsSTATE_DELTA_RECORD_PTR recordArray;
++
++ /* Requested pipe select for context. */
++ gcePIPE_SELECT entryPipe;
++ gcePIPE_SELECT exitPipe;
++
++ /* Variables used for building state buffer. */
++ gctUINT32 lastAddress;
++ gctSIZE_T lastSize;
++ gctUINT32 lastIndex;
++ gctBOOL lastFixed;
++
++ /* Hint array. */
++#if gcdSECURE_USER
++ gctBOOL_PTR hint;
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
++ gcsPROFILER_COUNTERS latestProfiler;
++ gcsPROFILER_COUNTERS histroyProfiler;
++ gctUINT32 prevVSInstCount;
++ gctUINT32 prevVSBranchInstCount;
++ gctUINT32 prevVSTexInstCount;
++ gctUINT32 prevVSVertexCount;
++ gctUINT32 prevPSInstCount;
++ gctUINT32 prevPSBranchInstCount;
++ gctUINT32 prevPSTexInstCount;
++ gctUINT32 prevPSPixelCount;
++#endif
++};
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_context_h_ */
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.c linux-xbian-imx6/drivers/mxc/gpu-viv/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.c 2015-07-27 23:13:06.170964992 +0200
+@@ -0,0 +1,7280 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the License, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#if VIVANTE_PROFILER_CONTEXT
++#include "gc_hal_kernel_context.h"
++#endif
++
++#define _GC_OBJ_ZONE gcvZONE_HARDWARE
++
++typedef struct _gcsiDEBUG_REGISTERS * gcsiDEBUG_REGISTERS_PTR;
++typedef struct _gcsiDEBUG_REGISTERS
++{
++ gctSTRING module;
++ gctUINT index;
++ gctUINT shift;
++ gctUINT data;
++ gctUINT count;
++ gctUINT32 signature;
++}
++gcsiDEBUG_REGISTERS;
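++
++/* Note (illustrative): each descriptor drives _DumpDebugRegisters() below,
++   which writes the select value (i << shift) into register 'index' and
++   reads the result back from register 'data', once per 'count'. */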
++
++extern int gpu3DMinClock;
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++static gceSTATUS
++_ResetGPU(
++ IN gckHARDWARE Hardware,
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++static gceSTATUS
++_IdentifyHardware(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++ )
++{
++ gceSTATUS status;
++
++ gctUINT32 chipIdentity;
++
++ gctUINT32 streamCount = 0;
++ gctUINT32 registerMax = 0;
++ gctUINT32 threadCount = 0;
++ gctUINT32 shaderCoreCount = 0;
++ gctUINT32 vertexCacheSize = 0;
++ gctUINT32 vertexOutputBufferSize = 0;
++ gctUINT32 pixelPipes = 0;
++ gctUINT32 instructionCount = 0;
++ gctUINT32 numConstants = 0;
++ gctUINT32 bufferSize = 0;
++ gctUINT32 varyingsCount = 0;
++ gctBOOL useHZ;
++
++ gcmkHEADER_ARG("Os=0x%x", Os);
++
++ /***************************************************************************
++ ** Get chip ID and revision.
++ */
++
++ /* Read chip identity register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00018,
++ &chipIdentity));
++
++    /* Special case for older graphics cores. */
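++    /* (The expanded macro below is equivalent to
++       ((chipIdentity >> 24) & 0xFF) == 0x01.) */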
++ if (((((gctUINT32) (chipIdentity)) >> (0 ? 31:24) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))))
++ {
++ Identity->chipModel = gcv500;
++ Identity->chipRevision = (((((gctUINT32) (chipIdentity)) >> (0 ? 15:12)) & ((gctUINT32) ((((1 ? 15:12) - (0 ? 15:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:12) - (0 ? 15:12) + 1)))))) );
++ }
++
++ else
++ {
++ /* Read chip identity register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00020,
++ (gctUINT32_PTR) &Identity->chipModel));
++
++ /* !!!! HACK ALERT !!!! */
++ /* Because people change device IDs without letting software know
++ ** about it - here is the hack to make it all look the same. Only
++ ** for GC400 family. Next time - TELL ME!!! */
++ if (((Identity->chipModel & 0xFF00) == 0x0400)
++ && (Identity->chipModel != 0x0420))
++ {
++ Identity->chipModel = (gceCHIPMODEL) (Identity->chipModel & 0x0400);
++ }
++
++ /* Read CHIP_REV register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00024,
++ &Identity->chipRevision));
++
++ if ((Identity->chipModel == gcv300)
++ && (Identity->chipRevision == 0x2201)
++ )
++ {
++ gctUINT32 chipDate;
++ gctUINT32 chipTime;
++
++ /* Read date and time registers. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00028,
++ &chipDate));
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0002C,
++ &chipTime));
++
++ if ((chipDate == 0x20080814) && (chipTime == 0x12051100))
++ {
++ /* This IP has an ECO; put the correct revision in it. */
++ Identity->chipRevision = 0x1051;
++ }
++ }
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipModel=%X",
++ Identity->chipModel);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipRevision=%X",
++ Identity->chipRevision);
++
++
++ /***************************************************************************
++ ** Get chip features.
++ */
++
++ /* Read chip feature register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0001C,
++ &Identity->chipFeatures));
++
++#ifndef VIVANTE_NO_3D
++ /* Disable fast clear on GC700. */
++ if (Identity->chipModel == gcv700)
++ {
++ Identity->chipFeatures
++ = ((((gctUINT32) (Identity->chipFeatures)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++#endif
++
++ if (((Identity->chipModel == gcv500) && (Identity->chipRevision < 2))
++ || ((Identity->chipModel == gcv300) && (Identity->chipRevision < 0x2000))
++ )
++ {
++        /* GC500 rev 1.x and GC300 rev < 2.0 don't have these registers. */
++ Identity->chipMinorFeatures = 0;
++ Identity->chipMinorFeatures1 = 0;
++ Identity->chipMinorFeatures2 = 0;
++ Identity->chipMinorFeatures3 = 0;
++ Identity->chipMinorFeatures4 = 0;
++ }
++ else
++ {
++ /* Read chip minor feature register #0. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00034,
++ &Identity->chipMinorFeatures));
++
++ if (((((gctUINT32) (Identity->chipMinorFeatures)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))))
++ )
++ {
++            /* Read chip minor features register #1. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00074,
++ &Identity->chipMinorFeatures1));
++
++            /* Read chip minor features register #2. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00084,
++ &Identity->chipMinorFeatures2));
++
++ /*Identity->chipMinorFeatures2 &= ~(0x1 << 3);*/
++
++            /* Read chip minor features register #3. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00088,
++ &Identity->chipMinorFeatures3));
++
++            /* The BG2 chip has no supertiled compression, and the GCMinorFeature3BugFixes15 bit is not applicable. */
++ if(Identity->chipModel == gcv1000 && Identity->chipRevision == 0x5036)
++ {
++ Identity->chipMinorFeatures3
++ = ((((gctUINT32) (Identity->chipMinorFeatures3)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ Identity->chipMinorFeatures3
++ = ((((gctUINT32) (Identity->chipMinorFeatures3)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) << (0 ? 27:27))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) << (0 ? 27:27)));
++ }
++
++            /* Read chip minor features register #4. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00094,
++ &Identity->chipMinorFeatures4));
++ }
++ else
++ {
++            /* Chip doesn't have minor features registers #1, #2, #3 or #4. */
++ Identity->chipMinorFeatures1 = 0;
++ Identity->chipMinorFeatures2 = 0;
++ Identity->chipMinorFeatures3 = 0;
++ Identity->chipMinorFeatures4 = 0;
++ }
++ }
++
++ /* Get the Supertile layout in the hardware. */
++ if (((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 26:26) & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))))
++ || ((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 8:8) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))))
++ {
++ Identity->superTileMode = 2;
++ }
++ else if (((((gctUINT32) (Identity->chipMinorFeatures)) >> (0 ? 27:27) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))))
++ {
++ Identity->superTileMode = 1;
++ }
++ else
++ {
++ Identity->superTileMode = 0;
++ }
++
++    /* Exception for GC1000 revisions 5035/5036/5037, GC800 revision 4612 and GC860 revision 4647. */
++ if (((Identity->chipModel == gcv1000) && ((Identity->chipRevision == 0x5035)
++ || (Identity->chipRevision == 0x5036)
++ || (Identity->chipRevision == 0x5037)))
++ || ((Identity->chipModel == gcv800) && (Identity->chipRevision == 0x4612))
++ || ((Identity->chipModel == gcv860) && (Identity->chipRevision == 0x4647)))
++ {
++ Identity->superTileMode = 1;
++ }
++
++ if (Identity->chipModel == gcv4000 && Identity->chipRevision == 0x5245)
++ {
++ useHZ = ((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 26:26) & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))))
++ || ((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 8:8) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))));
++ }
++ else
++ {
++ useHZ = gcvFALSE;
++ }
++
++ if (useHZ)
++ {
++ /* Disable EZ. */
++ Identity->chipFeatures
++ = ((((gctUINT32) (Identity->chipFeatures)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)));
++ }
++
++ /* Disable HZ when EZ is present for older chips. */
++ else if (!((((gctUINT32) (Identity->chipFeatures)) >> (0 ? 16:16) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))))
++ {
++ /* Disable HIERARCHICAL_Z. */
++ Identity->chipMinorFeatures
++ = ((((gctUINT32) (Identity->chipMinorFeatures)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) << (0 ? 27:27))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) << (0 ? 27:27)));
++ }
++
++    /* Disable the rectangle primitive when the chip is gc880_5_1_0_rc6. */
++ if ((Identity->chipModel == gcv880) && (Identity->chipRevision == 0x5106))
++ {
++ /* Disable rectangle primitive. */
++ Identity->chipMinorFeatures2
++ = ((((gctUINT32) (Identity->chipMinorFeatures2)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ }
++
++ if ((Identity->chipModel == gcv800) && (Identity->chipRevision == 0x4605))
++ {
++        /* Correct feature bit: RTL does not have such a feature. */
++ Identity->chipFeatures
++ = ((((gctUINT32) (Identity->chipFeatures)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)));
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipFeatures=0x%08X",
++ Identity->chipFeatures);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures=0x%08X",
++ Identity->chipMinorFeatures);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures1=0x%08X",
++ Identity->chipMinorFeatures1);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures2=0x%08X",
++ Identity->chipMinorFeatures2);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures3=0x%08X",
++ Identity->chipMinorFeatures3);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures4=0x%08X",
++ Identity->chipMinorFeatures4);
++
++ /***************************************************************************
++ ** Get chip specs.
++ */
++
++ if (((((gctUINT32) (Identity->chipMinorFeatures)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))))
++ {
++ gctUINT32 specs, specs2, specs3;
++
++ /* Read gcChipSpecs register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00048,
++ &specs));
++
++ /* Extract the fields. */
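++        /* (Decoded, the expanded macros below read bit fields of gcChipSpecs,
++           e.g. streamCount = (specs >> 0) & 0xF, registerMax = (specs >> 4) & 0xF,
++           threadCount = (specs >> 8) & 0xF, and so on.) */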
++ streamCount = (((((gctUINT32) (specs)) >> (0 ? 3:0)) & ((gctUINT32) ((((1 ? 3:0) - (0 ? 3:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:0) - (0 ? 3:0) + 1)))))) );
++ registerMax = (((((gctUINT32) (specs)) >> (0 ? 7:4)) & ((gctUINT32) ((((1 ? 7:4) - (0 ? 7:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:4) - (0 ? 7:4) + 1)))))) );
++ threadCount = (((((gctUINT32) (specs)) >> (0 ? 11:8)) & ((gctUINT32) ((((1 ? 11:8) - (0 ? 11:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:8) - (0 ? 11:8) + 1)))))) );
++ shaderCoreCount = (((((gctUINT32) (specs)) >> (0 ? 24:20)) & ((gctUINT32) ((((1 ? 24:20) - (0 ? 24:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:20) - (0 ? 24:20) + 1)))))) );
++ vertexCacheSize = (((((gctUINT32) (specs)) >> (0 ? 16:12)) & ((gctUINT32) ((((1 ? 16:12) - (0 ? 16:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:12) - (0 ? 16:12) + 1)))))) );
++ vertexOutputBufferSize = (((((gctUINT32) (specs)) >> (0 ? 31:28)) & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1)))))) );
++ pixelPipes = (((((gctUINT32) (specs)) >> (0 ? 27:25)) & ((gctUINT32) ((((1 ? 27:25) - (0 ? 27:25) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:25) - (0 ? 27:25) + 1)))))) );
++
++ /* Read gcChipSpecs2 register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00080,
++ &specs2));
++
++ instructionCount = (((((gctUINT32) (specs2)) >> (0 ? 15:8)) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1)))))) );
++ numConstants = (((((gctUINT32) (specs2)) >> (0 ? 31:16)) & ((gctUINT32) ((((1 ? 31:16) - (0 ? 31:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:16) - (0 ? 31:16) + 1)))))) );
++ bufferSize = (((((gctUINT32) (specs2)) >> (0 ? 7:0)) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1)))))) );
++
++ /* Read gcChipSpecs3 register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0008C,
++ &specs3));
++
++ varyingsCount = (((((gctUINT32) (specs3)) >> (0 ? 8:4)) & ((gctUINT32) ((((1 ? 8:4) - (0 ? 8:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:4) - (0 ? 8:4) + 1)))))) );
++ }
++
++ /* Get the number of pixel pipes. */
++ Identity->pixelPipes = gcmMAX(pixelPipes, 1);
++
++ /* Get the stream count. */
++ Identity->streamCount = (streamCount != 0)
++ ? streamCount
++ : (Identity->chipModel >= gcv1000) ? 4 : 1;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: streamCount=%u%s",
++ Identity->streamCount,
++ (streamCount == 0) ? " (default)" : "");
++
++ /* Get the vertex output buffer size. */
++ Identity->vertexOutputBufferSize = (vertexOutputBufferSize != 0)
++ ? 1 << vertexOutputBufferSize
++ : (Identity->chipModel == gcv400)
++ ? (Identity->chipRevision < 0x4000) ? 512
++ : (Identity->chipRevision < 0x4200) ? 256
++ : 128
++ : (Identity->chipModel == gcv530)
++ ? (Identity->chipRevision < 0x4200) ? 512
++ : 128
++ : 512;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: vertexOutputBufferSize=%u%s",
++ Identity->vertexOutputBufferSize,
++ (vertexOutputBufferSize == 0) ? " (default)" : "");
++
++ /* Get the maximum number of threads. */
++ Identity->threadCount = (threadCount != 0)
++ ? 1 << threadCount
++ : (Identity->chipModel == gcv400) ? 64
++ : (Identity->chipModel == gcv500) ? 128
++ : (Identity->chipModel == gcv530) ? 128
++ : 256;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: threadCount=%u%s",
++ Identity->threadCount,
++ (threadCount == 0) ? " (default)" : "");
++
++ /* Get the number of shader cores. */
++ Identity->shaderCoreCount = (shaderCoreCount != 0)
++ ? shaderCoreCount
++ : (Identity->chipModel >= gcv1000) ? 2
++ : 1;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: shaderCoreCount=%u%s",
++ Identity->shaderCoreCount,
++ (shaderCoreCount == 0) ? " (default)" : "");
++
++ /* Get the vertex cache size. */
++ Identity->vertexCacheSize = (vertexCacheSize != 0)
++ ? vertexCacheSize
++ : 8;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: vertexCacheSize=%u%s",
++ Identity->vertexCacheSize,
++ (vertexCacheSize == 0) ? " (default)" : "");
++
++ /* Get the maximum number of temporary registers. */
++ Identity->registerMax = (registerMax != 0)
++                             /* A maximum of registerMax/4 registers is accessible to one shader. */
++ ? 1 << registerMax
++ : (Identity->chipModel == gcv400) ? 32
++ : 64;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: registerMax=%u%s",
++ Identity->registerMax,
++ (registerMax == 0) ? " (default)" : "");
++
++ /* Get the instruction count. */
++ Identity->instructionCount = (instructionCount == 0) ? 256
++ : (instructionCount == 1) ? 1024
++ : (instructionCount == 2) ? 2048
++ : (instructionCount == 0xFF) ? 512
++ : 256;
++
++ if (Identity->instructionCount == 256)
++ {
++ if ((Identity->chipModel == gcv2000 && Identity->chipRevision == 0x5108)
++ || Identity->chipModel == gcv880)
++ {
++ Identity->instructionCount = 512;
++ }
++ }
++
++ if (((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 3:3) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))))
++ {
++ Identity->instructionCount = 512;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: instructionCount=%u%s",
++ Identity->instructionCount,
++ (instructionCount == 0) ? " (default)" : "");
++
++ /* Get the number of constants. */
++ Identity->numConstants = (numConstants == 0) ? 168 : numConstants;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: numConstants=%u%s",
++ Identity->numConstants,
++ (numConstants == 0) ? " (default)" : "");
++
++ /* Get the buffer size. */
++ Identity->bufferSize = bufferSize;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: bufferSize=%u%s",
++ Identity->bufferSize,
++ (bufferSize == 0) ? " (default)" : "");
++
++
++ if (varyingsCount != 0)
++ {
++ /* Bug 4480. */
++ /*Identity->varyingsCount = varyingsCount;*/
++ Identity->varyingsCount = 12;
++ }
++ else if (((((gctUINT32) (Identity->chipMinorFeatures1)) >> (0 ? 23:23) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))))
++ {
++ Identity->varyingsCount = 12;
++ }
++ else
++ {
++ Identity->varyingsCount = 8;
++ }
++
++    /* Some cores consume two varyings for the position, so the maximum number of varying vectors must be reduced by one. */
++ if ((Identity->chipModel == gcv4000 && Identity->chipRevision == 0x5222) ||
++ (Identity->chipModel == gcv4000 && Identity->chipRevision == 0x5208) ||
++ ((Identity->chipModel == gcv2100 || Identity->chipModel == gcv2000) && Identity->chipRevision == 0x5108) ||
++ (Identity->chipModel == gcv880 && (Identity->chipRevision == 0x5107 || Identity->chipRevision == 0x5106)))
++ {
++ Identity->varyingsCount -= 1;
++ }
++
++ Identity->chip2DControl = 0;
++ if (Identity->chipModel == gcv320)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os,
++ Core,
++ 0x0002C,
++ &data));
++
++ if ((data != 33956864) &&
++ ((Identity->chipRevision == 0x5007) ||
++ (Identity->chipRevision == 0x5220)))
++ {
++ Identity->chip2DControl |= 0xFF &
++ (Identity->chipRevision == 0x5220 ? 8 :
++ (Identity->chipRevision == 0x5007 ? 12 : 0));
++ }
++
++ if (Identity->chipRevision == 0x5007)
++ {
++ /* Disable splitting rectangle. */
++ Identity->chip2DControl |= 0x100;
++
++ /* Enable 2D Flush. */
++ Identity->chip2DControl |= 0x200;
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdPOWEROFF_TIMEOUT
++void
++_PowerTimerFunction(
++ gctPOINTER Data
++ )
++{
++ gckHARDWARE hardware = (gckHARDWARE)Data;
++ gcmkVERIFY_OK(
++ gckHARDWARE_SetPowerManagementState(hardware, gcvPOWER_OFF_TIMEOUT));
++}
++#endif
++
++static gceSTATUS
++_VerifyDMA(
++ IN gckOS Os,
++ IN gceCORE Core,
++ gctUINT32_PTR Address1,
++ gctUINT32_PTR Address2,
++ gctUINT32_PTR State1,
++ gctUINT32_PTR State2
++ )
++{
++ gceSTATUS status;
++ gctUINT32 i;
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x660, State1));
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x664, Address1));
++
++ for (i = 0; i < 500; i += 1)
++ {
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x660, State2));
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x664, Address2));
++
++ if (*Address1 != *Address2)
++ {
++ break;
++ }
++
++ if (*State1 != *State2)
++ {
++ break;
++ }
++ }
++
++OnError:
++ return status;
++}
++
++static gceSTATUS
++_DumpDebugRegisters(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gcsiDEBUG_REGISTERS_PTR Descriptor
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctUINT32 select;
++ gctUINT32 data = 0;
++ gctUINT i;
++
++ gcmkHEADER_ARG("Os=0x%X Descriptor=0x%X", Os, Descriptor);
++
++ gcmkPRINT_N(4, " %s debug registers:\n", Descriptor->module);
++
++ for (i = 0; i < Descriptor->count; i += 1)
++ {
++ select = i << Descriptor->shift;
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, Descriptor->index, select));
++#if gcdFPGA_BUILD
++ gcmkONERROR(gckOS_Delay(Os, 1000));
++#endif
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, Descriptor->data, &data));
++
++ gcmkPRINT_N(12, " [0x%02X] 0x%08X\n", i, data);
++ }
++
++ select = 0xF << Descriptor->shift;
++
++ for (i = 0; i < 500; i += 1)
++ {
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, Descriptor->index, select));
++#if gcdFPGA_BUILD
++ gcmkONERROR(gckOS_Delay(Os, 1000));
++#endif
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, Descriptor->data, &data));
++
++ if (data == Descriptor->signature)
++ {
++ break;
++ }
++ }
++
++ if (i == 500)
++ {
++ gcmkPRINT_N(4, " failed to obtain the signature (read 0x%08X).\n", data);
++ }
++ else
++ {
++ gcmkPRINT_N(8, " signature = 0x%08X (%d read attempt(s))\n", data, i + 1);
++ }
++
++OnError:
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_IsGPUPresent(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gcsHAL_QUERY_CHIP_IDENTITY identity;
++ gctUINT32 control;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &control));
++
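++    /* The two expanded macros below simply clear bits 1 and 0 of 'control'. */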
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)));
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ control));
++
++ /* Identify the hardware. */
++ gcmkONERROR(_IdentifyHardware(Hardware->os,
++ Hardware->core,
++ &identity));
++
++ /* Check if these are the same values as saved before. */
++ if ((Hardware->identity.chipModel != identity.chipModel)
++ || (Hardware->identity.chipRevision != identity.chipRevision)
++ || (Hardware->identity.chipFeatures != identity.chipFeatures)
++ || (Hardware->identity.chipMinorFeatures != identity.chipMinorFeatures)
++ || (Hardware->identity.chipMinorFeatures1 != identity.chipMinorFeatures1)
++ || (Hardware->identity.chipMinorFeatures2 != identity.chipMinorFeatures2)
++ )
++ {
++ gcmkPRINT("[galcore]: GPU is not present.");
++ gcmkONERROR(gcvSTATUS_GPU_NOT_RESPONDING);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++_FlushCache(
++ gckHARDWARE Hardware,
++ gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T bytes, requested;
++ gctPOINTER buffer;
++
++ /* Get the size of the flush command. */
++ gcmkONERROR(gckHARDWARE_Flush(Hardware,
++ gcvFLUSH_ALL,
++ gcvNULL,
++ &requested));
++
++ /* Reserve space in the command queue. */
++ gcmkONERROR(gckCOMMAND_Reserve(Command,
++ requested,
++ &buffer,
++ &bytes));
++
++ /* Append a flush. */
++ gcmkONERROR(gckHARDWARE_Flush(
++ Hardware, gcvFLUSH_ALL, buffer, &bytes
++ ));
++
++ /* Execute the command queue. */
++ gcmkONERROR(gckCOMMAND_Execute(Command, requested));
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
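++
++/* Note (illustrative): _FlushCache shows the two-pass pattern used throughout
++   this HAL -- gckHARDWARE_Flush() is first called with a gcvNULL buffer to
++   query the required size, gckCOMMAND_Reserve() reserves that much space in
++   the command queue, the flush is emitted into the reserved buffer, and
++   gckCOMMAND_Execute() submits it. */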
++
++/******************************************************************************\
++****************************** gckHARDWARE API code *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckHARDWARE_Construct
++**
++** Construct a new gckHARDWARE object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an initialized gckOS object.
++**
++** gceCORE Core
++** Specified core.
++**
++** OUTPUT:
++**
++** gckHARDWARE * Hardware
++** Pointer to a variable that will hold the pointer to the gckHARDWARE
++** object.
++*/
++gceSTATUS
++gckHARDWARE_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gckHARDWARE * Hardware
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware = gcvNULL;
++ gctUINT16 data = 0xff00;
++ gctUINT32 axi_ot;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Hardware != gcvNULL);
++
++ /* Enable the GPU. */
++ gcmkONERROR(gckOS_SetGPUPower(Os, Core, gcvTRUE, gcvTRUE));
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ 0x00000900));
++
++ /* Allocate the gckHARDWARE object. */
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckHARDWARE),
++ &pointer));
++
++ hardware = (gckHARDWARE) pointer;
++
++ /* Initialize the gckHARDWARE object. */
++ hardware->object.type = gcvOBJ_HARDWARE;
++ hardware->os = Os;
++ hardware->core = Core;
++
++ /* Identify the hardware. */
++ gcmkONERROR(_IdentifyHardware(Os, Core, &hardware->identity));
++
++ /* Determine the hardware type */
++ switch (hardware->identity.chipModel)
++ {
++ case gcv350:
++ case gcv355:
++ hardware->type = gcvHARDWARE_VG;
++ break;
++
++ case gcv300:
++ case gcv320:
++ case gcv420:
++ hardware->type = gcvHARDWARE_2D;
++        /* Set the outstanding limit. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00414, &axi_ot));
++ axi_ot = (axi_ot & (~0xFF)) | 0x10;
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, 0x00414, axi_ot));
++ break;
++
++ default:
++ hardware->type = gcvHARDWARE_3D;
++ if(hardware->identity.chipModel == gcv880)
++ {
++            /* Set the outstanding limit. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00414, &axi_ot));
++ axi_ot = (axi_ot & (~0xFF)) | 0x10;
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, 0x00414, axi_ot));
++ }
++
++ if ((((((gctUINT32) (hardware->identity.chipFeatures)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) ))
++ {
++ hardware->type = (gceHARDWARE_TYPE) (hardware->type | gcvHARDWARE_2D);
++ }
++ }
++
++ hardware->powerBaseAddress
++ = ((hardware->identity.chipModel == gcv300)
++ && (hardware->identity.chipRevision < 0x2000))
++ ? 0x0100
++ : 0x0000;
++
++ /* _ResetGPU need powerBaseAddress. */
++ status = _ResetGPU(hardware, Os, Core);
++
++ if (status != gcvSTATUS_OK)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "_ResetGPU failed: status=%d\n", status);
++ }
++
++ hardware->powerMutex = gcvNULL;
++
++ hardware->mmuVersion
++ = (((((gctUINT32) (hardware->identity.chipMinorFeatures1)) >> (0 ? 28:28)) & ((gctUINT32) ((((1 ? 28:28) - (0 ? 28:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 28:28) - (0 ? 28:28) + 1)))))) );
++
++ /* Determine whether bug fixes #1 are present. */
++ hardware->extraEventStates = ((((gctUINT32) (hardware->identity.chipMinorFeatures1)) >> (0 ? 3:3) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) == (0x0 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))));
++
++ /* Check if big endian */
++ hardware->bigEndian = (*(gctUINT8 *)&data == 0xff);
++
++ /* Initialize the fast clear. */
++ gcmkONERROR(gckHARDWARE_SetFastClear(hardware, -1, -1));
++
++#if !gcdENABLE_128B_MERGE
++
++ if (((((gctUINT32) (hardware->identity.chipMinorFeatures2)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))))
++ {
++ /* 128B merge is turned on by default. Disable it. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, 0x00558, 0));
++ }
++
++#endif
++
++ /* Set power state to ON. */
++ hardware->chipPowerState = gcvPOWER_ON;
++ hardware->clockState = gcvTRUE;
++ hardware->powerState = gcvTRUE;
++ hardware->lastWaitLink = ~0U;
++ hardware->globalSemaphore = gcvNULL;
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ hardware->powerOnFscaleVal = 64;
++#endif
++
++ gcmkONERROR(gckOS_CreateMutex(Os, &hardware->powerMutex));
++ gcmkONERROR(gckOS_CreateSemaphore(Os, &hardware->globalSemaphore));
++ hardware->startIsr = gcvNULL;
++ hardware->stopIsr = gcvNULL;
++
++#if gcdPOWEROFF_TIMEOUT
++ hardware->powerOffTimeout = gcdPOWEROFF_TIMEOUT;
++
++ gcmkVERIFY_OK(gckOS_CreateTimer(Os,
++ _PowerTimerFunction,
++ (gctPOINTER)hardware,
++ &hardware->powerOffTimer));
++#endif
++
++ gcmkONERROR(gckOS_AtomConstruct(Os, &hardware->pageTableDirty));
++
++#if gcdLINK_QUEUE_SIZE
++ hardware->linkQueue.front = 0;
++ hardware->linkQueue.rear = 0;
++ hardware->linkQueue.count = 0;
++#endif
++
++ /* Enable power management by default. */
++ hardware->powerManagement = gcvTRUE;
++
++ /* Disable profiler by default */
++ hardware->gpuProfiler = gcvFALSE;
++
++ /* Return pointer to the gckHARDWARE object. */
++ *Hardware = hardware;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Hardware=0x%x", *Hardware);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (hardware != gcvNULL)
++ {
++ /* Turn off the power. */
++ gcmkVERIFY_OK(gckOS_SetGPUPower(Os, Core, gcvFALSE, gcvFALSE));
++
++ if (hardware->globalSemaphore != gcvNULL)
++ {
++ /* Destroy the global semaphore. */
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(Os,
++ hardware->globalSemaphore));
++ }
++
++ if (hardware->powerMutex != gcvNULL)
++ {
++ /* Destroy the power mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, hardware->powerMutex));
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ if (hardware->powerOffTimer != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Os, hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Os, hardware->powerOffTimer));
++ }
++#endif
++
++ if (hardware->pageTableDirty != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, hardware->pageTableDirty));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, hardware));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
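++
++/* Usage sketch (illustrative; error handling elided, and gcvCORE_MAJOR is
++   assumed to name the 3D core):
++
++       gckHARDWARE hardware = gcvNULL;
++       gcmkONERROR(gckHARDWARE_Construct(Os, gcvCORE_MAJOR, &hardware));
++       ...
++       gcmkVERIFY_OK(gckHARDWARE_Destroy(hardware));
++*/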
++
++/*******************************************************************************
++**
++** gckHARDWARE_Destroy
++**
++** Destroy a gckHARDWARE object.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object that needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Destroy(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Destroy the power semaphore. */
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(Hardware->os,
++ Hardware->globalSemaphore));
++
++ /* Destroy the power mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Hardware->os, Hardware->powerMutex));
++
++#if gcdPOWEROFF_TIMEOUT
++ gcmkVERIFY_OK(gckOS_StopTimer(Hardware->os, Hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Hardware->os, Hardware->powerOffTimer));
++#endif
++
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Hardware->os, Hardware->pageTableDirty));
++
++ /* Mark the object as unknown. */
++ Hardware->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the object. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Hardware->os, Hardware));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_GetType
++**
++** Get the hardware type.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gceHARDWARE_TYPE * Type
++** Pointer to a variable that receives the type of hardware object.
++*/
++gceSTATUS
++gckHARDWARE_GetType(
++ IN gckHARDWARE Hardware,
++ OUT gceHARDWARE_TYPE * Type
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++ gcmkVERIFY_ARGUMENT(Type != gcvNULL);
++
++ *Type = Hardware->type;
++
++ gcmkFOOTER_ARG("*Type=%d", *Type);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_InitializeHardware
++**
++** Initialize the hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_InitializeHardware(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gctUINT32 baseAddress;
++ gctUINT32 chipRev;
++ gctUINT32 control;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Read the chip revision register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00024,
++ &chipRev));
++
++ if (chipRev != Hardware->identity.chipRevision)
++ {
++ /* Chip is not there! */
++ gcmkONERROR(gcvSTATUS_CONTEXT_LOSSED);
++ }
++
++ /* Disable isolate GPU bit. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)))));
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &control));
++
++ /* Enable debug register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11)))));
++
++ /* Reset memory counters. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ ~0U));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ 0));
++
++ /* Get the system's physical base address. */
++ gcmkONERROR(gckOS_GetBaseAddress(Hardware->os, &baseAddress));
++
++    /* Program the base addresses. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0041C,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00418,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00428,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00420,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00424,
++ baseAddress));
++
++#if !VIVANTE_PROFILER
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress +
++ 0x00100,
++ &data));
++
++ /* Enable clock gating. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ if ((Hardware->identity.chipRevision == 0x4301)
++ || (Hardware->identity.chipRevision == 0x4302)
++ )
++ {
++ /* Disable stall module level clock gating for 4.3.0.1 and 4.3.0.2
++ ** revisions. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)));
++ }
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00100,
++ data));
++
++#ifndef VIVANTE_NO_3D
++ /* Disable PE clock gating on revs < 5.0 when HZ is present without a
++ ** bug fix. */
++ if ((Hardware->identity.chipRevision < 0x5000)
++ && ((((gctUINT32) (Hardware->identity.chipMinorFeatures1)) >> (0 ? 9:9) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) == (0x0 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))))
++ && ((((gctUINT32) (Hardware->identity.chipMinorFeatures)) >> (0 ? 27:27) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))))
++ )
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &data));
++
++ /* Disable PE clock gating. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ data));
++ }
++
++#endif
++ }
++#endif
++
++ /* Special workaround for this core
++ ** Make sure pulse eater kicks in only when SH is idle */
++ if (Hardware->identity.chipModel == gcv4000 &&
++ Hardware->identity.chipRevision == 0x5208)
++ {
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ ((((gctUINT32) (0x01590880)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23)))));
++ }
++
++ if ((gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_HALTI2) == gcvFALSE)
++ || (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_HALTI2) && (Hardware->identity.chipRevision < 0x5422))
++ )
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &data));
++
++
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1))))))) << (0 ? 15:15))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1))))))) << (0 ? 15:15)));
++
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ data));
++ }
++
++ /* Special workaround for this core
++ ** Make sure FE and TX are on different buses */
++ if ((Hardware->identity.chipModel == gcv2000)
++ && (Hardware->identity.chipRevision == 0x5108))
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00480,
++ &data));
++
++ /* Set FE bus to one, TX bus to zero */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00480,
++ data));
++ }
++
++ /* Test if MMU is initialized. */
++ if ((Hardware->kernel != gcvNULL)
++ && (Hardware->kernel->mmu != gcvNULL)
++ )
++ {
++ /* Reset MMU. */
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(
++ gckHARDWARE_SetMMU(Hardware,
++ Hardware->kernel->mmu->pageTableLogical));
++ }
++ }
++
++ if (Hardware->identity.chipModel >= gcv400
++ && Hardware->identity.chipModel != gcv420
++ && (((((gctUINT32) (Hardware->identity.chipMinorFeatures3)) >> (0 ? 15:15) & ((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1))))))) != gcvTRUE)
++ )
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &data));
++
++ /* Disable PA clock gating. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ data));
++ }
++
++#if gcdHZ_L2_DISALBE
++ /* Disable HZ-L2. */
++ if (((((gctUINT32) (Hardware->identity.chipMinorFeatures3)) >> (0 ? 26:26) & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) == gcvTRUE ||
++ ((((gctUINT32) (Hardware->identity.chipMinorFeatures3)) >> (0 ? 8:8) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) == gcvTRUE)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ &data));
++
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ data));
++ }
++#endif
++
++    /* Limit 2D outstanding requests. */
++ if(Hardware->identity.chipModel == gcv880)
++ {
++ gctUINT32 axi_ot;
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00414, &axi_ot));
++ axi_ot = (axi_ot & (~0xFF)) | 0x10;
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00414, axi_ot));
++ }
++
++ if (Hardware->identity.chip2DControl & 0xFF)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ &data));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (Hardware->identity.chip2DControl & 0xFF) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ data));
++ }
++
++    /* Update GPU AXI cache attribute. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00008,
++ 0x00002200));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryMemory
++**
++** Query the amount of memory available on the hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * InternalSize
++** Pointer to a variable that will hold the size of the internal video
++** memory in bytes. If 'InternalSize' is gcvNULL, no information of the
++** internal memory will be returned.
++**
++** gctUINT32 * InternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * InternalAlignment
++** Pointer to a variable that will hold the alignment requirement of
++** the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctSIZE_T * ExternalSize
++** Pointer to a variable that will hold the size of the external video
++** memory in bytes. If 'ExternalSize' is gcvNULL, no information of the
++** external memory will be returned.
++**
++** gctUINT32 * ExternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * ExternalAlignment
++** Pointer to a variable that will hold the alignment requirement of
++** the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * HorizontalTileSize
++** Number of horizontal pixels per tile. If 'HorizontalTileSize' is
++** gcvNULL, the horizontal tile size will not be returned.
++**
++** gctUINT32 * VerticalTileSize
++** Number of vertical pixels per tile. If 'VerticalTileSize' is
++** gcvNULL, the vertical tile size will not be returned.
++*/
++gceSTATUS
++gckHARDWARE_QueryMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (InternalSize != gcvNULL)
++ {
++ /* No internal memory. */
++ *InternalSize = 0;
++ }
++
++ if (ExternalSize != gcvNULL)
++ {
++ /* No external memory. */
++ *ExternalSize = 0;
++ }
++
++ if (HorizontalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *HorizontalTileSize = 4;
++ }
++
++ if (VerticalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *VerticalTileSize = 4;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*InternalSize=%lu *InternalBaseAddress=0x%08x "
++ "*InternalAlignment=0x%08x *ExternalSize=%lu "
++                   "*ExternalBaseAddress=0x%08x *ExternalAlignment=0x%08x "
++ "*HorizontalTileSize=%u *VerticalTileSize=%u",
++ gcmOPT_VALUE(InternalSize),
++ gcmOPT_VALUE(InternalBaseAddress),
++ gcmOPT_VALUE(InternalAlignment),
++ gcmOPT_VALUE(ExternalSize),
++ gcmOPT_VALUE(ExternalBaseAddress),
++ gcmOPT_VALUE(ExternalAlignment),
++ gcmOPT_VALUE(HorizontalTileSize),
++ gcmOPT_VALUE(VerticalTileSize));
++ return gcvSTATUS_OK;
++}
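++
++/* Usage sketch (illustrative): any output pointer may be gcvNULL, so a
++   caller interested only in the tile size can write:
++
++       gctUINT32 tileX, tileY;
++       gcmkONERROR(gckHARDWARE_QueryMemory(hardware, gcvNULL, gcvNULL, gcvNULL,
++                                           gcvNULL, gcvNULL, gcvNULL, &tileX, &tileY));
++*/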
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryChipIdentity
++**
++** Query the identity of the hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++** Pointer to the identity structure.
++**
++*/
++gceSTATUS
++gckHARDWARE_QueryChipIdentity(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++ )
++{
++ gctUINT32 features;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Identity != gcvNULL);
++
++ /* Return chip model and revision. */
++ Identity->chipModel = Hardware->identity.chipModel;
++ Identity->chipRevision = Hardware->identity.chipRevision;
++
++ /* Return feature set. */
++ features = Hardware->identity.chipFeatures;
++
++ if ((((((gctUINT32) (features)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ /* Override fast clear by command line. */
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (Hardware->allowFastClear) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++
++ if ((((((gctUINT32) (features)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) ))
++ {
++ /* Override compression by command line. */
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (Hardware->allowCompression) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ }
++
++ /* Mark 2D pipe as available for GC500.0 through GC500.2 and GC300,
++ ** since they did not have this bit. */
++ if (((Hardware->identity.chipModel == gcv500) && (Hardware->identity.chipRevision <= 2))
++ || (Hardware->identity.chipModel == gcv300)
++ )
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++ }
++
++ Identity->chipFeatures = features;
++
++ /* Return minor features. */
++ Identity->chipMinorFeatures = Hardware->identity.chipMinorFeatures;
++ Identity->chipMinorFeatures1 = Hardware->identity.chipMinorFeatures1;
++ Identity->chipMinorFeatures2 = Hardware->identity.chipMinorFeatures2;
++ Identity->chipMinorFeatures3 = Hardware->identity.chipMinorFeatures3;
++ Identity->chipMinorFeatures4 = Hardware->identity.chipMinorFeatures4;
++
++ /* Return chip specs. */
++ Identity->streamCount = Hardware->identity.streamCount;
++ Identity->registerMax = Hardware->identity.registerMax;
++ Identity->threadCount = Hardware->identity.threadCount;
++ Identity->shaderCoreCount = Hardware->identity.shaderCoreCount;
++ Identity->vertexCacheSize = Hardware->identity.vertexCacheSize;
++ Identity->vertexOutputBufferSize = Hardware->identity.vertexOutputBufferSize;
++ Identity->pixelPipes = Hardware->identity.pixelPipes;
++ Identity->instructionCount = Hardware->identity.instructionCount;
++ Identity->numConstants = Hardware->identity.numConstants;
++ Identity->bufferSize = Hardware->identity.bufferSize;
++ Identity->varyingsCount = Hardware->identity.varyingsCount;
++ Identity->superTileMode = Hardware->identity.superTileMode;
++ Identity->chip2DControl = Hardware->identity.chip2DControl;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
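++
++/* For readability, the expanded bit-field macros above reduce to plain
++** masked updates. The fast-clear override, for example, is equivalent to:
++**
++**     if (features & (1 << 0))
++**     {
++**         features = (features & ~(1 << 0))
++**                  | ((Hardware->allowFastClear & 1) << 0);
++**     }
++**
++** The compression override is the same pattern on bit 5, and the GC500/
++** GC300 workaround forces bit 9 (the 2D pipe feature bit) to 1.
++*/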
++
++/*******************************************************************************
++**
++** gckHARDWARE_SplitMemory
++**
++** Split a hardware specific memory address into a pool and offset.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctUINT32 Address
++** Address in hardware specific format.
++**
++** OUTPUT:
++**
++** gcePOOL * Pool
++** Pointer to a variable that will hold the pool type for the address.
++**
++** gctUINT32 * Offset
++** Pointer to a variable that will hold the offset for the address.
++*/
++gceSTATUS
++gckHARDWARE_SplitMemory(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ )
++{
++    gcmkHEADER_ARG("Hardware=0x%x Address=0x%08x", Hardware, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Pool != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Offset != gcvNULL);
++
++ if (Hardware->mmuVersion == 0)
++ {
++ /* Dispatch on memory type. */
++ switch ((((((gctUINT32) (Address)) >> (0 ? 31:31)) & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1)))))) ))
++ {
++ case 0x0:
++ /* System memory. */
++ *Pool = gcvPOOL_SYSTEM;
++ break;
++
++ case 0x1:
++ /* Virtual memory. */
++ *Pool = gcvPOOL_VIRTUAL;
++ break;
++
++ default:
++ /* Invalid memory type. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Return offset of address. */
++ *Offset = (((((gctUINT32) (Address)) >> (0 ? 30:0)) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1)))))) );
++ }
++ else
++ {
++ *Pool = gcvPOOL_SYSTEM;
++ *Offset = Address;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Pool=%d *Offset=0x%08x", *Pool, *Offset);
++ return gcvSTATUS_OK;
++}
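++
++/* Worked example, decoded from the field macros above: with the old MMU,
++** bit 31 selects the pool and bits 30:0 carry the offset, so
++**
++**     0x00001000 -> *Pool = gcvPOOL_SYSTEM,  *Offset = 0x1000
++**     0x80001000 -> *Pool = gcvPOOL_VIRTUAL, *Offset = 0x1000
++**
++** With a non-zero mmuVersion the address is returned unchanged as a
++** gcvPOOL_SYSTEM offset.
++*/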
++
++/*******************************************************************************
++**
++** gckHARDWARE_Execute
++**
++** Kickstart the hardware's command processor with an initialized command
++** buffer.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of command buffer.
++**
++** gctSIZE_T Bytes
++** Number of bytes for the prefetch unit (until after the first LINK).
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Execute(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++#ifdef __QNXNTO__
++ IN gctPOINTER Physical,
++ IN gctBOOL PhysicalAddresses,
++#endif
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++ gctUINT32 address = 0, control;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Bytes=%lu",
++ Hardware, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++#ifdef __QNXNTO__
++ if (PhysicalAddresses && (Hardware->mmuVersion == 0))
++ {
++ /* Convert physical into hardware specific address. */
++ gcmkONERROR(
++ gckHARDWARE_ConvertPhysical(Hardware, Physical, &address));
++ }
++ else
++ {
++#endif
++ /* Convert logical into hardware specific address. */
++ gcmkONERROR(
++ gckHARDWARE_ConvertLogical(Hardware, Logical, &address));
++#ifdef __QNXNTO__
++ }
++#endif
++
++ /* Enable all events. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00014, ~0U));
++
++ /* Write address register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00654, address));
++
++ /* Build control register. */
++ control = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) ((Bytes + 7) >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ /* Set big endian */
++ if (Hardware->bigEndian)
++ {
++ control |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 21:20) - (0 ? 21:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:20) - (0 ? 21:20) + 1))))))) << (0 ? 21:20))) | (((gctUINT32) (0x2 & ((gctUINT32) ((((1 ? 21:20) - (0 ? 21:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:20) - (0 ? 21:20) + 1))))))) << (0 ? 21:20)));
++ }
++
++ /* Write control register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00658, control));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Started command buffer @ 0x%08x",
++ address);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
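++
++/* For readability, the control word built above reduces to
++**
++**     control = (1 << 16)                       (enable bit)
++**             | (((Bytes + 7) >> 3) & 0xFFFF);  (prefetch, 64-bit words)
++**
++** so executing a 24-byte buffer programs a prefetch count of 3. On
++** big-endian hosts bits 21:20 are additionally set to 0x2.
++*/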
++
++/*******************************************************************************
++**
++** gckHARDWARE_WaitLink
++**
++** Append a WAIT/LINK command sequence at the specified location in the command
++** queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** WAIT/LINK command sequence at or gcvNULL just to query the size of the
++** WAIT/LINK command sequence.
++**
++** gctUINT32 Offset
++** Offset into command buffer required for alignment.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the WAIT/LINK command
++** sequence. If 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** by the WAIT/LINK command sequence. If 'Bytes' is gcvNULL, nothing will
++** be returned.
++**
++** gctUINT32 * WaitOffset
++** Pointer to a variable that will receive the offset of the WAIT command
++** from the specified logical pointer.
++** If 'WaitOffset' is gcvNULL nothing will be returned.
++**
++** gctSIZE_T * WaitSize
++** Pointer to a variable that will receive the number of bytes used by
++** the WAIT command. If 'WaitSize' is gcvNULL nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_WaitLink(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctUINT32 * WaitOffset,
++ OUT gctSIZE_T * WaitSize
++ )
++{
++ static const gctUINT waitCount = 200;
++
++ gceSTATUS status;
++ gctUINT32 address;
++ gctUINT32_PTR logical;
++ gctSIZE_T bytes;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Offset=0x%08x *Bytes=%lu",
++ Hardware, Logical, Offset, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical != gcvNULL) || (Bytes != gcvNULL));
++
++ /* Compute number of bytes required. */
++#if gcd6000_SUPPORT
++ bytes = gcmALIGN(Offset + 96, 8) - Offset;
++#else
++ bytes = gcmALIGN(Offset + 16, 8) - Offset;
++#endif
++
++ /* Cast the input pointer. */
++ logical = (gctUINT32_PTR) Logical;
++
++ if (logical != gcvNULL)
++ {
++ /* Not enough space? */
++ if (*Bytes < bytes)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Convert logical into hardware specific address. */
++ gcmkONERROR(gckHARDWARE_ConvertLogical(Hardware, logical, &address));
++
++ /* Store the WAIT/LINK address. */
++ Hardware->lastWaitLink = address;
++
++ /* Append WAIT(count). */
++ logical[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (waitCount) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++#if gcd6000_SUPPORT
++        /* Send FE-PE semaphore token. */
++ logical[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[3]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Send FE-PE stall token. */
++ logical[4]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ logical[5]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /*************************************************************/
++ /* Enable chip ID 0. */
++ logical[6] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x0D & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | (1 << 0);
++
++ /* Send semaphore from FE to ChipID 1. */
++ logical[8] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[9] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x0F & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24)));
++
++        /* Stall on the semaphore. */
++ logical[10] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ logical[11] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x0F & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24)));
++
++ /*************************************************************/
++ /* Enable chip ID 1. */
++ logical[12] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x0D & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | (1 << 1);
++
++ /* Send semaphore from FE to ChipID 1. */
++ logical[14] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[15] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x0F & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24)));
++
++ /* Wait for semaphore from ChipID 0. */
++ logical[16] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ logical[17] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x0F & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 27:24) - (0 ? 27:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:24) - (0 ? 27:24) + 1))))))) << (0 ? 27:24)));
++
++ /*************************************************************/
++ /* Enable all chips. */
++ logical[18] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x0D & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | (0xFFFF);
++
++ /* LoadState(AQFlush, 1), flush. */
++ logical[20]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[21]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ /* Append LINK(2, address). */
++ logical[22]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (bytes >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[23] = address;
++#else
++ /* Append LINK(2, address). */
++ logical[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (bytes >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[3] = address;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: WAIT %u", address, waitCount
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: LINK 0x%08x, #%lu",
++ address + 8, address, bytes
++ );
++#endif
++
++ if (WaitOffset != gcvNULL)
++ {
++ /* Return the offset pointer to WAIT command. */
++ *WaitOffset = 0;
++ }
++
++ if (WaitSize != gcvNULL)
++ {
++ /* Return number of bytes used by the WAIT command. */
++ *WaitSize = 8;
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the WAIT/LINK command
++ ** sequence. */
++ *Bytes = bytes;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu *WaitOffset=0x%x *WaitSize=%lu",
++ gcmOPT_VALUE(Bytes), gcmOPT_VALUE(WaitOffset),
++ gcmOPT_VALUE(WaitSize));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
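++
++/* For readability (non-gcd6000 path), the WAIT/LINK pair written above
++** decodes to
++**
++**     logical[0] = (0x07 << 27) | 200;           WAIT(200)
++**     logical[2] = (0x08 << 27) | (bytes >> 3);  LINK back to the WAIT
++**     logical[3] = address;
++**
++** so the FE spins in a WAIT/LINK loop at 'address' until the kernel later
++** patches the WAIT into a LINK that points at new work.
++*/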
++
++/*******************************************************************************
++**
++** gckHARDWARE_End
++**
++** Append an END command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** END command at or gcvNULL just to query the size of the END command.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the END command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the END command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_End(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x *Bytes=%lu",
++ Hardware, Logical, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append END. */
++ logical[0] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x02 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: END", Logical);
++
++ /* Make sure the CPU writes out the data to memory. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, Logical));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the END command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
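++
++/* For readability: the END written above is opcode 0x02 in bits 31:27,
++** i.e. logical[0] = 0x02 << 27 == 0x10000000. Only one word is written,
++** but the command still occupies a full 8-byte slot.
++*/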
++
++/*******************************************************************************
++**
++** gckHARDWARE_Nop
++**
++** Append a NOP command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** NOP command at or gcvNULL just to query the size of the NOP command.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the NOP command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the NOP command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_Nop(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x *Bytes=%lu",
++ Hardware, Logical, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append NOP. */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x03 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: NOP", Logical);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the NOP command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
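++
++/* For readability: the NOP written above is opcode 0x03 in bits 31:27,
++** i.e. logical[0] = 0x03 << 27 == 0x18000000, again filling an 8-byte
++** command slot.
++*/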
++
++/*******************************************************************************
++**
++** gckHARDWARE_Wait
++**
++** Append a WAIT command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** WAIT command at or gcvNULL just to query the size of the WAIT command.
++**
++** gctUINT32 Count
++** Number of cycles to wait.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the WAIT command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the WAIT command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_Wait(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Count,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gceSTATUS status;
++ gctUINT32_PTR logical;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Count=%u *Bytes=%lu",
++ Hardware, Logical, Count, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ /* Cast the input pointer. */
++ logical = (gctUINT32_PTR) Logical;
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append WAIT. */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ {
++ gctUINT32 address;
++
++ /* Convert logical into hardware specific address. */
++ gcmkONERROR(gckHARDWARE_ConvertLogical(
++ Hardware, logical, &address
++ ));
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: WAIT %u", address, Count
++ );
++ }
++#endif
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the WAIT command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
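++
++/* For readability, the WAIT written above reduces to
++**
++**     logical[0] = (0x07 << 27) | (Count & 0xFFFF);
++**
++** so, for example, Count = 200 emits the word 0x380000C8.
++*/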
++
++/*******************************************************************************
++**
++** gckHARDWARE_Event
++**
++** Append an EVENT command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** the EVENT command at or gcvNULL just to query the size of the EVENT
++** command.
++**
++** gctUINT8 Event
++** Event ID to program.
++**
++** gceKERNEL_WHERE FromWhere
++** Location of the pipe to send the event.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the EVENT command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the EVENT command. If 'Bytes' is gcvNULL, nothing will be
++** returned.
++*/
++gceSTATUS
++gckHARDWARE_Event(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT8 Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT size;
++ gctUINT32 destination = 0;
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Event=%u FromWhere=%d *Bytes=%lu",
++ Hardware, Logical, Event, FromWhere, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++ gcmkVERIFY_ARGUMENT(Event < 32);
++
++ /* Determine the size of the command. */
++
++ size = (Hardware->extraEventStates && (FromWhere == gcvKERNEL_PIXEL))
++ ? gcmALIGN(8 + (1 + 5) * 4, 8) /* EVENT + 5 STATES */
++ : 8;
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < size)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ switch (FromWhere)
++ {
++ case gcvKERNEL_COMMAND:
++ /* From command processor. */
++ destination = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ break;
++
++ case gcvKERNEL_PIXEL:
++ /* From pixel engine. */
++ destination = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++        /* Append EVENT(Event, destination). */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[1] = ((((gctUINT32) (destination)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (Event) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)));
++
++ /* Make sure the event ID gets written out before GPU can access it. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, logical + 1));
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ {
++ gctUINT32 phys;
++ gckOS_GetPhysicalAddress(Hardware->os, Logical, &phys);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: EVENT %d", phys, Event);
++ }
++#endif
++
++ /* Append the extra states. These are needed for the chips that do not
++ ** support back-to-back events due to the async interface. The extra
++ ** states add the necessary delay to ensure that event IDs do not
++ ** collide. */
++ if (size > 8)
++ {
++ logical[2] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0100) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++ logical[3] = 0;
++ logical[4] = 0;
++ logical[5] = 0;
++ logical[6] = 0;
++ logical[7] = 0;
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the EVENT command. */
++ *Bytes = size;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
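++
++/* For readability, the EVENT above is a LoadState of register 0x0E01:
++**
++**     logical[0] = (0x01 << 27) | (1 << 16) | 0x0E01;   == 0x08010E01
++**     logical[1] = destination | Event;   bit 5: from FE, bit 6: from PE
++**
++** The five padding states (a LoadState of 0x0100 with count 5, all
++** zeroes) are emitted only when Hardware->extraEventStates is set.
++*/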
++
++/*******************************************************************************
++**
++** gckHARDWARE_PipeSelect
++**
++** Append a PIPESELECT command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** the PIPESELECT command at or gcvNULL just to query the size of the
++** PIPESELECT command.
++**
++** gcePIPE_SELECT Pipe
++** Pipe value to select.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the PIPESELECT command.
++** If 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the PIPESELECT command. If 'Bytes' is gcvNULL, nothing will be
++** returned.
++*/
++gceSTATUS
++gckHARDWARE_PipeSelect(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gcePIPE_SELECT Pipe,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Pipe=%d *Bytes=%lu",
++ Hardware, Logical, Pipe, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ /* Append a PipeSelect. */
++ if (Logical != gcvNULL)
++ {
++ gctUINT32 flush, stall;
++
++ if (*Bytes < 32)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ flush = (Pipe == gcvPIPE_2D)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ : ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++
++ stall = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LoadState(AQFlush, 1), flush. */
++ logical[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[1]
++ = flush;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH 0x%x", logical, flush);
++
++        /* LoadState(AQSemaphore, 1), stall. */
++ logical[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[3]
++ = stall;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: SEMAPHORE 0x%x", logical + 2, stall);
++
++ /* Stall, stall. */
++ logical[4] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++ logical[5] = stall;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: STALL 0x%x", logical + 4, stall);
++
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ logical[6]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[7] = (Pipe == gcvPIPE_2D)
++ ? 0x1
++ : 0x0;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: PIPE %d", logical + 6, Pipe);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the PIPESELECT command. */
++ *Bytes = 32;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
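++
++/* For readability, the 32-byte sequence above decodes to four commands:
++**
++**     LoadState(0x0E03, 1) = flush   flush the 2D or 3D caches
++**     LoadState(0x0E02, 1) = stall   set up an FE-to-PE semaphore
++**     STALL                = stall   wait until the PE signals it
++**     LoadState(0x0E00, 1) = pipe    0x1 selects 2D, 0x0 selects 3D
++*/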
++
++/*******************************************************************************
++**
++** gckHARDWARE_Link
++**
++** Append a LINK command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** the LINK command at or gcvNULL just to query the size of the LINK
++** command.
++**
++** gctPOINTER FetchAddress
++** Logical address of destination of LINK.
++**
++** gctSIZE_T FetchSize
++** Number of bytes in destination of LINK.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the LINK command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the LINK command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_Link(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctPOINTER FetchAddress,
++ IN gctSIZE_T FetchSize,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T bytes;
++ gctUINT32 address;
++ gctUINT32 link;
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x FetchAddress=0x%x FetchSize=%lu "
++ "*Bytes=%lu",
++ Hardware, Logical, FetchAddress, FetchSize,
++ gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Convert logical address to hardware address. */
++ gcmkONERROR(
++ gckHARDWARE_ConvertLogical(Hardware, FetchAddress, &address));
++
++ gcmkONERROR(
++ gckOS_WriteMemory(Hardware->os, logical + 1, address));
++
++ /* Make sure the address got written before the LINK command. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, logical + 1));
++
++        /* Compute the number of bytes to fetch, aligned to 8 bytes. */
++ bytes = gcmALIGN(address + FetchSize, 8) - address;
++
++ /* Append LINK(bytes / 8), FetchAddress. */
++ link = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (bytes >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ gcmkONERROR(
++ gckOS_WriteMemory(Hardware->os, logical, link));
++
++ /* Memory barrier. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, logical));
++
++#if gcdLINK_QUEUE_SIZE && gcdVIRTUAL_COMMAND_BUFFER
++ if (address >= 0x80000000)
++ {
++ gckLINKQUEUE_Enqueue(&Hardware->linkQueue, address, address + bytes);
++ }
++#endif
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the LINK command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
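++
++/* Note the write ordering above: the target address (logical[1]) is
++** committed and a memory barrier issued before the LINK opcode itself
++** (logical[0] = (0x08 << 27) | (bytes >> 3)) is stored, so the FE can
++** never fetch a LINK whose destination is stale.
++*/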
++
++/*******************************************************************************
++**
++** gckHARDWARE_UpdateQueueTail
++**
++** Update the tail of the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of the start of the command queue.
++**
++** gctUINT32 Offset
++** Offset into the command queue of the tail (last command).
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_UpdateQueueTail(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Offset=0x%08x",
++ Hardware, Logical, Offset);
++
++ /* Verify the hardware. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Force a barrier. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, Logical));
++
++ /* Notify gckKERNEL object of change. */
++ gcmkONERROR(
++ gckKERNEL_Notify(Hardware->kernel,
++ gcvNOTIFY_COMMAND_QUEUE,
++ gcvFALSE));
++
++ if (status == gcvSTATUS_CHIP_NOT_READY)
++ {
++ gcmkONERROR(gcvSTATUS_GPU_NOT_RESPONDING);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_ConvertLogical
++**
++** Convert a logical system address into a hardware specific address.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address to convert.
++**
++** gctUINT32* Address
++** Return hardware specific address.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_ConvertLogical(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++ gceSTATUS status;
++ gctUINT32 baseAddress;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x", Hardware, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ status = gckKERNEL_GetGPUAddress(Hardware->kernel, Logical, Address);
++
++ if (status == gcvSTATUS_INVALID_ADDRESS)
++#endif
++ {
++ /* Convert logical address into a physical address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Hardware->os, Logical, &address));
++
++ /* For old MMU, get GPU address according to baseAddress. */
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(gckOS_GetBaseAddress(Hardware->os, &baseAddress));
++
++ /* Subtract base address to get a GPU address. */
++ gcmkASSERT(address >= baseAddress);
++ address -= baseAddress;
++ }
++
++ /* Return hardware specific address. */
++ *Address = (Hardware->mmuVersion == 0)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0))) | (((gctUINT32) ((gctUINT32) (address) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0)))
++ : address;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
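++
++/* For readability: on the old MMU the fallback path above reduces to
++**
++**     *Address = (physical - baseAddress) & 0x7FFFFFFF;
++**
++** (a cleared bit 31 marks the system pool); with a non-zero mmuVersion
++** the physical address is passed through unchanged.
++*/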
++
++/*******************************************************************************
++**
++** gckHARDWARE_ConvertPhysical
++**
++** Convert a physical address into a hardware specific address.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPHYS_ADDR Physical
++** Physical address to convert.
++**
++** gctUINT32* Address
++** Return hardware specific address.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_ConvertPhysical(
++ IN gckHARDWARE Hardware,
++ IN gctPHYS_ADDR Physical,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++ gctUINT32 baseAddress;
++
++ gcmkHEADER_ARG("Hardware=0x%x Physical=0x%x", Hardware, Physical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ address = gcmPTR2INT(Physical);
++
++ /* For old MMU, get GPU address according to baseAddress. */
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkVERIFY_OK(gckOS_GetBaseAddress(Hardware->os, &baseAddress));
++
++ /* Subtract base address to get a GPU address. */
++ gcmkASSERT(address >= baseAddress);
++ address -= baseAddress;
++ }
++
++ /* Return hardware specific address. */
++ *Address = (Hardware->mmuVersion == 0)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0))) | (((gctUINT32) ((gctUINT32) (address) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0)))
++ : address;
++
++ /* Return the status. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Interrupt
++**
++** Process an interrupt.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctBOOL InterruptValid
++** If gcvTRUE, this function will read the interrupt acknowledge
++** register, stores the data, and return whether or not the interrupt
++** is ours or not. If gcvFALSE, this functions will read the interrupt
++** acknowledge register and combine it with any stored value to handle
++** the event notifications.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Interrupt(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL InterruptValid
++ )
++{
++ gckEVENT eventObj;
++ gctUINT32 data;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x InterruptValid=%d", Hardware, InterruptValid);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Extract gckEVENT object. */
++ eventObj = Hardware->kernel->eventObj;
++ gcmkVERIFY_OBJECT(eventObj, gcvOBJ_EVENT);
++
++ if (InterruptValid)
++ {
++ /* Read AQIntrAcknowledge register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00010,
++ &data));
++
++ if (data == 0)
++ {
++ /* Not our interrupt. */
++ status = gcvSTATUS_NOT_OUR_INTERRUPT;
++ }
++ else
++ {
++ /* Inform gckEVENT of the interrupt. */
++ status = gckEVENT_Interrupt(eventObj, data);
++ }
++ }
++ else
++ {
++ /* Handle events. */
++ status = gckEVENT_Notify(eventObj, 0);
++ }
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
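++/* Typical split implied by the InterruptValid contract above (a sketch,
++** not code from this driver; `hardware` stands for the gckHARDWARE object):
++** the interrupt handler itself calls
++**
++**     gckHARDWARE_Interrupt(hardware, gcvTRUE);
++**
++** and, when that returns gcvSTATUS_OK, schedules a worker that later calls
++**
++**     gckHARDWARE_Interrupt(hardware, gcvFALSE);
++**
++** to deliver the stored event notifications outside interrupt context. */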
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryCommandBuffer
++**
++** Query the command buffer alignment and number of reserved bytes.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Alignment
++** Pointer to a variable receiving the alignment for each command.
++**
++** gctSIZE_T * ReservedHead
++** Pointer to a variable receiving the number of reserved bytes at the
++** head of each command buffer.
++**
++** gctSIZE_T * ReservedTail
++** Pointer to a variable receiving the number of bytes reserved at the
++** tail of each command buffer.
++*/
++gceSTATUS
++gckHARDWARE_QueryCommandBuffer(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * Alignment,
++ OUT gctSIZE_T * ReservedHead,
++ OUT gctSIZE_T * ReservedTail
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Alignment != gcvNULL)
++ {
++ /* Align every 8 bytes. */
++ *Alignment = 8;
++ }
++
++ if (ReservedHead != gcvNULL)
++ {
++ /* Reserve space for SelectPipe(). */
++ *ReservedHead = 32;
++ }
++
++ if (ReservedTail != gcvNULL)
++ {
++ /* Reserve space for Link(). */
++ *ReservedTail = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Alignment=%lu *ReservedHead=%lu *ReservedTail=%lu",
++ gcmOPT_VALUE(Alignment), gcmOPT_VALUE(ReservedHead),
++ gcmOPT_VALUE(ReservedTail));
++ return gcvSTATUS_OK;
++}
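++/* Sizing sketch (an illustration, not code from this driver; `payload` and
++** `bytes` are hypothetical names): a caller building one command buffer
++** from these values would reserve
++**
++**     bytes = reservedHead                    (32, for SelectPipe)
++**           + gcmALIGN(payload, alignment)    (commands aligned to 8 bytes)
++**           + reservedTail;                   (8, for Link)
++*/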
++
++/*******************************************************************************
++**
++** gckHARDWARE_QuerySystemMemory
++**
++**  Query the size and base address of the system memory.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * SystemSize
++** Pointer to a variable that receives the maximum size of the system
++** memory.
++**
++** gctUINT32 * SystemBaseAddress
++**          Pointer to a variable that receives the base address for system
++** memory.
++*/
++gceSTATUS
++gckHARDWARE_QuerySystemMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (SystemSize != gcvNULL)
++ {
++ /* Maximum system memory can be 2GB. */
++ *SystemSize = 1U << 31;
++ }
++
++ if (SystemBaseAddress != gcvNULL)
++ {
++ /* Set system memory base address. */
++ *SystemBaseAddress = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*SystemSize=%lu *SystemBaseAddress=%lu",
++ gcmOPT_VALUE(SystemSize), gcmOPT_VALUE(SystemBaseAddress));
++ return gcvSTATUS_OK;
++}
++
++#ifndef VIVANTE_NO_3D
++/*******************************************************************************
++**
++** gckHARDWARE_QueryShaderCaps
++**
++** Query the shader capabilities.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gctUINT * VertexUniforms
++** Pointer to a variable receiving the number of uniforms in the vertex
++** shader.
++**
++** gctUINT * FragmentUniforms
++** Pointer to a variable receiving the number of uniforms in the
++** fragment shader.
++**
++** gctUINT * Varyings
++**          Pointer to a variable receiving the maximum number of varyings.
++*/
++gceSTATUS
++gckHARDWARE_QueryShaderCaps(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT * VertexUniforms,
++ OUT gctUINT * FragmentUniforms,
++ OUT gctUINT * Varyings
++ )
++{
++ gctUINT32 vsConstMax;
++ gctUINT32 psConstMax;
++
++ gcmkHEADER_ARG("Hardware=0x%x VertexUniforms=0x%x "
++ "FragmentUniforms=0x%x Varyings=0x%x",
++ Hardware, VertexUniforms,
++ FragmentUniforms, Varyings);
++
++ if ((Hardware->identity.chipModel == gcv2000)
++ && (Hardware->identity.chipRevision == 0x5118))
++ {
++ vsConstMax = 256;
++ psConstMax = 64;
++ }
++ else if (Hardware->identity.numConstants > 256)
++ {
++ vsConstMax = 256;
++ psConstMax = 256;
++ }
++ else if (Hardware->identity.numConstants == 256)
++ {
++ vsConstMax = 256;
++ psConstMax = 256;
++ }
++ else
++ {
++ vsConstMax = 168;
++ psConstMax = 64;
++ }
++
++ if (VertexUniforms != gcvNULL)
++ {
++ *VertexUniforms = vsConstMax;
++ }
++
++ if (FragmentUniforms != gcvNULL)
++ {
++ *FragmentUniforms = psConstMax;
++ }
++
++ if (Varyings != gcvNULL)
++ {
++ /* Return the shader varyings count. */
++ *Varyings = Hardware->identity.varyingsCount;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetMMU
++**
++** Set the page table base address.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of the page table.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_SetMMU(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical
++ )
++{
++ gceSTATUS status;
++ gctUINT32 address = 0;
++ gctUINT32 baseAddress;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x", Hardware, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++    /* Convert the logical address into a hardware address. */
++ gcmkONERROR(
++ gckHARDWARE_ConvertLogical(Hardware, Logical, &address));
++
++ /* Also get the base address - we need a real physical address. */
++ gcmkONERROR(
++ gckOS_GetBaseAddress(Hardware->os, &baseAddress));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Setting page table to 0x%08X",
++ address + baseAddress);
++
++ /* Write the AQMemoryFePageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00400,
++ address + baseAddress));
++
++ /* Write the AQMemoryRaPageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00410,
++ address + baseAddress));
++
++ /* Write the AQMemoryTxPageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00404,
++ address + baseAddress));
++
++ /* Write the AQMemoryPePageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00408,
++ address + baseAddress));
++
++ /* Write the AQMemoryPezPageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0040C,
++ address + baseAddress));
++
++ /* Return the status. */
++ gcmkFOOTER_NO();
++ return status;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_FlushMMU
++**
++**  Flush the MMU and page table caches.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_FlushMMU(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command;
++ gctUINT32_PTR buffer;
++ gctSIZE_T bufferSize;
++ gctBOOL commitEntered = gcvFALSE;
++ gctPOINTER pointer = gcvNULL;
++ gctUINT32 flushSize;
++ gctUINT32 count;
++ gctUINT32 physical;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Verify the gckCOMMAND object pointer. */
++ command = Hardware->kernel->command;
++
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(command, gcvFALSE));
++ commitEntered = gcvTRUE;
++
++ /* Flush the memory controller. */
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(gckCOMMAND_Reserve(
++ command, 8, &pointer, &bufferSize
++ ));
++
++ buffer = (gctUINT32_PTR) pointer;
++
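++        /* Decoded (a reading aid derived from the expanded field macros):
++        ** buffer[0] is a LOAD_STATE command (opcode 0x01, count 1) for
++        ** state 0x0E04, the MMU flush state; buffer[1] sets bits 0-4 to
++        ** flush all of the old MMU caches. */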
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E04) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++
++ gcmkONERROR(gckCOMMAND_Execute(command, 8));
++ }
++ else
++ {
++ flushSize = 16 * 4;
++
++ gcmkONERROR(gckCOMMAND_Reserve(
++ command, flushSize, &pointer, &bufferSize
++ ));
++
++ buffer = (gctUINT32_PTR) pointer;
++
++ count = (bufferSize - flushSize + 7) >> 3;
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(command->os, buffer, &physical));
++
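++        /* Decoded layout of the 16-word stream assembled below (a reading
++        ** aid derived from the expanded field macros):
++        **   [0..1]   LOAD_STATE 0x0E03: flush color/texture/2D and
++        **            shader L1/L2 caches
++        **   [2..3]   LOAD_STATE 0x0E02: semaphore token FE(0x01)/PE(0x07)
++        **   [4..5]   STALL (opcode 0x09) until PE signals the semaphore
++        **   [6..7]   LINK (opcode 0x08) to word 8, draining the FE FIFO
++        **   [8..9]   LOAD_STATE 0x0061: trigger the MMU cache flush
++        **   [10..13] a second semaphore/stall pair
++        **   [14..15] LINK past this block into the reserved space. */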
++ /* Flush cache. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ /* Arm the PE-FE Semaphore. */
++ buffer[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[3]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ buffer[4]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ buffer[5]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LINK to next slot to flush FE FIFO. */
++ buffer[6]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[7]
++ = physical + 8 * gcmSIZEOF(gctUINT32);
++
++ /* Flush MMU cache. */
++ buffer[8]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[9]
++ = (((((gctUINT32) (~0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) & ((((gctUINT32) (~0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))));
++
++ /* Arm the PE-FE Semaphore. */
++ buffer[10]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[11]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ buffer[12]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ buffer[13]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LINK to next slot to flush FE FIFO. */
++ buffer[14]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (count) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[15]
++ = physical + flushSize;
++
++ gcmkONERROR(gckCOMMAND_Execute(command, flushSize));
++ }
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(command, gcvFALSE));
++ commitEntered = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(Hardware->kernel->command,
++ gcvFALSE));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetMMUv2
++**
++**  Set the master TLB base address and enable or disable the MMU.
++**
++**  INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++**      gctBOOL Enable
++**          gcvTRUE to enable the MMU, gcvFALSE to disable it.
++**
++**      gctPOINTER MtlbAddress
++**          Logical address of the master TLB.
++**
++**      gceMMU_MODE Mode
++**          Page table mode, either gcvMMU_MODE_1K or gcvMMU_MODE_4K.
++**
++**      gctPOINTER SafeAddress
++**          Logical address of the safe page used for faulting accesses.
++**
++**      gctBOOL FromPower
++**          gcvTRUE when called from the power management path.
++**
++**  OUTPUT:
++**
++**      Nothing.
++*/
++gceSTATUS
++gckHARDWARE_SetMMUv2(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Enable,
++ IN gctPOINTER MtlbAddress,
++ IN gceMMU_MODE Mode,
++ IN gctPOINTER SafeAddress,
++ IN gctBOOL FromPower
++ )
++{
++ gceSTATUS status;
++ gctUINT32 config, address;
++ gckCOMMAND command;
++ gctUINT32_PTR buffer;
++ gctSIZE_T bufferSize;
++ gctBOOL commitEntered = gcvFALSE;
++ gctPOINTER pointer = gcvNULL;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL config2D;
++ gctSIZE_T configSize;
++
++ gcmkHEADER_ARG("Hardware=0x%x Enable=%d", Hardware, Enable);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ config2D = gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_PIPE_3D)
++ && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_PIPE_2D);
++
++ configSize = 4 * 4;
++
++ if (config2D)
++ {
++ configSize +=
++ /* Pipe Select. */
++ 4 * 4
++ /* Configure MMU States. */
++ + 4 * 4;
++ }
++
++ /* Convert logical address into physical address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Hardware->os, MtlbAddress, &config));
++
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Hardware->os, SafeAddress, &address));
++
++ if (address & 0x3F)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ switch (Mode)
++ {
++ case gcvMMU_MODE_1K:
++ if (config & 0x3FF)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ config |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ break;
++
++ case gcvMMU_MODE_4K:
++ if (config & 0xFFF)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ config |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
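++    /* At this point config holds the physical MTLB address with bit 0
++    ** selecting the page table mode (1 = 1K mode, 0 = 4K mode). */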
++
++ /* Verify the gckCOMMAND object pointer. */
++ command = Hardware->kernel->command;
++
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(command, FromPower));
++ commitEntered = gcvTRUE;
++
++ gcmkONERROR(gckCOMMAND_Reserve(
++ command, configSize, &pointer, &bufferSize
++ ));
++
++ buffer = pointer;
++
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[1] = config;
++
++ buffer[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0060) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[3] = address;
++
++ if (config2D)
++ {
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ buffer[4]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[5] = 0x1;
++
++ buffer[6]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[7] = config;
++
++ buffer[8]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0060) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[9] = address;
++
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ buffer[10]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[11] = 0x0;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Setup MMU: config=%08x, Safe Address=%08x\n.", config, address);
++
++ gcmkONERROR(gckCOMMAND_Execute(command, configSize));
++
++ if (FromPower == gcvFALSE)
++ {
++        /* Acquire the global semaphore to suspend power management until
++        ** the MMU is enabled. Acquire it before gckCOMMAND_ExitCommit to
++        ** make sure the GPU stays ON. */
++ gcmkONERROR(
++ gckOS_AcquireSemaphore(Hardware->os, Hardware->globalSemaphore));
++
++ acquired = gcvTRUE;
++ }
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(command, FromPower));
++ commitEntered = gcvFALSE;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "call gckCOMMAND_Stall to make sure the config is done.\n ");
++
++ gcmkONERROR(gckCOMMAND_Stall(command, FromPower));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Enable MMU through GCREG_MMU_CONTROL.");
++
++ /* Enable MMU. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0018C,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (Enable) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))));
++
++ if (FromPower == gcvFALSE)
++ {
++        /* Release the global semaphore. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseSemaphore(Hardware->os, Hardware->globalSemaphore));
++
++ acquired = gcvFALSE;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "call gckCOMMAND_Stall to check MMU available.\n");
++
++ gcmkONERROR(gckCOMMAND_Stall(command, FromPower));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "The MMU is available.\n");
++
++ /* Return the status. */
++ gcmkFOOTER_NO();
++ return status;
++
++OnError:
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(Hardware->kernel->command,
++ FromPower));
++ }
++
++ if (acquired)
++ {
++ gcmkVERIFY_OK(
++ gckOS_ReleaseSemaphore(Hardware->os, Hardware->globalSemaphore));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_BuildVirtualAddress
++**
++** Build a virtual address.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** gctUINT32 Index
++** Index into page table.
++**
++** gctUINT32 Offset
++** Offset into page.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++**          Pointer to a variable receiving the hardware address.
++*/
++gceSTATUS
++gckHARDWARE_BuildVirtualAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Index=%u Offset=%u", Hardware, Index, Offset);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Build virtual address. */
++ *Address = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0))) | (((gctUINT32) ((gctUINT32) (Offset | (Index << 12)) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0)));
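++    /* Decoded: *Address = 0x80000000 | (Index << 12) | Offset; bit 31
++    ** marks the address as virtual and the shift by 12 implies 4 KB pages. */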
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckHARDWARE_GetIdle(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Wait,
++ OUT gctUINT32 * Data
++ )
++{
++ gceSTATUS status;
++ gctUINT32 idle = 0;
++ gctINT retry, poll, pollCount;
++
++ gcmkHEADER_ARG("Hardware=0x%x Wait=%d", Hardware, Wait);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++ /* If we have to wait, try 100 polls per millisecond. */
++ pollCount = Wait ? 100 : 1;
++
++ /* At most, try for 1 second. */
++ for (retry = 0; retry < 1000; ++retry)
++ {
++        /* Poll the idle register. */
++ for (poll = pollCount; poll > 0; --poll)
++ {
++ /* Read register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00004, &idle));
++
++ /* See if we have to wait for FE idle. */
++ if ((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ /* FE is idle. */
++ break;
++ }
++ }
++
++ /* Check if we need to wait for FE and FE is busy. */
++ if (Wait && !(((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ /* Wait a little. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "%s: Waiting for idle: 0x%08X",
++ __FUNCTION__, idle);
++
++ gcmkVERIFY_OK(gckOS_Delay(Hardware->os, 1));
++ }
++ else
++ {
++ break;
++ }
++ }
++
++ /* Return idle to caller. */
++ *Data = idle;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Data=0x%08x", *Data);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/* Flush the caches. */
++gceSTATUS
++gckHARDWARE_Flush(
++ IN gckHARDWARE Hardware,
++ IN gceKERNEL_FLUSH Flush,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT32 pipe;
++ gctUINT32 flush = 0;
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++ gctBOOL fcFlushStall;
++ gctUINT32 reserveBytes = 8;
++
++ gcmkHEADER_ARG("Hardware=0x%x Flush=0x%x Logical=0x%x *Bytes=%lu",
++ Hardware, Flush, Logical, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Get current pipe. */
++ pipe = Hardware->kernel->command->pipeSelect;
++
++ fcFlushStall
++ = ((((gctUINT32) (Hardware->identity.chipMinorFeatures1)) >> (0 ? 31:31) & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1)))))))
++ && (Flush == gcvFLUSH_ALL)
++ ;
++
++ if (fcFlushStall)
++ {
++ reserveBytes += 8;
++ }
++
++ /* Flush 3D color cache. */
++ if ((Flush & gcvFLUSH_COLOR) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)));
++ }
++
++ /* Flush 3D depth cache. */
++ if ((Flush & gcvFLUSH_DEPTH) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++
++ /* Flush 3D texture cache. */
++ if ((Flush & gcvFLUSH_TEXTURE) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)));
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++ }
++
++ /* Flush 2D cache. */
++ if ((Flush & gcvFLUSH_2D) && (pipe == 0x1))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++ }
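++    /* Decoded: the flush word uses the AQFlush state bits -- bit 0 depth,
++    ** bit 1 color, bits 2 and 4 the texture caches, bit 3 the 2D cache. */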
++
++ /* See if there is a valid flush. */
++ if (flush == 0)
++ {
++ if (Bytes != gcvNULL)
++ {
++ /* No bytes required. */
++ *Bytes = 0;
++ }
++ }
++    else
++ {
++ /* Copy to command queue. */
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < reserveBytes)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append LOAD_STATE to AQFlush. */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[1] = flush;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH 0x%x", logical, flush);
++
++ if (fcFlushStall)
++ {
++ logical[2] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0594) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[3] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH 0x%x", logical + 3, logical[3]);
++ }
++
++ }
++
++ if (Bytes != gcvNULL)
++ {
++            /* Return the number of bytes required. */
++ *Bytes = reserveBytes;
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_SetFastClear(
++ IN gckHARDWARE Hardware,
++ IN gctINT Enable,
++ IN gctINT Compression
++ )
++{
++#ifndef VIVANTE_NO_3D
++ gctUINT32 debug;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Enable=%d Compression=%d",
++ Hardware, Enable, Compression);
++
++ /* Only process if fast clear is available. */
++ if ((((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ if (Enable == -1)
++ {
++ /* Determine automatic value for fast clear. */
++ Enable = ((Hardware->identity.chipModel != gcv500)
++ || (Hardware->identity.chipRevision >= 3)
++ ) ? 1 : 0;
++ }
++
++ if (Compression == -1)
++ {
++ /* Determine automatic value for compression. */
++ Compression = Enable
++ & (((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) );
++ }
++
++ /* Read AQMemoryDebug register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00414, &debug));
++
++ /* Set fast clear bypass. */
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))) | (((gctUINT32) ((gctUINT32) (Enable == 0) & ((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20)));
++
++ if (
++ ((((gctUINT32) (Hardware->identity.chipMinorFeatures2)) >> (0 ? 27:27) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) ||
++ (Hardware->identity.chipModel >= gcv4000))
++ {
++ /* Set compression bypass. */
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))) << (0 ? 21:21))) | (((gctUINT32) ((gctUINT32) (Compression == 0) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))) << (0 ? 21:21)));
++ }
++
++ /* Write back AQMemoryDebug register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ debug));
++
++        /* Store the fast clear and compression flags. */
++ Hardware->allowFastClear = Enable;
++ Hardware->allowCompression = Compression;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "FastClear=%d Compression=%d", Enable, Compression);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ return gcvSTATUS_OK;
++#endif
++}
++
++typedef enum
++{
++ gcvPOWER_FLAG_INITIALIZE = 1 << 0,
++ gcvPOWER_FLAG_STALL = 1 << 1,
++ gcvPOWER_FLAG_STOP = 1 << 2,
++ gcvPOWER_FLAG_START = 1 << 3,
++ gcvPOWER_FLAG_RELEASE = 1 << 4,
++ gcvPOWER_FLAG_DELAY = 1 << 5,
++ gcvPOWER_FLAG_SAVE = 1 << 6,
++ gcvPOWER_FLAG_ACQUIRE = 1 << 7,
++ gcvPOWER_FLAG_POWER_OFF = 1 << 8,
++ gcvPOWER_FLAG_CLOCK_OFF = 1 << 9,
++ gcvPOWER_FLAG_CLOCK_ON = 1 << 10,
++}
++gcePOWER_FLAGS;
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++static gctCONST_STRING
++_PowerEnum(gceCHIPPOWERSTATE State)
++{
++ const gctCONST_STRING states[] =
++ {
++ gcmSTRING(gcvPOWER_ON),
++ gcmSTRING(gcvPOWER_OFF),
++ gcmSTRING(gcvPOWER_IDLE),
++ gcmSTRING(gcvPOWER_SUSPEND),
++ gcmSTRING(gcvPOWER_SUSPEND_ATPOWERON),
++ gcmSTRING(gcvPOWER_OFF_ATPOWERON),
++ gcmSTRING(gcvPOWER_IDLE_BROADCAST),
++ gcmSTRING(gcvPOWER_SUSPEND_BROADCAST),
++ gcmSTRING(gcvPOWER_OFF_BROADCAST),
++ gcmSTRING(gcvPOWER_OFF_RECOVERY),
++ gcmSTRING(gcvPOWER_ON_AUTO)
++ };
++
++ if ((State >= gcvPOWER_ON) && (State <= gcvPOWER_ON_AUTO))
++ {
++ return states[State - gcvPOWER_ON];
++ }
++
++ return "unknown";
++}
++#endif
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetPowerManagementState
++**
++** Set GPU to a specified power state.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** gceCHIPPOWERSTATE State
++** Power State.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetPowerManagementState(
++ IN gckHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command = gcvNULL;
++ gckOS os;
++ gctUINT flag, clock;
++ gctPOINTER buffer;
++ gctSIZE_T bytes, requested;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL mutexAcquired = gcvFALSE;
++ gctBOOL stall = gcvTRUE;
++ gctBOOL broadcast = gcvFALSE;
++#if gcdPOWEROFF_TIMEOUT
++ gctBOOL timeout = gcvFALSE;
++ gctBOOL isAfter = gcvFALSE;
++ gctUINT32 currentTime;
++#endif
++ gctUINT32 process, thread;
++ gctBOOL commitEntered = gcvFALSE;
++ gctBOOL commandStarted = gcvFALSE;
++ gctBOOL isrStarted = gcvFALSE;
++
++#if gcdENABLE_PROFILING
++ gctUINT64 time, freq, mutexTime, onTime, stallTime, stopTime, delayTime,
++ initTime, offTime, startTime, totalTime;
++#endif
++ gctBOOL global = gcvFALSE;
++ gctBOOL globalAcquired = gcvFALSE;
++ gctBOOL configMmu = gcvFALSE;
++
++ /* State transition flags. */
++ static const gctUINT flags[4][4] =
++ {
++ /* gcvPOWER_ON */
++ { /* ON */ 0,
++ /* OFF */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL,
++ /* SUSPEND */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_OFF */
++ { /* ON */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY,
++ /* OFF */ 0,
++ /* IDLE */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_DELAY,
++ /* SUSPEND */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_IDLE */
++ { /* ON */ gcvPOWER_FLAG_RELEASE,
++ /* OFF */ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ 0,
++ /* SUSPEND */ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_SUSPEND */
++ { /* ON */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* OFF */ gcvPOWER_FLAG_SAVE |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* SUSPEND */ 0,
++ },
++ };
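++    /* The table is indexed as flags[currentState][requestedState]; the
++    ** resulting bit mask drives the transition steps performed below. */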
++
++ /* Clocks. */
++ static const gctUINT clocks[4] =
++ {
++ /* gcvPOWER_ON */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (64) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++
++ /* gcvPOWER_OFF */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++
++ /* gcvPOWER_IDLE */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++
++ /* gcvPOWER_SUSPEND */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++ };
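++    /* Each clock word matches the AQHiClockControl layout: bit 0 disables
++    ** the 3D clock, bit 1 the 2D clock, bits 8:2 hold FSCALE_VAL (64 is
++    ** full speed, 1 is minimal) and bit 9 latches the new FSCALE value. */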
++
++ gcmkHEADER_ARG("Hardware=0x%x State=%d", Hardware, State);
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Switching to power state %d(%s)",
++ State, _PowerEnum(State));
++#endif
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Get the gckOS object pointer. */
++ os = Hardware->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Get the gckCOMMAND object pointer. */
++ gcmkVERIFY_OBJECT(Hardware->kernel, gcvOBJ_KERNEL);
++ command = Hardware->kernel->command;
++ gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
++
++ if (Hardware->powerManagement == gcvFALSE)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Start profiler. */
++ gcmkPROFILE_INIT(freq, time);
++
++ /* Convert the broadcast power state. */
++ switch (State)
++ {
++ case gcvPOWER_SUSPEND_ATPOWERON:
++ /* Convert to SUSPEND and don't wait for STALL. */
++ State = gcvPOWER_SUSPEND;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_OFF_ATPOWERON:
++ /* Convert to OFF and don't wait for STALL. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_IDLE_BROADCAST:
++ /* Convert to IDLE and note we are inside broadcast. */
++ State = gcvPOWER_IDLE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_SUSPEND_BROADCAST:
++ /* Convert to SUSPEND and note we are inside broadcast. */
++ State = gcvPOWER_SUSPEND;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_BROADCAST:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_RECOVERY:
++ /* Convert to OFF and note we are inside recovery. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_ON_AUTO:
++        /* Convert to ON. */
++ State = gcvPOWER_ON;
++ break;
++
++ case gcvPOWER_ON:
++ case gcvPOWER_IDLE:
++ case gcvPOWER_SUSPEND:
++ case gcvPOWER_OFF:
++ /* Mark as global power management. */
++ global = gcvTRUE;
++ break;
++
++#if gcdPOWEROFF_TIMEOUT
++ case gcvPOWER_OFF_TIMEOUT:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++        /* Check the timeout below. */
++ timeout = gcvTRUE;
++ break;
++#endif
++
++ default:
++ break;
++ }
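++
++    /* At this point State has been reduced to one of the four base states
++    ** (ON, IDLE, SUSPEND, OFF); broadcast, global, stall and timeout record
++    ** how the request arrived and steer the locking and stalling paths
++    ** below. */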
++
++ /* Get current process and thread IDs. */
++ gcmkONERROR(gckOS_GetProcessID(&process));
++ gcmkONERROR(gckOS_GetThreadID(&thread));
++
++    /* Before grabbing any locks, check whether a state change is needed at all. */
++    if (State == Hardware->chipPowerState)
++    {
++        gcmkFOOTER_NO();
++        return gcvSTATUS_OK;
++    }
++
++ if (broadcast)
++ {
++ /* Try to acquire the power mutex. */
++ status = gckOS_AcquireMutex(os, Hardware->powerMutex, 0);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ /* Check if we already own this mutex. */
++ if ((Hardware->powerProcess == process)
++ && (Hardware->powerThread == thread)
++ )
++ {
++ /* Bail out on recursive power management. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ else if (State == gcvPOWER_IDLE || State == gcvPOWER_SUSPEND)
++ {
++                /* Called from the IST, so waiting here would deadlock
++                ** if the lock holder calls gckCOMMAND_Stall(). */
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++#if gcdPOWEROFF_TIMEOUT
++ else if(State == gcvPOWER_OFF && timeout == gcvTRUE)
++ {
++                /*
++                ** Try to acquire the mutex for up to gcdPOWEROFF_TIMEOUT
++                ** milliseconds; flush_delayed_work may be in progress on
++                ** this path, so waiting forever here would deadlock. */
++ status = gckOS_AcquireMutex(os, Hardware->powerMutex, gcdPOWEROFF_TIMEOUT);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ gckOS_Print("GPU Timer deadlock, exit by timeout!!!!\n");
++
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++ }
++#endif
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os,
++ Hardware->powerMutex,
++ gcvINFINITE));
++ }
++ }
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os, Hardware->powerMutex, gcvINFINITE));
++ }
++
++    /* Get time until mutex acquired. */
++ gcmkPROFILE_QUERY(time, mutexTime);
++
++ Hardware->powerProcess = process;
++ Hardware->powerThread = thread;
++ mutexAcquired = gcvTRUE;
++
++ /* Grab control flags and clock. */
++ flag = flags[Hardware->chipPowerState][State];
++ clock = clocks[State];
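++
++    /* flags[][] is a [current state][target state] transition table; a zero
++    ** entry means the transition is a no-op and is rejected further down.
++    ** clocks[] holds the clock control value for the target state. */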
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ if (State == gcvPOWER_ON)
++ {
++ clock = ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (Hardware->powerOnFscaleVal) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2)));
++ }
++#endif
++
++ if (State == gcvPOWER_SUSPEND && Hardware->chipPowerState == gcvPOWER_OFF && broadcast)
++ {
++#if gcdPOWER_SUSNPEND_WHEN_IDLE
++ /* Do nothing */
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++#else
++        /* The clock must be on when switching power from OFF to SUSPEND. */
++ clock = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) ;
++#endif
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ if (timeout)
++ {
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ gcmkONERROR(
++ gckOS_TicksAfter(Hardware->powerOffTime, currentTime, &isAfter));
++
++        /* powerOffTime was pushed forward; give up. */
++ if (isAfter
++ /* Expect a transition start from IDLE or SUSPEND. */
++ || (Hardware->chipPowerState == gcvPOWER_ON)
++ || (Hardware->chipPowerState == gcvPOWER_OFF)
++ )
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Power Off GPU[%d] at %u [supposed to be at %u]",
++ Hardware->core, currentTime, Hardware->powerOffTime);
++ }
++
++ if (State == gcvPOWER_ON || State == gcvPOWER_OFF)
++ {
++        gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "Cancel powerOff timer");
++
++ /* Cancel running timer when GPU enters ON or OFF. */
++ gcmkVERIFY_OK(gckOS_StopTimer(os, Hardware->powerOffTimer));
++ }
++#endif
++
++ if (flag == 0)
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++    /* If this is an internal power management request, we have to check
++    ** whether we can grab the global power semaphore. If we cannot, we
++    ** have to wait until the external world changes power management. */
++ if (!global)
++ {
++ /* Try to acquire the global semaphore. */
++ status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ if (State == gcvPOWER_IDLE || State == gcvPOWER_SUSPEND)
++ {
++                /* Called from a thread routine which must NEVER sleep. */
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++
++ /* Release the power mutex. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Releasing the power mutex.");
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++ mutexAcquired = gcvFALSE;
++
++ /* Wait for the semaphore. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Waiting for global semaphore.");
++ gcmkONERROR(gckOS_AcquireSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvTRUE;
++
++ /* Acquire the power mutex. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Reacquiring the power mutex.");
++ gcmkONERROR(gckOS_AcquireMutex(os,
++ Hardware->powerMutex,
++ gcvINFINITE));
++ mutexAcquired = gcvTRUE;
++
++ /* chipPowerState may be changed by external world during the time
++ ** we give up powerMutex, so updating flag now is necessary. */
++ flag = flags[Hardware->chipPowerState][State];
++
++ if (flag == 0)
++ {
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvFALSE;
++
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++ mutexAcquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ /* Error. */
++ gcmkONERROR(status);
++ }
++
++ /* Release the global semaphore again. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvFALSE;
++ }
++ else
++ {
++ if (State == gcvPOWER_OFF || State == gcvPOWER_SUSPEND || State == gcvPOWER_IDLE)
++ {
++ /* Acquire the global semaphore if it has not been acquired. */
++ status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
++ if (status == gcvSTATUS_OK)
++ {
++ globalAcquired = gcvTRUE;
++ }
++ else if (status != gcvSTATUS_TIMEOUT)
++ {
++ /* Other errors. */
++ gcmkONERROR(status);
++ }
++ /* Ignore gcvSTATUS_TIMEOUT and leave globalAcquired as gcvFALSE.
++ ** gcvSTATUS_TIMEOUT means global semaphore has already
++ ** been acquired before this operation, so even if we fail,
++ ** we should not release it in our error handling. It should be
++ ** released by the next successful global gcvPOWER_ON. */
++ }
++
++        /* Global power management can't be aborted, so synchronize with
++        ** the preceding commit before continuing. */
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++ acquired = gcvTRUE;
++
++ /* avoid acquiring again. */
++ flag &= ~gcvPOWER_FLAG_ACQUIRE;
++ }
++ }
++
++ if (flag & (gcvPOWER_FLAG_INITIALIZE | gcvPOWER_FLAG_CLOCK_ON))
++ {
++ /* Turn on the power. */
++ gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvTRUE, gcvTRUE));
++
++ /* Mark clock and power as enabled. */
++ Hardware->clockState = gcvTRUE;
++ Hardware->powerState = gcvTRUE;
++
++ for (;;)
++ {
++ /* Check if GPU is present and awake. */
++ status = _IsGPUPresent(Hardware);
++
++ /* Check if the GPU is not responding. */
++ if (status == gcvSTATUS_GPU_NOT_RESPONDING)
++ {
++ /* Turn off the power and clock. */
++ gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvFALSE, gcvFALSE));
++
++ Hardware->clockState = gcvFALSE;
++ Hardware->powerState = gcvFALSE;
++
++ /* Wait a little. */
++ gckOS_Delay(os, 1);
++
++ /* Turn on the power and clock. */
++ gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvTRUE, gcvTRUE));
++
++ Hardware->clockState = gcvTRUE;
++ Hardware->powerState = gcvTRUE;
++
++ /* We need to initialize the hardware and start the command
++ * processor. */
++ flag |= gcvPOWER_FLAG_INITIALIZE | gcvPOWER_FLAG_START;
++ }
++ else
++ {
++ /* Test for error. */
++ gcmkONERROR(status);
++
++ /* Break out of loop. */
++ break;
++ }
++ }
++ }
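++
++    /* The loop above power-cycles the GPU (off, 1 ms delay, on) until
++    ** _IsGPUPresent() succeeds, and forces a full re-initialization plus
++    ** command processor restart once it does. */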
++
++ /* Get time until powered on. */
++ gcmkPROFILE_QUERY(time, onTime);
++
++ if ((flag & gcvPOWER_FLAG_STALL) && stall)
++ {
++ gctBOOL idle;
++ gctINT32 atomValue;
++
++ /* For global operation, all pending commits have already been
++ ** blocked by globalSemaphore or powerSemaphore.*/
++ if (!global)
++ {
++ /* Check commit atom. */
++ gcmkONERROR(gckOS_AtomGet(os, command->atomCommit, &atomValue));
++
++ if (atomValue > 0)
++ {
++ /* Commits are pending - abort power management. */
++ status = broadcast ? gcvSTATUS_CHIP_NOT_READY
++ : gcvSTATUS_MORE_DATA;
++ goto OnError;
++ }
++ }
++
++ if (broadcast)
++ {
++ /* Check for idle. */
++ gcmkONERROR(gckHARDWARE_QueryIdle(Hardware, &idle));
++
++ if (!idle)
++ {
++ status = gcvSTATUS_CHIP_NOT_READY;
++ goto OnError;
++ }
++ }
++
++ else
++ {
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(command, gcvTRUE));
++ commitEntered = gcvTRUE;
++
++ /* Get the size of the flush command. */
++ gcmkONERROR(gckHARDWARE_Flush(Hardware,
++ gcvFLUSH_ALL,
++ gcvNULL,
++ &requested));
++
++ /* Reserve space in the command queue. */
++ gcmkONERROR(gckCOMMAND_Reserve(command,
++ requested,
++ &buffer,
++ &bytes));
++
++ /* Append a flush. */
++ gcmkONERROR(gckHARDWARE_Flush(
++ Hardware, gcvFLUSH_ALL, buffer, &bytes
++ ));
++
++ /* Execute the command queue. */
++ gcmkONERROR(gckCOMMAND_Execute(command, requested));
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(command, gcvTRUE));
++ commitEntered = gcvFALSE;
++
++ /* Wait to finish all commands. */
++ gcmkONERROR(gckCOMMAND_Stall(command, gcvTRUE));
++ }
++ }
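++
++    /* For a non-broadcast stall, an explicit FLUSH_ALL was committed and
++    ** gckCOMMAND_Stall() waited for it, so no rendering work is lost across
++    ** the power transition; broadcast requests instead bail out with
++    ** gcvSTATUS_CHIP_NOT_READY while the chip is still busy. */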
++
++ /* Get time until stalled. */
++ gcmkPROFILE_QUERY(time, stallTime);
++
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++ acquired = gcvTRUE;
++ }
++
++ if (flag & gcvPOWER_FLAG_STOP)
++ {
++ /* Stop the command parser. */
++ gcmkONERROR(gckCOMMAND_Stop(command, gcvFALSE));
++
++ /* Stop the Isr. */
++ if (Hardware->stopIsr)
++ {
++ gcmkONERROR(Hardware->stopIsr(Hardware->isrContext, Hardware->core));
++ }
++ }
++
++ /* Flush Cache before Power Off. */
++ if (flag & gcvPOWER_FLAG_POWER_OFF)
++ {
++ if (Hardware->clockState == gcvFALSE)
++ {
++ /* Turn off the GPU power. */
++ gcmkONERROR(
++ gckOS_SetGPUPower(os,
++ Hardware->core,
++ gcvTRUE,
++ gcvTRUE));
++
++ Hardware->clockState = gcvTRUE;
++
++ if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_DYNAMIC_FREQUENCY_SCALING) != gcvTRUE)
++ {
++ /* Write the clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ clocks[0]));
++
++ /* Done loading the frequency scaler. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clocks[0])) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++ }
++ }
++
++ gcmkONERROR(gckCOMMAND_Start(command));
++
++ gcmkONERROR(_FlushCache(Hardware, command));
++
++ gckOS_Delay(gcvNULL, 1);
++
++ /* Stop the command parser. */
++ gcmkONERROR(gckCOMMAND_Stop(command, gcvFALSE));
++
++ flag |= gcvPOWER_FLAG_CLOCK_OFF;
++ }
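++
++    /* Power-off path: the clock was re-enabled just long enough to restart
++    ** the front end, flush the caches to memory, and stop the command
++    ** parser again; folding gcvPOWER_FLAG_CLOCK_OFF into flag makes the
++    ** code below gate the clock as well. */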
++
++ /* Get time until stopped. */
++ gcmkPROFILE_QUERY(time, stopTime);
++
++ /* Only process this when hardware is enabled. */
++ if (Hardware->clockState && Hardware->powerState
++ /* Don't touch clock control if dynamic frequency scaling is available. */
++ && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_DYNAMIC_FREQUENCY_SCALING) != gcvTRUE
++ )
++ {
++ if (flag & (gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF))
++ {
++ if (Hardware->identity.chipModel == gcv4000
++ && Hardware->identity.chipRevision == 0x5208)
++ {
++ clock &= ~2U;
++ }
++ }
++
++ /* Write the clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Done loading the frequency scaler. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++ }
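++
++    /* The two writes above follow the frequency scaler handshake used
++    ** throughout this file: write the clock value with the load strobe
++    ** (bit 9, assuming the AQHiClockControl layout) set, then write it
++    ** again with the strobe cleared to latch the new divider. */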
++
++ if (flag & gcvPOWER_FLAG_DELAY)
++ {
++ /* Wait for the specified amount of time to settle coming back from
++ ** power-off or suspend state. */
++ gcmkONERROR(gckOS_Delay(os, gcdPOWER_CONTROL_DELAY));
++ }
++
++ /* Get time until delayed. */
++ gcmkPROFILE_QUERY(time, delayTime);
++
++ if (flag & gcvPOWER_FLAG_INITIALIZE)
++ {
++ /* Initialize hardware. */
++ gcmkONERROR(gckHARDWARE_InitializeHardware(Hardware));
++
++ gcmkONERROR(gckHARDWARE_SetFastClear(Hardware,
++ Hardware->allowFastClear,
++ Hardware->allowCompression));
++
++ /* Force the command queue to reload the next context. */
++ command->currContext = gcvNULL;
++
++        /* Need to configure the MMU after the command processor starts. */
++ configMmu = gcvTRUE;
++ }
++
++ /* Get time until initialized. */
++ gcmkPROFILE_QUERY(time, initTime);
++
++ if (flag & (gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF))
++ {
++ /* Turn off the GPU power. */
++ gcmkONERROR(
++ gckOS_SetGPUPower(os,
++ Hardware->core,
++ (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE,
++ (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE));
++
++ /* Save current hardware power and clock states. */
++ Hardware->clockState = (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE;
++ Hardware->powerState = (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE;
++ }
++
++ /* Get time until off. */
++ gcmkPROFILE_QUERY(time, offTime);
++
++ if (flag & gcvPOWER_FLAG_START)
++ {
++ /* Start the command processor. */
++ gcmkONERROR(gckCOMMAND_Start(command));
++ commandStarted = gcvTRUE;
++
++ if (Hardware->startIsr)
++ {
++ /* Start the Isr. */
++ gcmkONERROR(Hardware->startIsr(Hardware->isrContext, Hardware->core));
++ isrStarted = gcvTRUE;
++ }
++
++ /* Set NEW MMU. */
++ if (Hardware->mmuVersion != 0 && configMmu)
++ {
++ gcmkONERROR(
++ gckHARDWARE_SetMMUv2(
++ Hardware,
++ gcvTRUE,
++ Hardware->kernel->mmu->mtlbLogical,
++ gcvMMU_MODE_4K,
++ (gctUINT8_PTR)Hardware->kernel->mmu->mtlbLogical + gcdMMU_MTLB_SIZE,
++ gcvTRUE
++ ));
++ }
++ }
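++
++    /* After a restart with the new-style (version != 0) MMU, the master
++    ** TLB is reprogrammed from the kernel MMU object: 4K page mode, with
++    ** what appears to be the safe-address area placed directly after the
++    ** gcdMMU_MTLB_SIZE bytes of MTLB entries. */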
++
++ /* Get time until started. */
++ gcmkPROFILE_QUERY(time, startTime);
++
++ if (flag & gcvPOWER_FLAG_RELEASE)
++ {
++ /* Release the power management semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, command->powerSemaphore));
++ acquired = gcvFALSE;
++
++ if (global)
++ {
++            /* Verify that the global semaphore has already been acquired
++            ** before we release it. If it was, gckOS_TryAcquireSemaphore
++            ** returns gcvSTATUS_TIMEOUT and we simply release it. Otherwise
++            ** the global semaphore is acquired here, but it is still
++            ** released immediately. */
++ status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
++ if (status != gcvSTATUS_TIMEOUT)
++ {
++ gcmkONERROR(status);
++ }
++
++ /* Release the global semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvFALSE;
++ }
++ }
++
++ /* Save the new power state. */
++ Hardware->chipPowerState = State;
++
++#if gcdDVFS
++ if (State == gcvPOWER_ON && Hardware->kernel->dvfs)
++ {
++ gckDVFS_Start(Hardware->kernel->dvfs);
++ }
++#endif
++
++#if gcdPOWEROFF_TIMEOUT
++ if (State == gcvPOWER_IDLE || State == gcvPOWER_SUSPEND)
++ {
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ Hardware->powerOffTime = currentTime + Hardware->powerOffTimeout;
++ /* Start a timer to power off GPU when GPU enters IDLE or SUSPEND. */
++ gcmkVERIFY_OK(gckOS_StartTimer(os,
++ Hardware->powerOffTimer,
++ Hardware->powerOffTimeout));
++ }
++#endif
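++
++    /* When gcdPOWEROFF_TIMEOUT is enabled, entering IDLE or SUSPEND arms a
++    ** one-shot timer that later re-enters this function with
++    ** gcvPOWER_OFF_TIMEOUT, turning the GPU fully off unless powerOffTime
++    ** was pushed forward in the meantime. */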
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* Get total time. */
++ gcmkPROFILE_QUERY(time, totalTime);
++#if gcdENABLE_PROFILING
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "PROF(%llu): mutex:%llu on:%llu stall:%llu stop:%llu",
++ freq, mutexTime, onTime, stallTime, stopTime);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ " delay:%llu init:%llu off:%llu start:%llu total:%llu",
++ delayTime, initTime, offTime, startTime, totalTime);
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (commandStarted)
++ {
++ gcmkVERIFY_OK(gckCOMMAND_Stop(command, gcvFALSE));
++ }
++
++ if (isrStarted)
++ {
++ gcmkVERIFY_OK(Hardware->stopIsr(Hardware->isrContext, Hardware->core));
++ }
++
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(command, gcvTRUE));
++ }
++
++ if (acquired)
++ {
++ /* Release semaphore. */
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Hardware->os,
++ command->powerSemaphore));
++ }
++
++ if (globalAcquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Hardware->os,
++ Hardware->globalSemaphore));
++ }
++
++ if (mutexAcquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++**  gckHARDWARE_QueryPowerManagementState
++**
++**  Get the GPU power state.
++**
++**  INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++**  OUTPUT:
++**
++**      gceCHIPPOWERSTATE* State
++**          Power state.
++**
++*/
++gceSTATUS
++gckHARDWARE_QueryPowerManagementState(
++ IN gckHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(State != gcvNULL);
++
++    /* Return the state. */
++ *State = Hardware->chipPowerState;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*State=%d", *State);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetPowerManagement
++**
++** Configure GPU power management function.
++** Only used in driver initialization stage.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++**      gctBOOL PowerManagement
++**          Power management state.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetPowerManagement(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ Hardware->powerManagement = PowerManagement;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetGpuProfiler
++**
++** Configure GPU profiler function.
++** Only used in driver initialization stage.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++**      gctBOOL GpuProfiler
++**          GPU profiler state.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetGpuProfiler(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL GpuProfiler
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ Hardware->gpuProfiler = GpuProfiler;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++gceSTATUS
++gckHARDWARE_SetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 FscaleValue
++ )
++{
++ gceSTATUS status;
++ gctUINT32 clock;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Hardware=0x%x FscaleValue=%d", Hardware, FscaleValue);
++
++ gcmkVERIFY_ARGUMENT(FscaleValue > 0 && FscaleValue <= 64);
++
++ gcmkONERROR(
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ Hardware->powerOnFscaleVal = FscaleValue;
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &data));
++
++ /* Disable all clock gating. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11)))));
++
++ clock = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (FscaleValue) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Done loading the frequency scaler. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++
++ /* Restore all clock gating. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ data));
++ }
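++
++    /* Sequence above: save and disable the module-level clock gating
++    ** register (power base + 0x104), program the new FSCALE value with the
++    ** load strobe set, clear the strobe to latch it, then restore the
++    ** original gating configuration. */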
++
++ gcmkVERIFY(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_GetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT * FscaleValue,
++ IN gctUINT * MinFscaleValue,
++ IN gctUINT * MaxFscaleValue
++ )
++{
++ *FscaleValue = Hardware->powerOnFscaleVal;
++ if ((gpu3DMinClock > 0) && (gpu3DMinClock <= 64) && (Hardware->core == gcvCORE_MAJOR))
++ *MinFscaleValue = gpu3DMinClock;
++ else
++ *MinFscaleValue = 1;
++ *MaxFscaleValue = 64;
++
++ return gcvSTATUS_OK;
++}
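++
++/* A minimal usage sketch (names as in this file): query the current and
++** permitted scale values, then halve the clock, assuming the common
++** 64-step scaler where 64 means full speed:
++**
++**     gctUINT cur, min, max;
++**     gcmkVERIFY_OK(gckHARDWARE_GetFscaleValue(hardware, &cur, &min, &max));
++**     gcmkVERIFY_OK(gckHARDWARE_SetFscaleValue(hardware, max / 2));
++**
++** 'hardware' is a hypothetical gckHARDWARE handle owned by the caller. */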
++
++#endif
++
++#if gcdPOWEROFF_TIMEOUT
++gceSTATUS
++gckHARDWARE_SetPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Timeout
++)
++{
++ gcmkHEADER_ARG("Hardware=0x%x Timeout=%d", Hardware, Timeout);
++
++ Hardware->powerOffTimeout = Timeout;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++gceSTATUS
++gckHARDWARE_QueryPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++)
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ *Timeout = Hardware->powerOffTimeout;
++
++ gcmkFOOTER_ARG("*Timeout=%d", *Timeout);
++ return gcvSTATUS_OK;
++}
++#endif
++
++gceSTATUS
++gckHARDWARE_QueryIdle(
++ IN gckHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ )
++{
++ gceSTATUS status;
++ gctUINT32 idle, address;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(IsIdle != gcvNULL);
++
++ /* We are idle when the power is not ON. */
++ if (Hardware->chipPowerState != gcvPOWER_ON)
++ {
++ *IsIdle = gcvTRUE;
++ }
++
++ else
++ {
++ /* Read idle register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00004, &idle));
++
++ /* Pipe must be idle. */
++ if (((((((gctUINT32) (idle)) >> (0 ? 1:1)) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 3:3)) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 4:4)) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 6:6)) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 7:7)) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 2:2)) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) ) != 1)
++ )
++ {
++ /* Something is busy. */
++ *IsIdle = gcvFALSE;
++ }
++
++ else
++ {
++ /* Read the current FE address. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00664,
++ &address));
++
++            /* Test if the address is inside the last WAIT/LINK sequence.
++            ** An idle FE spins in a 16-byte WAIT/LINK loop, so an address
++            ** within that window means no further commands are being
++            ** fetched. */
++ if ((address >= Hardware->lastWaitLink)
++ && (address <= Hardware->lastWaitLink + 16)
++ )
++ {
++ /* FE is in last WAIT/LINK and the pipe is idle. */
++ *IsIdle = gcvTRUE;
++ }
++ else
++ {
++ /* FE is not in WAIT/LINK yet. */
++ *IsIdle = gcvFALSE;
++ }
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** Handy macros that will help in reading those debug registers.
++*/
++
++#define gcmkREAD_DEBUG_REGISTER(control, block, index, data) \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ index))); \
++ gcmkONERROR(\
++ gckOS_ReadRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_SIGNALS_##block##_Address, \
++ &profiler->data))
++
++#define gcmkREAD_DEBUG_REGISTER_N(control, block, index, data) \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ index))); \
++ gcmkONERROR(\
++ gckOS_ReadRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_SIGNALS_##block##_Address, \
++ &data))
++
++#define gcmkRESET_DEBUG_REGISTER(control, block) \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ 15))); \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ 0)))
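++
++/* Illustrative expansion, assuming GC_DEBUG_CONTROLn_Address and
++** GC_DEBUG_SIGNALS_<block>_Address come from the register headers and
++** using a hypothetical block name PE:
++**
++**     gcmkREAD_DEBUG_REGISTER_N(0, PE, 3, data);
++**
++** writes 3 into the PE field of debug control register 0, then reads the
++** PE debug signals register into 'data'. The RESET variant writes 15 and
++** then 0 into the field, which clears the selected counter. */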
++
++/*******************************************************************************
++**
++** gckHARDWARE_ProfileEngine2D
++**
++** Read the profile registers available in the 2D engine and set them in the
++** profile structure. The function also resets the pixelsRendered counter each
++** time.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** OPTIONAL gcs2D_PROFILE_PTR Profile
++** Pointer to a gcs2D_Profile structure.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_ProfileEngine2D(
++ IN gckHARDWARE Hardware,
++ OPTIONAL gcs2D_PROFILE_PTR Profile
++ )
++{
++ gceSTATUS status;
++ gcs2D_PROFILE_PTR profiler = Profile;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Profile != gcvNULL)
++ {
++ /* Read the cycle count. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &Profile->cycleCount));
++
++ /* Read pixels rendered by 2D engine. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &profiler->pixelsRendered));
++
++ /* Reset counter. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gckHARDWARE_QueryProfileRegisters(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Reset,
++ OUT gcsPROFILER_COUNTERS * Counters
++ )
++{
++ gceSTATUS status;
++ gcsPROFILER_COUNTERS * profiler = Counters;
++ gctUINT i, clock;
++ gctUINT32 colorKilled, colorDrawn, depthKilled, depthDrawn;
++ gctUINT32 totalRead, totalWrite;
++
++ gcmkHEADER_ARG("Hardware=0x%x Counters=0x%x", Hardware, Counters);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Read the counters. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &profiler->gpuCyclesCounter));
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ &profiler->gpuTotalCyclesCounter));
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0007C,
++ &profiler->gpuIdleCyclesCounter));
++
++
++ /* Read clock control register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &clock));
++
++ profiler->gpuTotalRead64BytesPerFrame = 0;
++ profiler->gpuTotalWrite64BytesPerFrame = 0;
++ profiler->pe_pixel_count_killed_by_color_pipe = 0;
++ profiler->pe_pixel_count_killed_by_depth_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_color_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_depth_pipe = 0;
++
++    /* Walk through all available pixel pipes. */
++ for (i = 0; i < Hardware->identity.pixelPipes; ++i)
++ {
++ /* Select proper pipe. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20)))));
++
++ /* BW */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00040,
++ &totalRead));
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00044,
++ &totalWrite));
++
++ profiler->gpuTotalRead64BytesPerFrame += totalRead;
++ profiler->gpuTotalWrite64BytesPerFrame += totalWrite;
++
++ /* PE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorDrawn));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthDrawn));
++
++ profiler->pe_pixel_count_killed_by_color_pipe += colorKilled;
++ profiler->pe_pixel_count_killed_by_depth_pipe += depthKilled;
++ profiler->pe_pixel_count_drawn_by_color_pipe += colorDrawn;
++ profiler->pe_pixel_count_drawn_by_depth_pipe += depthDrawn;
++ }
++
++ /* Reset clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ if(Reset){
++ /* Reset counters. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 1));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00438, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00078, 0));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++ }
++
++ /* SH */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->ps_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_pixel_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vs_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_vertice_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_branch_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (12) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_texld_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (13) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_branch_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (14) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_texld_inst_counter));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));}
++
++ /* PA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_vtx_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_prim_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_output_prim_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_depth_clipped_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_trivial_rejected_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_culled_counter));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));}
++
++ /* SE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_triangle_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_lines_count));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));}
++
++ /* RA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_pixel_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_quad_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_quad_count_after_early_z));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_primitive_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_pipe_cache_miss_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_prefetch_cache_miss_counter));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));}
++
++ /* TX */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_bilinear_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_trilinear_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_discarded_texture_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_texture_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_in_8B_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_hit_texel_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_texel_count));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));}
++
++ /* MC */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_pipeline));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_IP));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_write_req_8B_from_pipeline));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));}
++
++ /* HI */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_read_request_stalled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_request_stalled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_data_stalled));
++ if(Reset){ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));}
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
++#define gcmkUPDATE_PROFILE_DATA(data) \
++ profilerHistroy->data += profiler->data
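++/* For example, gcmkUPDATE_PROFILE_DATA(ps_inst_counter) expands to
++** "profilerHistroy->ps_inst_counter += profiler->ps_inst_counter",
++** folding the freshly sampled counters into the per-context history
++** that gckHARDWARE_QueryContextProfile hands back to the caller. */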
++
++gceSTATUS
++gckHARDWARE_QueryContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Reset,
++ IN gckCONTEXT Context,
++ OUT gcsPROFILER_COUNTERS * Counters
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command = Hardware->kernel->command;
++ gcsPROFILER_COUNTERS * profiler = Counters;
++
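++    /* The history counters are accumulated by
++    ** gckHARDWARE_UpdateContextProfile below (presumably on each
++    ** context switch); this entry point snapshots them for the caller
++    ** under the context sequence mutex and optionally clears them. */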
++ gcmkHEADER_ARG("Hardware=0x%x Counters=0x%x", Hardware, Counters);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++    /* Acquire the context sequence mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ command->os, command->mutexContextSeq, gcvINFINITE
++ ));
++
++ /* Read the counters. */
++ gcmkVERIFY_OK(gckOS_MemCopy(
++ profiler, &Context->histroyProfiler, gcmSIZEOF(gcsPROFILER_COUNTERS)
++ ));
++
++ if (Reset)
++ {
++ /* Reset counters. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ &Context->histroyProfiler, gcmSIZEOF(gcsPROFILER_COUNTERS)
++ ));
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ command->os, command->mutexContextSeq
++ ));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_UpdateContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status;
++ gcsPROFILER_COUNTERS * profiler = &Context->latestProfiler;
++ gcsPROFILER_COUNTERS * profilerHistroy = &Context->histroyProfiler;
++ gctUINT i, clock;
++ gctUINT32 colorKilled, colorDrawn, depthKilled, depthDrawn;
++ gctUINT32 totalRead, totalWrite;
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 temp;
++ gctBOOL needResetShader = gcvFALSE;
++
++ gcmkHEADER_ARG("Hardware=0x%x Context=0x%x", Hardware, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ chipModel = Hardware->identity.chipModel;
++ chipRevision = Hardware->identity.chipRevision;
++ if (chipModel == gcv2000 || (chipModel == gcv2100 && chipRevision == 0x5118))
++ {
++ needResetShader = gcvTRUE;
++ }
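++    /* On these parts the shader counters do not appear to be cleared
++    ** by the debug control writes, so the code below keeps the raw
++    ** previous readings in the context and reports the difference
++    ** instead (inferred from the prev*Count bookkeeping). */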
++
++ /* Read the counters. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &profiler->gpuCyclesCounter));
++ gcmkUPDATE_PROFILE_DATA(gpuCyclesCounter);
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ &profiler->gpuTotalCyclesCounter));
++ gcmkUPDATE_PROFILE_DATA(gpuTotalCyclesCounter);
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0007C,
++ &profiler->gpuIdleCyclesCounter));
++ gcmkUPDATE_PROFILE_DATA(gpuIdleCyclesCounter);
++
++ /* Read clock control register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &clock));
++
++ profiler->gpuTotalRead64BytesPerFrame = 0;
++ profiler->gpuTotalWrite64BytesPerFrame = 0;
++ profiler->pe_pixel_count_killed_by_color_pipe = 0;
++ profiler->pe_pixel_count_killed_by_depth_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_color_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_depth_pipe = 0;
++
++    /* Walk through all available pixel pipes. */
++ for (i = 0; i < Hardware->identity.pixelPipes; ++i)
++ {
++ /* Select proper pipe. */
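++        /* Per the expanded field macro below, bits 23:20 of the clock
++        ** control register (0x00000) select which pixel pipe's debug
++        ** counters the subsequent reads observe. */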
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20)))));
++
++ /* BW */
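++        /* Registers 0x00040/0x00044 hold the selected pipe's total
++        ** read/write counts; judging by the destination field names
++        ** they count 64-byte transfers and are summed over all pipes. */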
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00040,
++ &totalRead));
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00044,
++ &totalWrite));
++
++ profiler->gpuTotalRead64BytesPerFrame += totalRead;
++ profiler->gpuTotalWrite64BytesPerFrame += totalWrite;
++ gcmkUPDATE_PROFILE_DATA(gpuTotalRead64BytesPerFrame);
++ gcmkUPDATE_PROFILE_DATA(gpuTotalWrite64BytesPerFrame);
++
++ /* PE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorDrawn));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthDrawn));
++
++ profiler->pe_pixel_count_killed_by_color_pipe += colorKilled;
++ profiler->pe_pixel_count_killed_by_depth_pipe += depthKilled;
++ profiler->pe_pixel_count_drawn_by_color_pipe += colorDrawn;
++ profiler->pe_pixel_count_drawn_by_depth_pipe += depthDrawn;
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_killed_by_color_pipe);
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_killed_by_depth_pipe);
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_drawn_by_color_pipe);
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_drawn_by_depth_pipe);
++ }
++
++ /* Reset clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Reset counters. */
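++    /* Writing 1 then 0 to 0x0003C presumably pulses a global debug
++    ** counter reset, while the cycle counters at 0x00438 and 0x00078
++    ** are cleared by writing zero directly. */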
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 1));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00438, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00078, 0));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++
++ /* SH */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->ps_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->ps_inst_counter;
++ profiler->ps_inst_counter -= Context->prevPSInstCount;
++ Context->prevPSInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(ps_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_pixel_counter));
++ if (needResetShader)
++ {
++ temp = profiler->rendered_pixel_counter;
++ profiler->rendered_pixel_counter -= Context->prevPSPixelCount;
++ Context->prevPSPixelCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(rendered_pixel_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vs_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->vs_inst_counter;
++ profiler->vs_inst_counter -= Context->prevVSInstCount;
++ Context->prevVSInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(vs_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_vertice_counter));
++ if (needResetShader)
++ {
++ temp = profiler->rendered_vertice_counter;
++ profiler->rendered_vertice_counter -= Context->prevVSVertexCount;
++ Context->prevVSVertexCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(rendered_vertice_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_branch_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->vtx_branch_inst_counter;
++ profiler->vtx_branch_inst_counter -= Context->prevVSBranchInstCount;
++ Context->prevVSBranchInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(vtx_branch_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (12) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_texld_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->vtx_texld_inst_counter;
++ profiler->vtx_texld_inst_counter -= Context->prevVSTexInstCount;
++ Context->prevVSTexInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(vtx_texld_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (13) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_branch_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->pxl_branch_inst_counter;
++ profiler->pxl_branch_inst_counter -= Context->prevPSBranchInstCount;
++ Context->prevPSBranchInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(pxl_branch_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (14) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_texld_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->pxl_texld_inst_counter;
++ profiler->pxl_texld_inst_counter -= Context->prevPSTexInstCount;
++ Context->prevPSTexInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(pxl_texld_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));
++
++ /* PA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_vtx_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_input_vtx_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_prim_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_input_prim_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_output_prim_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_output_prim_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_depth_clipped_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_depth_clipped_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_trivial_rejected_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_trivial_rejected_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_culled_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_culled_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));
++
++ /* SE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_triangle_count));
++ gcmkUPDATE_PROFILE_DATA(se_culled_triangle_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_lines_count));
++ gcmkUPDATE_PROFILE_DATA(se_culled_lines_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));
++
++ /* RA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_pixel_count));
++ gcmkUPDATE_PROFILE_DATA(ra_valid_pixel_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_quad_count));
++ gcmkUPDATE_PROFILE_DATA(ra_total_quad_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_quad_count_after_early_z));
++ gcmkUPDATE_PROFILE_DATA(ra_valid_quad_count_after_early_z);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_primitive_count));
++ gcmkUPDATE_PROFILE_DATA(ra_total_primitive_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_pipe_cache_miss_counter));
++ gcmkUPDATE_PROFILE_DATA(ra_pipe_cache_miss_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_prefetch_cache_miss_counter));
++ gcmkUPDATE_PROFILE_DATA(ra_prefetch_cache_miss_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++
++ /* TX */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_bilinear_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_bilinear_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_trilinear_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_trilinear_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_discarded_texture_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_discarded_texture_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_texture_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_texture_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_count));
++ gcmkUPDATE_PROFILE_DATA(tx_mem_read_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_in_8B_count));
++ gcmkUPDATE_PROFILE_DATA(tx_mem_read_in_8B_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_count));
++ gcmkUPDATE_PROFILE_DATA(tx_cache_miss_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_hit_texel_count));
++ gcmkUPDATE_PROFILE_DATA(tx_cache_hit_texel_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_texel_count));
++ gcmkUPDATE_PROFILE_DATA(tx_cache_miss_texel_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));
++
++ /* MC */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_pipeline));
++ gcmkUPDATE_PROFILE_DATA(mc_total_read_req_8B_from_pipeline);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_IP));
++ gcmkUPDATE_PROFILE_DATA(mc_total_read_req_8B_from_IP);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_write_req_8B_from_pipeline));
++ gcmkUPDATE_PROFILE_DATA(mc_total_write_req_8B_from_pipeline);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));
++
++ /* HI */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_read_request_stalled));
++ gcmkUPDATE_PROFILE_DATA(hi_axi_cycles_read_request_stalled);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_request_stalled));
++ gcmkUPDATE_PROFILE_DATA(hi_axi_cycles_write_request_stalled);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_data_stalled));
++ gcmkUPDATE_PROFILE_DATA(hi_axi_cycles_write_data_stalled);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++static gceSTATUS
++_ResetGPU(
++ IN gckHARDWARE Hardware,
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gctUINT32 control, idle;
++ gceSTATUS status;
++
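++    /* Retry the whole sequence (ungate clocks, quiet the pulse-eater,
++    ** isolate the core, pulse soft reset, de-isolate) until the core
++    ** reports idle with its clocks running. */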
++ for (;;)
++ {
++ /* Disable clock gating. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ Hardware->powerBaseAddress +
++ 0x00104,
++ 0x00000000));
++
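++        /* 0x01590880 appears to be the pulse-eater register's default
++        ** value; the expanded macro sets bit 17 on top of it (the
++        ** field's name is not recoverable from this pre-expanded
++        ** source). */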
++ control = ((((gctUINT32) (0x01590880)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17)));
++
++ /* Disable pulse-eater. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0010C,
++ control));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0010C,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0010C,
++ control));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ 0x00000900));
++
++        /* Wait for the clock to stabilize. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Isolate the GPU. */
++ control = ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ control));
++
++ /* Set soft reset. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Wait for reset. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Reset soft reset bit. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Reset GPU isolation. */
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ control));
++
++ /* Read idle register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ Core,
++ 0x00004,
++ &idle));
++
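++        /* Bit 0 of the idle register (0x00004), conventionally the
++        ** front-end idle flag, must read 1 before the reset is
++        ** considered to have taken. */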
++ if ((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) == 0)
++ {
++ continue;
++ }
++
++ /* Read reset register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ Core,
++ 0x00000,
++ &control));
++
++ if (((((((gctUINT32) (control)) >> (0 ? 16:16)) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) ) == 0)
++ || ((((((gctUINT32) (control)) >> (0 ? 17:17)) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1)))))) ) == 0)
++ )
++ {
++ continue;
++ }
++
++ /* GPU is idle. */
++ break;
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the error. */
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_Reset(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL mutexAcquired = gcvFALSE;
++ gctUINT32 process, thread;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_OBJECT(Hardware->kernel, gcvOBJ_KERNEL);
++ command = Hardware->kernel->command;
++ gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
++
++ if (Hardware->identity.chipRevision < 0x4600)
++ {
++ /* Not supported - we need the isolation bit. */
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++ }
++
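++    /* Try the power mutex without blocking: a timeout while this very
++    ** process/thread already holds it means we were re-entered from
++    ** the power management path itself, which cannot be recovered
++    ** (see below). */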
++ status = gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, 0);
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ gcmkONERROR(gckOS_GetProcessID(&process));
++ gcmkONERROR(gckOS_GetThreadID(&thread));
++
++ if ((Hardware->powerProcess == process)
++ && (Hardware->powerThread == thread))
++ {
++            /* No way to recover from an error in power management. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ mutexAcquired = gcvTRUE;
++ }
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(
++ gckOS_AcquireSemaphore(Hardware->os, command->powerSemaphore));
++ acquired = gcvTRUE;
++ }
++
++ if ((Hardware->chipPowerState == gcvPOWER_ON)
++ || (Hardware->chipPowerState == gcvPOWER_IDLE)
++ )
++ {
++ /* Stop the command processor. */
++ gcmkONERROR(gckCOMMAND_Stop(command, gcvTRUE));
++ }
++
++    /* Stop the ISR; we will start it again when powering the GPU back on. */
++ if (Hardware->stopIsr)
++ {
++ gcmkONERROR(Hardware->stopIsr(Hardware->isrContext, Hardware->core));
++ }
++
++ /* Hardware reset. */
++ status = gckOS_ResetGPU(Hardware->os, Hardware->core);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Soft reset. */
++ gcmkONERROR(_ResetGPU(Hardware, Hardware->os, Hardware->core));
++ }
++
++ /* Force an OFF to ON power switch. */
++ Hardware->chipPowerState = gcvPOWER_OFF;
++
++ gcmkONERROR(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ mutexAcquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the power management semaphore. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseSemaphore(Hardware->os, command->powerSemaphore));
++ }
++
++ if (mutexAcquired)
++ {
++ gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex);
++ }
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_GetBaseAddress(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32_PTR BaseAddress
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(BaseAddress != gcvNULL);
++
++ /* Test if we have a new Memory Controller. */
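++    /* Per the field macro below, bit 22 of chipMinorFeatures flags the
++    ** newer memory controller, whose addresses need no CPU-visible
++    ** base address applied. */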
++ if (((((gctUINT32) (Hardware->identity.chipMinorFeatures)) >> (0 ? 22:22) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1))))))))
++ {
++ /* No base address required. */
++ *BaseAddress = 0;
++ }
++ else
++ {
++ /* Get the base address from the OS. */
++ gcmkONERROR(gckOS_GetBaseAddress(Hardware->os, BaseAddress));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*BaseAddress=0x%08x", *BaseAddress);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_NeedBaseAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 State,
++ OUT gctBOOL_PTR NeedBase
++ )
++{
++ gctBOOL need = gcvFALSE;
++
++ gcmkHEADER_ARG("Hardware=0x%x State=0x%08x", Hardware, State);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(NeedBase != gcvNULL);
++
++ /* Make sure this is a load state. */
++ if (((((gctUINT32) (State)) >> (0 ? 31:27) & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))))
++ {
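++        /* Per the field macros in the test above, the command opcode
++        ** lives in bits 31:27 (0x01 being a load-state command) and
++        ** the state address in bits 15:0. */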
++#ifndef VIVANTE_NO_3D
++ /* Get the state address. */
++ switch ((((((gctUINT32) (State)) >> (0 ? 15:0)) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1)))))) ))
++ {
++ case 0x0596:
++ case 0x0597:
++ case 0x0599:
++ case 0x059A:
++ case 0x05A9:
++ /* These states need a TRUE physical address. */
++ need = gcvTRUE;
++ break;
++ }
++#else
++ /* 2D addresses don't need a base address. */
++#endif
++ }
++
++ /* Return the flag. */
++ *NeedBase = need;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*NeedBase=%d", *NeedBase);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckHARDWARE_SetIsrManager(
++ IN gckHARDWARE Hardware,
++ IN gctISRMANAGERFUNC StartIsr,
++ IN gctISRMANAGERFUNC StopIsr,
++ IN gctPOINTER Context
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ gcmkHEADER_ARG("Hardware=0x%x, StartIsr=0x%x, StopIsr=0x%x, Context=0x%x",
++ Hardware, StartIsr, StopIsr, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (StartIsr == gcvNULL ||
++ StopIsr == gcvNULL ||
++ Context == gcvNULL)
++ {
++ status = gcvSTATUS_INVALID_ARGUMENT;
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ Hardware->startIsr = StartIsr;
++ Hardware->stopIsr = StopIsr;
++ Hardware->isrContext = Context;
++
++ /* Success. */
++ gcmkFOOTER();
++
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Compose
++**
++** Start a composition.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
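++**
++**      gctUINT32 ProcessID
++**          Process ID of the owner of the command buffer (used for
++**          the optional cache flush).
++**
++**      gctPHYS_ADDR Physical
++**          Physical address of the composition command buffer.
++**
++**      gctPOINTER Logical
++**          Logical address of the composition command buffer.
++**
++**      gctSIZE_T Offset
++**          Offset into the buffer at which the commands start.
++**
++**      gctSIZE_T Size
++**          Size of the commands in bytes; Size + 8 must be a multiple
++**          of 64 (see the argument check in the code).
++**
++**      gctUINT8 EventID
++**          Event to trigger once the composition completes.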
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Compose(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Size,
++ IN gctUINT8 EventID
++ )
++{
++#ifndef VIVANTE_NO_3D
++ gceSTATUS status;
++ gctUINT32_PTR triggerState;
++
++ gcmkHEADER_ARG("Hardware=0x%x Physical=0x%x Logical=0x%x"
++ " Offset=%d Size=%d EventID=%d",
++ Hardware, Physical, Logical, Offset, Size, EventID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(((Size + 8) & 63) == 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
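++ /* Size plus the two trigger words appended below (8 bytes) must end
++ ** on a 64-byte boundary. */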
++
++ /* Program the trigger state. */
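++ /* Two extra words follow the command buffer: the value 0x0C03 and a
++ ** packed control word that, among other fixed fields, carries
++ ** EventID in bits [20:16]. */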
++ triggerState = (gctUINT32_PTR) ((gctUINT8_PTR) Logical + Offset + Size);
++ triggerState[0] = 0x0C03;
++ triggerState[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:4) - (0 ? 5:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:4) - (0 ? 5:4) + 1))))))) << (0 ? 5:4))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 5:4) - (0 ? 5:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:4) - (0 ? 5:4) + 1))))))) << (0 ? 5:4)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:16) - (0 ? 20:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:16) - (0 ? 20:16) + 1))))))) << (0 ? 20:16))) | (((gctUINT32) ((gctUINT32) (EventID) & ((gctUINT32) ((((1 ? 20:16) - (0 ? 20:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:16) - (0 ? 20:16) + 1))))))) << (0 ? 20:16)))
++ ;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the wait/link. */
++ gcmkONERROR(gckOS_CacheClean(
++ Hardware->os, ProcessID, gcvNULL,
++ Physical, Logical, Offset + Size
++ ));
++#endif
++
++ /* Start composition. */
++ gcmkONERROR(gckOS_WriteRegisterEx(
++ Hardware->os, Hardware->core, 0x00554,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)))
++ ));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ /* Return the status. */
++ return gcvSTATUS_NOT_SUPPORTED;
++#endif
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_IsFeatureAvailable
++**
++** Verifies whether the specified feature is available in hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gceFEATURE Feature
++** Feature to be verified.
++*/
++gceSTATUS
++gckHARDWARE_IsFeatureAvailable(
++ IN gckHARDWARE Hardware,
++ IN gceFEATURE Feature
++ )
++{
++ gctBOOL available;
++
++ gcmkHEADER_ARG("Hardware=0x%x Feature=%d", Hardware, Feature);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Only features needed by common kernel logic added here. */
++ switch (Feature)
++ {
++ case gcvFEATURE_END_EVENT:
++ /*available = gcmVERIFYFIELDVALUE(Hardware->identity.chipMinorFeatures2,
++ GC_MINOR_FEATURES2, END_EVENT, AVAILABLE
++ );*/
++ available = gcvFALSE;
++ break;
++ case gcvFEATURE_MC20:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures)) >> (0 ? 22:22) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))));
++ break;
++ case gcvFEATURE_DYNAMIC_FREQUENCY_SCALING:
++ /* This feature doesn't apply for 2D cores. */
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures2)) >> (0 ? 14:14) & ((gctUINT32) ((((1 ? 14:14) - (0 ? 14:14) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 14:14) - (0 ? 14:14) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 14:14) - (0 ? 14:14) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 14:14) - (0 ? 14:14) + 1)))))))
++ && ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 2:2) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))));
++ break;
++
++ case gcvFEATURE_PIPE_2D:
++ available = ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 9:9) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))));
++ break;
++
++ case gcvFEATURE_PIPE_3D:
++#ifndef VIVANTE_NO_3D
++ available = ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 2:2) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))));
++#else
++ available = gcvFALSE;
++#endif
++ break;
++
++ case gcvFEATURE_HALTI2:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures4)) >> (0 ? 16:16) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))));
++ break;
++
++ default:
++ gcmkFATAL("Invalid feature has been requested.");
++ available = gcvFALSE;
++ }
++
++ /* Return result. */
++ gcmkFOOTER_ARG("%d", available ? gcvSTATUS_TRUE : gcvSTATUS_OK);
++ return available ? gcvSTATUS_TRUE : gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_DumpMMUException
++**
++** Dump the MMU debug info on an MMU exception.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_DumpMMUException(
++ IN gckHARDWARE Hardware
++ )
++{
++#if !gcdPOWER_SUSNPEND_WHEN_IDLE && !gcdPOWEROFF_TIMEOUT
++ gctUINT32 mmu, mmuStatus, address, i;
++#if gcdDEBUG
++ gctUINT32 mtlb, stlb, offset;
++#endif
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ gcmkPRINT("GPU[%d](ChipModel=0x%x ChipRevision=0x%x):\n",
++ Hardware->core,
++ Hardware->identity.chipModel,
++ Hardware->identity.chipRevision);
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("*** MMU ERROR DUMP ***\n");
++ gcmkPRINT("**************************\n");
++
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00188,
++ &mmuStatus));
++
++ gcmkPRINT(" MMU status = 0x%08X\n", mmuStatus);
++
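++ /* Each of the four MMUs reports its state in one 4-bit nibble of
++ ** the status register; walk the nibbles from least significant up. */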
++ for (i = 0; i < 4; i += 1)
++ {
++ mmu = mmuStatus & 0xF;
++ mmuStatus >>= 4;
++
++ if (mmu == 0)
++ {
++ continue;
++ }
++
++ switch (mmu)
++ {
++ case 1:
++ gcmkPRINT(" MMU%d: slave not present\n", i);
++ break;
++
++ case 2:
++ gcmkPRINT(" MMU%d: page not present\n", i);
++ break;
++
++ case 3:
++ gcmkPRINT(" MMU%d: write violation\n", i);
++ break;
++
++ default:
++ gcmkPRINT(" MMU%d: unknown state\n", i);
++ }
++
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00190 + i * 4,
++ &address));
++
++ mtlb = (address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT;
++ stlb = (address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT;
++ offset = address & gcdMMU_OFFSET_4K_MASK;
++
++ gcmkPRINT(" MMU%d: exception address = 0x%08X\n", i, address);
++
++ gcmkPRINT(" MTLB entry = %d\n", mtlb);
++
++ gcmkPRINT(" STLB entry = %d\n", stlb);
++
++ gcmkPRINT(" Offset = 0x%08X (%d)\n", offset, offset);
++
++ gckMMU_DumpPageTableEntry(Hardware->kernel->mmu, address);
++
++ }
++
++ gcmkFOOTER_NO();
++#else
++ /* If the clock can be gated off automatically, the MMU debug registers
++ ** cannot be read here; build the driver with gcdPOWER_SUSNPEND_WHEN_IDLE = 0
++ ** and gcdPOWEROFF_TIMEOUT = 0 to make reading them safe. */
++ gcmkPRINT("[galcore] %s(%d): MMU Exception!", __FUNCTION__, __LINE__);
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_DumpGPUState
++**
++** Dump the GPU debug registers.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_DumpGPUState(
++ IN gckHARDWARE Hardware
++ )
++{
++ static gctCONST_STRING _cmdState[] =
++ {
++ "PAR_IDLE_ST", "PAR_DEC_ST", "PAR_ADR0_ST", "PAR_LOAD0_ST",
++ "PAR_ADR1_ST", "PAR_LOAD1_ST", "PAR_3DADR_ST", "PAR_3DCMD_ST",
++ "PAR_3DCNTL_ST", "PAR_3DIDXCNTL_ST", "PAR_INITREQDMA_ST",
++ "PAR_DRAWIDX_ST", "PAR_DRAW_ST", "PAR_2DRECT0_ST", "PAR_2DRECT1_ST",
++ "PAR_2DDATA0_ST", "PAR_2DDATA1_ST", "PAR_WAITFIFO_ST", "PAR_WAIT_ST",
++ "PAR_LINK_ST", "PAR_END_ST", "PAR_STALL_ST"
++ };
++
++ static gctCONST_STRING _cmdDmaState[] =
++ {
++ "CMD_IDLE_ST", "CMD_START_ST", "CMD_REQ_ST", "CMD_END_ST"
++ };
++
++ static gctCONST_STRING _cmdFetState[] =
++ {
++ "FET_IDLE_ST", "FET_RAMVALID_ST", "FET_VALID_ST"
++ };
++
++ static gctCONST_STRING _reqDmaState[] =
++ {
++ "REQ_IDLE_ST", "REQ_WAITIDX_ST", "REQ_CAL_ST"
++ };
++
++ static gctCONST_STRING _calState[] =
++ {
++ "CAL_IDLE_ST", "CAL_LDADR_ST", "CAL_IDXCALC_ST"
++ };
++
++ static gctCONST_STRING _veReqState[] =
++ {
++ "VER_IDLE_ST", "VER_CKCACHE_ST", "VER_MISS_ST"
++ };
++
++ static gcsiDEBUG_REGISTERS _dbgRegs[] =
++ {
++ { "RA", 0x474, 16, 0x448, 16, 0x12344321 },
++ { "TX", 0x474, 24, 0x44C, 16, 0x12211221 },
++ { "FE", 0x470, 0, 0x450, 16, 0xBABEF00D },
++ { "PE", 0x470, 16, 0x454, 16, 0xBABEF00D },
++ { "DE", 0x470, 8, 0x458, 16, 0xBABEF00D },
++ { "SH", 0x470, 24, 0x45C, 16, 0xDEADBEEF },
++ { "PA", 0x474, 0, 0x460, 16, 0x0000AAAA },
++ { "SE", 0x474, 8, 0x464, 16, 0x5E5E5E5E },
++ { "MC", 0x478, 0, 0x468, 16, 0x12345678 },
++ { "HI", 0x478, 8, 0x46C, 16, 0xAAAAAAAA }
++ };
++
++ static gctUINT32 _otherRegs[] =
++ {
++ 0x040, 0x044, 0x04C, 0x050, 0x054, 0x058, 0x05C, 0x060,
++ 0x43c, 0x440, 0x444, 0x414,
++ };
++
++ gceSTATUS status;
++ gckKERNEL kernel;
++ gctUINT32 idle, axi;
++ gctUINT32 dmaAddress1, dmaAddress2;
++ gctUINT32 dmaState1, dmaState2;
++ gctUINT32 dmaLow, dmaHigh;
++ gctUINT32 cmdState, cmdDmaState, cmdFetState;
++ gctUINT32 dmaReqState, calState, veReqState;
++ gctUINT i;
++ gctUINT pipe, pixelPipes;
++ gctUINT32 control, oldControl;
++ gckOS os = Hardware->os;
++ gceCORE core = Hardware->core;
++
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ kernel = Hardware->kernel;
++
++ gcmkPRINT_N(12, "GPU[%d](ChipModel=0x%x ChipRevision=0x%x):\n",
++ core,
++ Hardware->identity.chipModel,
++ Hardware->identity.chipRevision);
++
++ pixelPipes = Hardware->identity.pixelPipes
++ ? Hardware->identity.pixelPipes
++ : 1;
++
++ /* Reset register values. */
++ idle = axi =
++ dmaState1 = dmaState2 =
++ dmaAddress1 = dmaAddress2 =
++ dmaLow = dmaHigh = 0;
++
++ /* Verify whether DMA is running. */
++ gcmkONERROR(_VerifyDMA(
++ os, core, &dmaAddress1, &dmaAddress2, &dmaState1, &dmaState2
++ ));
++
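++ /* Decode the debug-state word: bits [4:0] command parser state,
++ ** [9:8] command DMA state, [11:10] fetch state, [13:12] DMA request
++ ** state, [15:14] calculation state, [17:16] VE request state. */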
++ cmdState = dmaState2 & 0x1F;
++ cmdDmaState = (dmaState2 >> 8) & 0x03;
++ cmdFetState = (dmaState2 >> 10) & 0x03;
++ dmaReqState = (dmaState2 >> 12) & 0x03;
++ calState = (dmaState2 >> 14) & 0x03;
++ veReqState = (dmaState2 >> 16) & 0x03;
++
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x004, &idle));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x00C, &axi));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x668, &dmaLow));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x66C, &dmaHigh));
++
++ gcmkPRINT_N(0, "**************************\n");
++ gcmkPRINT_N(0, "*** GPU STATE DUMP ***\n");
++ gcmkPRINT_N(0, "**************************\n");
++
++ gcmkPRINT_N(4, " axi = 0x%08X\n", axi);
++
++ gcmkPRINT_N(4, " idle = 0x%08X\n", idle);
++ if ((idle & 0x00000001) == 0) gcmkPRINT_N(0, " FE not idle\n");
++ if ((idle & 0x00000002) == 0) gcmkPRINT_N(0, " DE not idle\n");
++ if ((idle & 0x00000004) == 0) gcmkPRINT_N(0, " PE not idle\n");
++ if ((idle & 0x00000008) == 0) gcmkPRINT_N(0, " SH not idle\n");
++ if ((idle & 0x00000010) == 0) gcmkPRINT_N(0, " PA not idle\n");
++ if ((idle & 0x00000020) == 0) gcmkPRINT_N(0, " SE not idle\n");
++ if ((idle & 0x00000040) == 0) gcmkPRINT_N(0, " RA not idle\n");
++ if ((idle & 0x00000080) == 0) gcmkPRINT_N(0, " TX not idle\n");
++ if ((idle & 0x00000100) == 0) gcmkPRINT_N(0, " VG not idle\n");
++ if ((idle & 0x00000200) == 0) gcmkPRINT_N(0, " IM not idle\n");
++ if ((idle & 0x00000400) == 0) gcmkPRINT_N(0, " FP not idle\n");
++ if ((idle & 0x00000800) == 0) gcmkPRINT_N(0, " TS not idle\n");
++ if ((idle & 0x80000000) != 0) gcmkPRINT_N(0, " AXI low power mode\n");
++
++ if (
++ (dmaAddress1 == dmaAddress2)
++ && (dmaState1 == dmaState2)
++ )
++ {
++ gcmkPRINT_N(0, " DMA appears to be stuck at this address:\n");
++ gcmkPRINT_N(4, " 0x%08X\n", dmaAddress1);
++ }
++ else
++ {
++ if (dmaAddress1 == dmaAddress2)
++ {
++ gcmkPRINT_N(0, " DMA address is constant, but state is changing:\n");
++ gcmkPRINT_N(4, " 0x%08X\n", dmaState1);
++ gcmkPRINT_N(4, " 0x%08X\n", dmaState2);
++ }
++ else
++ {
++ gcmkPRINT_N(0, " DMA is running; known addresses are:\n");
++ gcmkPRINT_N(4, " 0x%08X\n", dmaAddress1);
++ gcmkPRINT_N(4, " 0x%08X\n", dmaAddress2);
++ }
++ }
++ gcmkPRINT_N(4, " dmaLow = 0x%08X\n", dmaLow);
++ gcmkPRINT_N(4, " dmaHigh = 0x%08X\n", dmaHigh);
++ gcmkPRINT_N(4, " dmaState = 0x%08X\n", dmaState2);
++ gcmkPRINT_N(8, " command state = %d (%s)\n", cmdState, _cmdState [cmdState]);
++ gcmkPRINT_N(8, " command DMA state = %d (%s)\n", cmdDmaState, _cmdDmaState[cmdDmaState]);
++ gcmkPRINT_N(8, " command fetch state = %d (%s)\n", cmdFetState, _cmdFetState[cmdFetState]);
++ gcmkPRINT_N(8, " DMA request state = %d (%s)\n", dmaReqState, _reqDmaState[dmaReqState]);
++ gcmkPRINT_N(8, " cal state = %d (%s)\n", calState, _calState [calState]);
++ gcmkPRINT_N(8, " VE request state = %d (%s)\n", veReqState, _veReqState [veReqState]);
++
++ /* Record control. */
++ gckOS_ReadRegisterEx(os, core, 0x0, &oldControl);
++
++ for (pipe = 0; pipe < pixelPipes; pipe++)
++ {
++ gcmkPRINT_N(4, " Debug registers of pipe[%d]:\n", pipe);
++
++ /* Switch pipe. */
++ gckOS_ReadRegisterEx(os, core, 0x0, &control);
++ control &= ~(0xF << 20);
++ control |= (pipe << 20);
++ gckOS_WriteRegisterEx(os, core, 0x0, control);
++
++ for (i = 0; i < gcmCOUNTOF(_dbgRegs); i += 1)
++ {
++ gcmkONERROR(_DumpDebugRegisters(os, core, &_dbgRegs[i]));
++ }
++
++ gcmkPRINT_N(0, " Other Registers:\n");
++ for (i = 0; i < gcmCOUNTOF(_otherRegs); i += 1)
++ {
++ gctUINT32 read;
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, _otherRegs[i], &read));
++ gcmkPRINT_N(12, " [0x%04X] 0x%08X\n", _otherRegs[i], read);
++ }
++ }
++
++ if (kernel->hardware->identity.chipFeatures & (1 << 4))
++ {
++ gctUINT32 read0, read1, write;
++
++ read0 = read1 = write = 0;
++
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x43C, &read0));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x440, &read1));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x444, &write));
++
++ gcmkPRINT_N(4, " read0 = 0x%08X\n", read0);
++ gcmkPRINT_N(4, " read1 = 0x%08X\n", read1);
++ gcmkPRINT_N(4, " write = 0x%08X\n", write);
++ }
++
++ /* Restore control. */
++ gckOS_WriteRegisterEx(os, core, 0x0, oldControl);
++
++ /* dump stack. */
++ gckOS_DumpCallStack(os);
++
++OnError:
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++
++#if gcdFRAME_DB
++static gceSTATUS
++gckHARDWARE_ReadPerformanceRegister(
++ IN gckHARDWARE Hardware,
++ IN gctUINT PerformanceAddress,
++ IN gctUINT IndexAddress,
++ IN gctUINT IndexShift,
++ IN gctUINT Index,
++ OUT gctUINT32_PTR Value
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x PerformanceAddress=0x%x IndexAddress=0x%x "
++ "IndexShift=%u Index=%u",
++ Hardware, PerformanceAddress, IndexAddress, IndexShift,
++ Index);
++
++ /* Write the index. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ IndexAddress,
++ Index << IndexShift));
++
++ /* Read the register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ PerformanceAddress,
++ Value));
++
++ /* Test for reset. */
++ if (Index == 15)
++ {
++ /* Index another register to get out of reset. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, IndexAddress, 0));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=0x%x", *Value);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_GetFrameInfo(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_FRAME_INFO * FrameInfo
++ )
++{
++ gceSTATUS status;
++ gctUINT i, clock;
++ gcsHAL_FRAME_INFO info;
++#if gcdFRAME_DB_RESET
++ gctUINT reset;
++#endif
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Get profile tick. */
++ gcmkONERROR(gckOS_GetProfileTick(&info.ticks));
++
++ /* Read SH counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 4,
++ &info.shaderCycles));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 9,
++ &info.vsInstructionCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 12,
++ &info.vsTextureCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 7,
++ &info.psInstructionCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 14,
++ &info.psTextureCount));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 15,
++ &reset));
++#endif
++
++ /* Read PA counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 3,
++ &info.vertexCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 4,
++ &info.primitiveCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 7,
++ &info.rejectedPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 8,
++ &info.culledPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 6,
++ &info.clippedPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 5,
++ &info.outPrimitives));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 15,
++ &reset));
++#endif
++
++ /* Read RA counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 3,
++ &info.inPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 11,
++ &info.culledQuadCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 1,
++ &info.totalQuadCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 2,
++ &info.quadCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 0,
++ &info.totalPixelCount));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 15,
++ &reset));
++#endif
++
++ /* Read TX counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 0,
++ &info.bilinearRequests));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 1,
++ &info.trilinearRequests));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 8,
++ &info.txHitCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 9,
++ &info.txMissCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 6,
++ &info.txBytes8));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 15,
++ &reset));
++#endif
++
++ /* Read clock control register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &clock));
++
++ /* Walk through all available pixel pipes. */
++ for (i = 0; i < Hardware->identity.pixelPipes; ++i)
++ {
++ /* Select proper pipe. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20)))));
++
++ /* Read cycle registers. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ &info.cycles[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0007C,
++ &info.idleCycles[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &info.mcCycles[i]));
++
++ /* Read bandwidth registers. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0005C,
++ &info.readRequests[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00040,
++ &info.readBytes8[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00050,
++ &info.writeRequests[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00044,
++ &info.writeBytes8[i]));
++
++ /* Read PE counters. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 0,
++ &info.colorKilled[i]));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 2,
++ &info.colorDrawn[i]));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 1,
++ &info.depthKilled[i]));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 3,
++ &info.depthDrawn[i]));
++ }
++
++ /* Zero out remaining reserved counters. */
++ for (; i < 8; ++i)
++ {
++ info.readBytes8[i] = 0;
++ info.writeBytes8[i] = 0;
++ info.cycles[i] = 0;
++ info.idleCycles[i] = 0;
++ info.mcCycles[i] = 0;
++ info.readRequests[i] = 0;
++ info.writeRequests[i] = 0;
++ info.colorKilled[i] = 0;
++ info.colorDrawn[i] = 0;
++ info.depthKilled[i] = 0;
++ info.depthDrawn[i] = 0;
++ }
++
++ /* Reset clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Reset cycle and bandwidth counters. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ 1));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ 0));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ 0));
++
++#if gcdFRAME_DB_RESET
++ /* Reset PE counters. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 15,
++ &reset));
++#endif
++
++ /* Copy to user. */
++ gcmkONERROR(gckOS_CopyToUserData(Hardware->os,
++ &info,
++ FrameInfo,
++ gcmSIZEOF(info)));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++#if gcdDVFS
++#define READ_FROM_EATER1 0
++
++gceSTATUS
++gckHARDWARE_QueryLoad(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 * Load
++ )
++{
++ gctUINT32 debug1;
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Load != gcvNULL);
++
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE);
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00110,
++ Load));
++#if READ_FROM_EATER1
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00134,
++ Load));
++#endif
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00114,
++ &debug1));
++
++ /* Patch result of 0x110 with result of 0x114. */
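++ /* Any byte lane of debug1 that reads exactly 1 forces the matching
++ ** byte lane of *Load to 1. */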
++ if ((debug1 & 0xFF) == 1)
++ {
++ *Load &= ~0xFF;
++ *Load |= 1;
++ }
++
++ if (((debug1 & 0xFF00) >> 8) == 1)
++ {
++ *Load &= ~(0xFF << 8);
++ *Load |= 1 << 8;
++ }
++
++ if (((debug1 & 0xFF0000) >> 16) == 1)
++ {
++ *Load &= ~(0xFF << 16);
++ *Load |= 1 << 16;
++ }
++
++ if (((debug1 & 0xFF000000) >> 24) == 1)
++ {
++ *Load &= ~(0xFF << 24);
++ *Load |= 1 << 24;
++ }
++ }
++ else
++ {
++ status = gcvSTATUS_INVALID_REQUEST;
++ }
++
++OnError:
++
++ gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex);
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_SetDVFSPeroid(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Frequency
++ )
++{
++ gceSTATUS status;
++ gctUINT32 period;
++ gctUINT32 eater;
++
++#if READ_FROM_EATER1
++ gctUINT32 period1;
++ gctUINT32 eater1;
++#endif
++
++ gcmkHEADER_ARG("Hardware=0x%X Frequency=%d", Hardware, Frequency);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ period = 0;
++
++ while((64 << period) < (gcdDVFS_ANAYLSE_WINDOW * Frequency * 1000) )
++ {
++ period++;
++ }
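++ /* period is now the smallest value for which 64 << period covers
++ ** gcdDVFS_ANAYLSE_WINDOW at the given frequency. */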
++
++#if READ_FROM_EATER1
++ /*
++ * Period = F * 1000 * 1000 / (60 * 16 * 1024);
++ */
++ period1 = Frequency * 6250 / 6114;
++#endif
++
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE);
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++ /* Get current configure. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ &eater));
++
++ /* Change period. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ ((((gctUINT32) (eater)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (period) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))));
++
++#if READ_FROM_EATER1
++ /* Config eater1. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00130,
++ &eater1));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00130,
++ ((((gctUINT32) (eater1)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:16) - (0 ? 31:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:16) - (0 ? 31:16) + 1))))))) << (0 ? 31:16))) | (((gctUINT32) ((gctUINT32) (period1) & ((gctUINT32) ((((1 ? 31:16) - (0 ? 31:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:16) - (0 ? 31:16) + 1))))))) << (0 ? 31:16)))));
++#endif
++ }
++ else
++ {
++ status = gcvSTATUS_INVALID_REQUEST;
++ }
++
++OnError:
++ gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex);
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_InitDVFS(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gctUINT32 data;
++
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ &data));
++
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1))))))) << (0 ? 18:18))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1))))))) << (0 ? 18:18)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1))))))) << (0 ? 22:22))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1))))))) << (0 ? 22:22)));
++
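++ /* Net effect: bits 16, 18, 20 and 23 of the load-eater control word
++ ** are set and bits 19 and 22 are cleared before it is written back
++ ** to register 0x0010C. */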
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "DVFS Configure=0x%X",
++ data);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ data));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.h linux-xbian-imx6/drivers/mxc/gpu-viv/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/arch/XAQ2/hal/kernel/gc_hal_kernel_hardware.h 2015-07-27 23:13:06.170964992 +0200
+@@ -0,0 +1,136 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_hardware_h_
++#define __gc_hal_kernel_hardware_h_
++
++#if gcdENABLE_VG
++#include "gc_hal_kernel_hardware_vg.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* gckHARDWARE object. */
++struct _gckHARDWARE
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckKERNEL object. */
++ gckKERNEL kernel;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Core */
++ gceCORE core;
++
++ /* Chip characteristics. */
++ gcsHAL_QUERY_CHIP_IDENTITY identity;
++ gctBOOL allowFastClear;
++ gctBOOL allowCompression;
++ gctUINT32 powerBaseAddress;
++ gctBOOL extraEventStates;
++
++ /* Big endian */
++ gctBOOL bigEndian;
++
++ /* Chip status */
++ gctPOINTER powerMutex;
++ gctUINT32 powerProcess;
++ gctUINT32 powerThread;
++ gceCHIPPOWERSTATE chipPowerState;
++ gctUINT32 lastWaitLink;
++ gctBOOL clockState;
++ gctBOOL powerState;
++ gctPOINTER globalSemaphore;
++
++ gctISRMANAGERFUNC startIsr;
++ gctISRMANAGERFUNC stopIsr;
++ gctPOINTER isrContext;
++
++ gctUINT32 mmuVersion;
++
++ /* Type */
++ gceHARDWARE_TYPE type;
++
++#if gcdPOWEROFF_TIMEOUT
++ gctUINT32 powerOffTime;
++ gctUINT32 powerOffTimeout;
++ gctPOINTER powerOffTimer;
++#endif
++
++ gctPOINTER pageTableDirty;
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ /* FSCALE_VAL when gcvPOWER_ON. */
++ gctUINT32 powerOnFscaleVal;
++#endif
++
++#if gcdLINK_QUEUE_SIZE
++ struct _gckLINKQUEUE linkQueue;
++#endif
++
++ gctBOOL powerManagement;
++ gctBOOL gpuProfiler;
++};
++
++gceSTATUS
++gckHARDWARE_GetBaseAddress(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32_PTR BaseAddress
++ );
++
++gceSTATUS
++gckHARDWARE_NeedBaseAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 State,
++ OUT gctBOOL_PTR NeedBase
++ );
++
++gceSTATUS
++gckHARDWARE_GetFrameInfo(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_FRAME_INFO * FrameInfo
++ );
++
++gceSTATUS
++gckHARDWARE_SetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 FscaleValue
++ );
++
++gceSTATUS
++gckHARDWARE_GetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT * FscaleValue,
++ IN gctUINT * MinFscaleValue,
++ IN gctUINT * MaxFscaleValue
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_hardware_h_ */
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/config linux-xbian-imx6/drivers/mxc/gpu-viv/config
+--- linux-4.1.3/drivers/mxc/gpu-viv/config 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/config 2015-07-27 23:13:06.170964992 +0200
+@@ -0,0 +1,38 @@
++##############################################################################
++#
++# Copyright (C) 2005 - 2013 by Vivante Corp.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the license, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not write to the Free Software
++# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++#
++##############################################################################
++
++
++ARCH_TYPE ?= arm
++SDK_DIR ?= $(AQROOT)/build/sdk
++USE_3D_VG ?= 1
++FORCE_ALL_VIDEO_MEMORY_CACHED ?= 0
++NONPAGED_MEMORY_CACHEABLE ?= 0
++NONPAGED_MEMORY_BUFFERABLE ?= 1
++CACHE_FUNCTION_UNIMPLEMENTED ?= 0
++VIVANTE_ENABLE_VG ?= 1
++NO_USER_DIRECT_ACCESS_FROM_KERNEL ?= 1
++VIVANTE_NO_3D ?= 0
++ENABLE_OUTER_CACHE_PATCH ?= 1
++USE_BANK_ALIGNMENT ?= 1
++BANK_BIT_START ?= 13
++BANK_BIT_END ?= 15
++BANK_CHANNEL_BIT ?= 12
++ENABLE_GPU_CLOCK_BY_DRIVER = 1
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/arch/gc_hal_kernel_context.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/arch/gc_hal_kernel_context.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/arch/gc_hal_kernel_context.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/arch/gc_hal_kernel_context.c 2015-07-27 23:13:06.186908111 +0200
+@@ -0,0 +1,2317 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#include "gc_hal_kernel_context.h"
++#include "gc_hal_kernel_buffer.h"
++
++/******************************************************************************\
++******************************** Debugging Macro *******************************
++\******************************************************************************/
++
++/* Zone used for header/footer. */
++#define _GC_OBJ_ZONE gcvZONE_HARDWARE
++
++
++/******************************************************************************\
++************************** Context State Buffer Helpers ************************
++\******************************************************************************/
++
++#define _STATE(reg) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ reg ## _Count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _STATE_COUNT(reg, count) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _STATE_COUNT_OFFSET(reg, offset, count) \
++ _State(\
++ Context, index, \
++ (reg ## _Address >> 2) + offset, \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _STATE_MIRROR_COUNT(reg, mirror, count) \
++ _StateMirror(\
++ Context, \
++ reg ## _Address >> 2, \
++ count, \
++ mirror ## _Address >> 2 \
++ )
++
++#define _STATE_HINT(reg) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ reg ## _Count, \
++ gcvFALSE, gcvTRUE \
++ )
++
++#define _STATE_HINT_BLOCK(reg, block, count) \
++ _State(\
++ Context, index, \
++ (reg ## _Address >> 2) + (block << reg ## _BLK), \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvTRUE \
++ )
++
++#define _STATE_COUNT_OFFSET_HINT(reg, offset, count) \
++ _State(\
++ Context, index, \
++ (reg ## _Address >> 2) + offset, \
++ reg ## _ResetValue, \
++ count, \
++ gcvFALSE, gcvTRUE \
++ )
++
++#define _STATE_X(reg) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ reg ## _ResetValue, \
++ reg ## _Count, \
++ gcvTRUE, gcvFALSE \
++ )
++
++#define _STATE_INIT_VALUE(reg, value) \
++ _State(\
++ Context, index, \
++ reg ## _Address >> 2, \
++ value, \
++ reg ## _Count, \
++ gcvFALSE, gcvFALSE \
++ )
++
++#define _CLOSE_RANGE() \
++ _TerminateStateBlock(Context, index)
++
++#define _ENABLE(reg, field) \
++ do \
++ { \
++ if (gcmVERIFYFIELDVALUE(data, reg, MASK_ ## field, ENABLED)) \
++ { \
++ enable |= gcmFIELDMASK(reg, field); \
++ } \
++ } \
++ while (gcvFALSE)
++
++#define _BLOCK_COUNT(reg) \
++ ((reg ## _Count) >> (reg ## _BLK))
++
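++/* Each _STATE* helper above expands to a _State() call that records one
++** register range (word address, reset value, count) in the context
++** buffer; the variants add offsets, hint flags or mirror addresses. */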
++
++/******************************************************************************\
++*********************** Support Functions and Definitions **********************
++\******************************************************************************/
++
++#define gcdSTATE_MASK \
++ (((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x03 | 0xC0FFEE & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))))
++
++#if gcdENABLE_3D
++static gctUINT32
++_TerminateStateBlock(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Index
++ )
++{
++ gctUINT32_PTR buffer;
++ gctUINT32 align;
++
++ /* Determine if we need alignment. */
++ align = (Index & 1) ? 1 : 0;
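++ /* An odd index needs one filler word; the assumption here is that
++ ** state loads must start on a 64-bit boundary, and the 0xDEADDEAD
++ ** written below is just a recognizable padding marker. */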
++
++ /* Address correct index. */
++ buffer = (Context->buffer == gcvNULL)
++ ? gcvNULL
++ : Context->buffer->logical;
++
++ /* Terminate the current state block; make sure it cannot pair with
++ the states that follow. */
++ if (align && (buffer != gcvNULL))
++ {
++ buffer[Index] = 0xDEADDEAD;
++ }
++
++ /* Reset last address. */
++ Context->lastAddress = ~0U;
++
++ /* Return alignment requirement. */
++ return align;
++}
++#endif
++
++
++#if (gcdENABLE_3D || gcdENABLE_2D)
++static gctUINT32
++_FlushPipe(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Index,
++ IN gcePIPE_SELECT Pipe
++ )
++{
++ gctBOOL fcFlushStall;
++ gctUINT32 flushSlots;
++ gctBOOL iCacheInvalidate;
++
++ fcFlushStall
++ = gckHARDWARE_IsFeatureAvailable(Context->hardware, gcvFEATURE_FC_FLUSH_STALL);
++
++ iCacheInvalidate
++ = ((((gctUINT32) (Context->hardware->identity.chipMinorFeatures3)) >> (0 ? 3:3) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))));
++
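++ /* Base cost: six command words (flush load-state pair, semaphore
++ ** pair, FE-to-PE stall pair); the FC flush-stall and I$ invalidate
++ ** paths below add six and twelve more, respectively. */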
++ flushSlots = 6;
++
++ if (fcFlushStall)
++ {
++ /* Flush tile status cache. */
++ flushSlots += 6;
++ }
++
++ if (iCacheInvalidate)
++ {
++ flushSlots += 12;
++ }
++
++ if (Context->buffer != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Address correct index. */
++ buffer = Context->buffer->logical + Index;
++
++ /* Flush the current pipe. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = (Pipe == gcvPIPE_2D)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ : ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ if (fcFlushStall)
++ {
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0594) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++
++ if (iCacheInvalidate)
++ {
++ /* Invalidate I$ after pipe is stalled */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0218) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x021A) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0218) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x021A) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++ }
++
++ /* Number of slots taken by flushing the pipe. */
++ return flushSlots;
++}
++#endif
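++
++/* A note on the literal expressions used throughout this file: they appear
++** to be pre-expanded bit-field helper macros. Reading the fields back out,
++** each command word carries its opcode in bits [31:27] (0x01 = LOAD_STATE,
++** 0x08 = LINK, 0x09 = STALL), and a LOAD_STATE word carries the state count
++** in bits [25:16] and the state address in bits [15:0].
++*/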
++
++#if gcdENABLE_3D
++static gctUINT32
++_SemaphoreStall(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Index
++ )
++{
++ if (Context->buffer != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Address correct index. */
++ buffer = Context->buffer->logical + Index;
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++
++ /* Semaphore/stall takes 4 slots. */
++ return 4;
++}
++#endif
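++
++/* Decode of the four words emitted above, read from the field layout in the
++** expressions: word 0 is LOAD_STATE(0x0E02, 1), word 1 raises a semaphore
++** from FE (0x01 in bits [4:0]) to PE (0x07 in bits [12:8]), word 2 is a
++** STALL command (opcode 0x09), and word 3 repeats the FE/PE pair, so the
++** front end blocks until the pixel engine signals the semaphore back.
++*/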
++
++#if (gcdENABLE_3D || gcdENABLE_2D)
++static gctUINT32
++_SwitchPipe(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Index,
++ IN gcePIPE_SELECT Pipe
++ )
++{
++ gctUINT32 slots = 6;
++
++ if (Context->buffer != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Address correct index. */
++ buffer = Context->buffer->logical + Index;
++
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++
++ = (Pipe == gcvPIPE_2D)
++ ? 0x1
++ : 0x0;
++
++ /* Semaphore from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall from FE to PE. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++
++ Context->pipeSelectBytes = slots * gcmSIZEOF(gctUINT32);
++
++ return slots;
++}
++#endif
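++
++/* Sketch of the sequence built above: LOAD_STATE(0x0E00, 1) selects the
++** pipe (0x1 for 2D, 0x0 for 3D, taken from the Pipe argument), and the
++** trailing semaphore/stall pair keeps the FE from issuing further commands
++** until the PE has drained, so the pipe switch cannot race in-flight work.
++*/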
++
++#if gcdENABLE_3D
++static gctUINT32
++_State(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Index,
++ IN gctUINT32 Address,
++ IN gctUINT32 Value,
++ IN gctUINT32 Size,
++ IN gctBOOL FixedPoint,
++ IN gctBOOL Hinted
++ )
++{
++ gctUINT32_PTR buffer;
++ gctUINT32 align;
++ gctUINT32 i;
++
++ /* Determine if we need alignment. */
++ align = (Index & 1) ? 1 : 0;
++
++ /* Address correct index. */
++ buffer = (Context->buffer == gcvNULL)
++ ? gcvNULL
++ : Context->buffer->logical;
++
++ if ((buffer == gcvNULL) && (Address + Size > Context->stateCount))
++ {
++ /* Determine maximum state. */
++ Context->stateCount = Address + Size;
++ }
++
++ /* Do we need a new entry? */
++ if ((Address != Context->lastAddress) || (FixedPoint != Context->lastFixed))
++ {
++ if (buffer != gcvNULL)
++ {
++ if (align)
++ {
++ /* Add filler. */
++ buffer[Index++] = 0xDEADDEAD;
++ }
++
++ /* LoadState(Address, Count). */
++ gcmkASSERT((Index & 1) == 0);
++
++ if (FixedPoint)
++ {
++ buffer[Index]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Size) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++ }
++ else
++ {
++ buffer[Index]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1))))))) << (0 ? 26:26)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Size) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++ }
++
++ /* Walk all the states. */
++ for (i = 0; i < (gctUINT32)Size; i += 1)
++ {
++ /* Set state to uninitialized value. */
++ buffer[Index + 1 + i] = Value;
++
++ /* Set index in state mapping table. */
++ Context->map[Address + i].index = (gctUINT)Index + 1 + i;
++
++#if gcdSECURE_USER
++ /* Save hint. */
++ if (Context->hint != gcvNULL)
++ {
++ Context->hint[Address + i] = Hinted;
++ }
++#endif
++ }
++ }
++
++ /* Save information for this LoadState. */
++ Context->lastIndex = (gctUINT)Index;
++ Context->lastAddress = Address + (gctUINT32)Size;
++ Context->lastSize = Size;
++ Context->lastFixed = FixedPoint;
++
++ /* Return size for load state. */
++ return align + 1 + Size;
++ }
++
++ /* Append this state to the previous one. */
++ if (buffer != gcvNULL)
++ {
++ /* Update last load state. */
++ buffer[Context->lastIndex] =
++ ((((gctUINT32) (buffer[Context->lastIndex])) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Context->lastSize + Size) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ /* Walk all the states. */
++ for (i = 0; i < (gctUINT32)Size; i += 1)
++ {
++ /* Set state to uninitialized value. */
++ buffer[Index + i] = Value;
++
++ /* Set index in state mapping table. */
++ Context->map[Address + i].index = (gctUINT)Index + i;
++
++#if gcdSECURE_USER
++ /* Save hint. */
++ if (Context->hint != gcvNULL)
++ {
++ Context->hint[Address + i] = Hinted;
++ }
++#endif
++ }
++ }
++
++ /* Update last address and size. */
++ Context->lastAddress += (gctUINT32)Size;
++ Context->lastSize += Size;
++
++ /* Return number of slots required. */
++ return Size;
++}
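++
++/* _State() is used in two passes: while Context->buffer is still gcvNULL it
++** only measures, growing stateCount and returning slot counts; once a buffer
++** exists it emits a LOAD_STATE header (inserting a 0xDEADDEAD filler when the
++** header would land on an odd slot), writes the reset values, and records
++** each state's slot in map[] so later state deltas can patch values in
++** place. A call whose address continues the previous range with the same
++** fixed-point mode is folded into the prior header by rewriting its count
++** field in bits [25:16].
++*/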
++
++static gctUINT32
++_StateMirror(
++ IN gckCONTEXT Context,
++ IN gctUINT32 Address,
++ IN gctUINT32 Size,
++ IN gctUINT32 AddressMirror
++ )
++{
++ gctUINT32 i;
++
++ /* Process when buffer is set. */
++ if (Context->buffer != gcvNULL)
++ {
++ /* Walk all states. */
++ for (i = 0; i < Size; i++)
++ {
++ /* Copy the mapping address. */
++ Context->map[Address + i].index =
++ Context->map[AddressMirror + i].index;
++ }
++ }
++
++ /* Return the number of required maps. */
++ return Size;
++}
++#endif
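++
++/* _StateMirror() makes an aliased register window share the map[] slots of
++** the window it mirrors; see the 0x08000/0x0C000 instruction-memory pairing
++** in _InitializeContextBuffer() below for an example.
++*/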
++
++#if (gcdENABLE_3D || gcdENABLE_2D)
++static gceSTATUS
++_InitializeContextBuffer(
++ IN gckCONTEXT Context
++ )
++{
++ gctUINT32_PTR buffer;
++ gctUINT32 index;
++
++#if gcdENABLE_3D
++ gctBOOL halti0, halti1, halti2, halti3;
++ gctUINT i;
++ gctUINT vertexUniforms, fragmentUniforms, vsConstBase, psConstBase, constMax;
++ gctBOOL unifiedUniform;
++ gctUINT fe2vsCount;
++#endif
++
++ /* Reset the buffer index. */
++ index = 0;
++
++ /* Reset the last state address. */
++ Context->lastAddress = ~0U;
++
++ /* Get the buffer pointer. */
++ buffer = (Context->buffer == gcvNULL)
++ ? gcvNULL
++ : Context->buffer->logical;
++
++
++ /**************************************************************************/
++ /* Build 2D states. *******************************************************/
++
++
++#if gcdENABLE_3D
++ /**************************************************************************/
++ /* Build 3D states. *******************************************************/
++
++ halti0 = (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures1)) >> (0 ? 23:23)) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) );
++ halti1 = (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures2)) >> (0 ? 11:11)) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1)))))) );
++ halti2 = (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures4)) >> (0 ? 16:16)) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) );
++ halti3 = (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures5)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) );
++
++ /* Query how many uniforms are supported in non-unified uniform mode. */
++ {
++     if (Context->hardware->identity.numConstants > 256)
++     {
++         unifiedUniform = gcvTRUE;
++         vsConstBase = 0xC000;
++         psConstBase = 0xC000;
++         constMax = Context->hardware->identity.numConstants;
++         vertexUniforms = 256;
++         fragmentUniforms = constMax - vertexUniforms;
++     }
++     else if (Context->hardware->identity.numConstants == 256)
++     {
++         if (Context->hardware->identity.chipModel == gcv2000
++         &&  Context->hardware->identity.chipRevision == 0x5118)
++         {
++             unifiedUniform = gcvFALSE;
++             vsConstBase = 0x1400;
++             psConstBase = 0x1C00;
++             vertexUniforms = 256;
++             fragmentUniforms = 64;
++             constMax = 320;
++         }
++         else
++         {
++             unifiedUniform = gcvFALSE;
++             vsConstBase = 0x1400;
++             psConstBase = 0x1C00;
++             vertexUniforms = 256;
++             fragmentUniforms = 256;
++             constMax = 512;
++         }
++     }
++     else
++     {
++         unifiedUniform = gcvFALSE;
++         vsConstBase = 0x1400;
++         psConstBase = 0x1C00;
++         vertexUniforms = 168;
++         fragmentUniforms = 64;
++         constMax = 232;
++     }
++ };
++
++#if !gcdENABLE_UNIFIED_CONSTANT
++ if (Context->hardware->identity.numConstants > 256)
++ {
++ unifiedUniform = gcvTRUE;
++ }
++ else
++ {
++ unifiedUniform = gcvFALSE;
++ }
++#endif
++
++ /* Store the 3D entry offset. */
++ Context->entryOffset3D = (gctUINT)index * gcmSIZEOF(gctUINT32);
++
++ /* Switch to 3D pipe. */
++ index += _SwitchPipe(Context, index, gcvPIPE_3D);
++
++ /* Current context pointer. */
++#if gcdDEBUG
++ index += _State(Context, index, 0x03850 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++#endif
++
++ index += _FlushPipe(Context, index, gcvPIPE_3D);
++
++ /* Global states. */
++ index += _State(Context, index, 0x03814 >> 2, 0x00000001, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03818 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0381C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03820 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03828 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0382C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03834 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03838 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03854 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0384C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ /* Front End states. */
++ fe2vsCount = 12;
++ if (halti0)
++ {
++ fe2vsCount = 16;
++ }
++ index += _State(Context, index, 0x00600 >> 2, 0x00000000, fe2vsCount, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ index += _State(Context, index, 0x00644 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x00648 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0064C >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x00650 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00680 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x006A0 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00674 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00670 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00678 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0067C >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x006C0 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00700 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00740 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00780 >> 2, 0x3F800000, 16, gcvFALSE, gcvFALSE);
++
++ if (halti2)
++ {
++ index += _State(Context, index, 0x14600 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14640 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14680 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ }
++
++ /* This register is programmed by all chips, which set every DECODE_SELECT
++ ** field to VS except SAMPLER_DECODE_SELECT.
++ */
++ index += _State(Context, index, 0x00860 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ if (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures3)) >> (0 ? 3:3) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))))
++ {
++ /* I-Cache states. */
++ index += _State(Context, index, 0x00868 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0086C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0304C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01028 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ if (halti3)
++ {
++ index += _State(Context, index, 0x00890 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0104C >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _CLOSE_RANGE();
++ }
++ }
++
++ /* Vertex Shader states. */
++ index += _State(Context, index, 0x00804 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00808 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0080C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00810 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00820 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00830 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ index += _CLOSE_RANGE();
++
++ /* Primitive Assembly states. */
++ index += _State(Context, index, 0x00A00 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A04 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A08 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A0C >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A10 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A14 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A18 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A1C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A28 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A2C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A30 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A40 >> 2, 0x00000000, Context->hardware->identity.varyingsCount, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A34 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A38 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A3C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A80 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A84 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00A8C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00A88 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++#if gcdMULTI_GPU
++ index += _State(Context, index, 0x03A00 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03A04 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x03A08 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++#endif
++ /* Setup states. */
++ index += _State(Context, index, 0x00C00 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C04 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C08 >> 2, 0x45000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C0C >> 2, 0x45000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C10 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C14 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C18 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C1C >> 2, 0x42000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00C20 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++ index += _State(Context, index, 0x00C24 >> 2, 0x00000000, 1, gcvTRUE, gcvFALSE);
++
++ /* Raster states. */
++ index += _State(Context, index, 0x00E00 >> 2, 0x00000001, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E10 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E04 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E40 >> 2, 0x00000000, 16, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E08 >> 2, 0x00000031, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E24 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00E20 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ if (halti2)
++ {
++ index += _State(Context, index, 0x00E0C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ }
++
++ /* Pixel Shader states. */
++ index += _State(Context, index, 0x01004 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01008 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0100C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01010 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01030 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ index += _CLOSE_RANGE();
++
++ /* Texture states. */
++ index += _State(Context, index, 0x02000 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02040 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02080 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x020C0 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02100 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02140 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02180 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x021C0 >> 2, 0x00321000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02200 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x02240 >> 2, 0x00000000, 12, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x02400 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02440 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02480 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x024C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02500 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02540 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02580 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x025C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02600 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02640 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02680 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x026C0 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02700 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x02740 >> 2) + (0 << 4), 0x00000000, 12, gcvFALSE, gcvTRUE);
++ index += _CLOSE_RANGE();
++
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures1)) >> (0 ? 22:22)) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))) ))
++ {
++ /*
++ * Linear stride LODn overwrites LOD0 on GC880/GC2000,
++ * and only LOD0 is valid for this register.
++ */
++ gctUINT count = halti1 ? 14 : 1;
++
++ for (i = 0; i < 12; i += 1)
++ {
++ index += _State(Context, index, (0x02C00 >> 2) + i * 16, 0x00000000, count, gcvFALSE, gcvFALSE);
++ }
++ }
++
++ if (halti1)
++ {
++ gctUINT texBlockCount;
++ gctUINT gcregTXLogSizeResetValue;
++
++ /* Enable the integer filter pipe for all texture samplers so that
++ the floating-point filter clock stays gated off until the
++ floating-point filter is actually used.
++ */
++ gcregTXLogSizeResetValue = ((((gctUINT32) (0x00000000)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 29:29) - (0 ? 29:29) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 29:29) - (0 ? 29:29) + 1))))))) << (0 ? 29:29))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 29:29) - (0 ? 29:29) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 29:29) - (0 ? 29:29) + 1))))))) << (0 ? 29:29)));
++
++ /* New texture block. */
++ index += _State(Context, index, 0x10000 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10080 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10100 >> 2, gcregTXLogSizeResetValue, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10180 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10200 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10280 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10300 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10380 >> 2, 0x00321000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10400 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10480 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures2)) >> (0 ? 15:15)) & ((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1)))))) ))
++ {
++ index += _State(Context, index, 0x12000 >> 2, 0x00000000, 256, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x12400 >> 2, 0x00000000, 256, gcvFALSE, gcvFALSE);
++ }
++
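++ /* 512 >> 4 = 32 blocks of 16 registers each in the 0x10800 window;
++ only the first 14 registers of every block are programmed below. */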
++ texBlockCount = ((512) >> (4));
++
++ for (i = 0; i < texBlockCount; i += 1)
++ {
++ index += _State(Context, index, (0x10800 >> 2) + (i << 4), 0x00000000, 14, gcvFALSE, gcvTRUE);
++ }
++ }
++
++ if (halti2)
++ {
++ index += _State(Context, index, 0x10700 >> 2, 0x00000F00, 32, gcvFALSE, gcvFALSE);
++ }
++
++ if (halti3)
++ {
++ index += _State(Context, index, 0x10780 >> 2, 0x00030000, 32, gcvFALSE, gcvFALSE);
++ }
++
++ /* ASTC */
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures4)) >> (0 ? 13:13)) & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1)))))) ))
++ {
++ index += _State(Context, index, 0x10500 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10580 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10600 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x10680 >> 2, 0x00000000, 32, gcvFALSE, gcvFALSE);
++ }
++
++ /* YUV. */
++ index += _State(Context, index, 0x01678 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0167C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01680 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01684 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01688 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0168C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01690 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01694 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01698 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0169C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ /* Thread walker states. */
++ index += _State(Context, index, 0x00900 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00904 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00908 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0090C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00910 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00914 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00918 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0091C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00924 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ if (((((gctUINT32) (Context->hardware->identity.chipMinorFeatures3)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))))
++ {
++ index += _State(Context, index, 0x00940 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00944 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00948 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0094C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00950 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00954 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ }
++
++ index += _CLOSE_RANGE();
++
++ if (!halti3)
++ {
++ if (Context->hardware->identity.instructionCount > 1024)
++ {
++ /* New Shader instruction PC registers. */
++ index += _State(Context, index, 0x0085C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0101C >> 2, 0x00000100, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ for (i = 0;
++ i < Context->hardware->identity.instructionCount << 2;
++ i += 256 << 2
++ )
++ {
++ index += _State(Context, index, (0x20000 >> 2) + i, 0x00000000, 256 << 2, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++ }
++ else if (Context->hardware->identity.instructionCount > 256)
++ {
++ /* New Shader instruction PC registers. */
++ index += _State(Context, index, 0x0085C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0101C >> 2, 0x00000100, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ /* VX instruction memory. */
++ for (i = 0;
++ i < Context->hardware->identity.instructionCount << 2;
++ i += 256 << 2
++ )
++ {
++ index += _State(Context, index, (0x0C000 >> 2) + i, 0x00000000, 256 << 2, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++
++ _StateMirror(Context, 0x08000 >> 2, Context->hardware->identity.instructionCount << 2 , 0x0C000 >> 2);
++ }
++ else /* if (Context->hardware->identity.instructionCount <= 256) */
++ {
++ /* Old shader instruction PC registers. */
++ index += _State(Context, index, 0x00800 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00838 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ index += _State(Context, index, 0x01000 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01018 >> 2, 0x01000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ index += _State(Context, index, 0x04000 >> 2, 0x00000000, 1024, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ index += _State(Context, index, 0x06000 >> 2, 0x00000000, 1024, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++ }
++ /* The I-cache uses the new instruction PC registers. */
++ else
++ {
++ /* New Shader instruction PC registers. */
++ index += _State(Context, index, 0x0085C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0101C >> 2, 0x00000100, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++
++ if (unifiedUniform)
++ {
++ gctINT numConstants = Context->hardware->identity.numConstants;
++
++ index += _State(Context, index, 0x01024 >> 2, 0x00000100, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x00864 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ for (i = 0;
++ numConstants > 0;
++ i += 256 << 2,
++ numConstants -= 256
++ )
++ {
++ if (numConstants >= 256)
++ {
++ index += _State(Context, index, (0x30000 >> 2) + i, 0x00000000, 256 << 2, gcvFALSE, gcvFALSE);
++ }
++ else
++ {
++ index += _State(Context, index, (0x30000 >> 2) + i, 0x00000000, numConstants << 2, gcvFALSE, gcvFALSE);
++ }
++ index += _CLOSE_RANGE();
++ }
++ }
++#if gcdENABLE_UNIFIED_CONSTANT
++ else
++#endif
++ {
++ index += _State(Context, index, 0x05000 >> 2, 0x00000000, vertexUniforms * 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x07000 >> 2, 0x00000000, fragmentUniforms * 4, gcvFALSE, gcvFALSE);
++ }
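++
++ /* Two constant layouts are handled above: unified parts expose a single
++ window at 0x30000 that is walked in chunks of 256 constants (four
++ words each), while non-unified parts use separate vertex (0x05000) and
++ fragment (0x07000) uniform files sized by vertexUniforms and
++ fragmentUniforms. */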
++
++ /* Store the offset of the "XD from 3D" entry. */
++ Context->entryOffsetXDFrom3D = (gctUINT)index * gcmSIZEOF(gctUINT32);
++
++
++ /* Pixel Engine states. */
++ index += _State(Context, index, 0x01400 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01404 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01408 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0140C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01414 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01418 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0141C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01420 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01424 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01428 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0142C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01434 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01454 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01458 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0145C >> 2, 0x00000010, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014A0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014A8 >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014AC >> 2, 0xFFFFFFFF, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014B0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014B4 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014A4 >> 2, 0x000E400C, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01580 >> 2, 0x00000000, 3, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x014B8 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ /* Composition states. */
++ index += _State(Context, index, 0x03008 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ if (Context->hardware->identity.pixelPipes == 1)
++ {
++ index += _State(Context, index, 0x01460 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++
++ index += _State(Context, index, 0x01430 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01410 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ }
++ else
++ {
++ index += _State(Context, index, (0x01460 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++ }
++
++ if (Context->hardware->identity.pixelPipes > 1 || halti0)
++ {
++ index += _State(Context, index, (0x01480 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++ }
++
++ for (i = 0; i < 3; i++)
++ {
++ index += _State(Context, index, (0x01500 >> 2) + (i << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++ }
++
++ if (halti2)
++ {
++ for (i = 0; i < 7; i++)
++ {
++ index += _State(Context, index, (0x14800 >> 2) + (i << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++ }
++ index += _State(Context, index, 0x14900 >> 2, 0x00000000, 7, gcvFALSE, gcvFALSE);
++ }
++
++
++ if (halti3)
++ {
++ index += _State(Context, index, 0x014BC >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ }
++
++ /* Resolve states. */
++ index += _State(Context, index, 0x01604 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01608 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0160C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01610 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01614 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01620 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01630 >> 2, 0x00000000, 2, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01640 >> 2, 0x00000000, 4, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x0163C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016A0 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016B4 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++
++ if ((Context->hardware->identity.pixelPipes > 1) || halti1)
++ {
++ index += _State(Context, index, (0x016C0 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++
++ index += _State(Context, index, (0x016E0 >> 2) + (0 << 3), 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvTRUE);
++
++ index += _State(Context, index, 0x01700 >> 2, 0x00000000, Context->hardware->identity.pixelPipes, gcvFALSE, gcvFALSE);
++ }
++
++#if gcd3DBLIT
++ index += _State(Context, index, (0x14000 >> 2) + (0 << 1), 0x00000000, 2, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x14008 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1400C >> 2, 0x0001C800, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14010 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x14014 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x14018 >> 2) + (0 << 1), 0x00000000, 2, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x14020 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x14024 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14028 >> 2, 0x0001C800, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1402C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14030 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14034 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14038 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1403C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14040 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14044 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14048 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1404C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14050 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14058 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1405C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14054 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14100 >> 2, 0x00000000, 64, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14200 >> 2, 0x00000000, 64, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14064 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14068 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ index += _State(Context, index, 0x1406C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14070 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14074 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14078 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1407C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14080 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14084 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14088 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x1408C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14090 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++
++ index += _State(Context, index, 0x14094 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x14098 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++#endif
++
++ /* Tile status. */
++ index += _State(Context, index, 0x01654 >> 2, 0x00200000, 1, gcvFALSE, gcvFALSE);
++
++ index += _CLOSE_RANGE();
++ index += _State(Context, index, 0x01658 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0165C >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01660 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01664 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01668 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x0166C >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01670 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01674 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016A4 >> 2, 0x00000000, 1, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x016AC >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016A8 >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01720 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x01740 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, 0x01760 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++
++
++ if (halti2)
++ {
++ index += _State(Context, index, 0x01780 >> 2, 0x00000000, 8, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, 0x016BC >> 2, 0x00000000, 1, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x017A0 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x017C0 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x017E0 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvTRUE);
++ index += _State(Context, index, (0x01A00 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x01A20 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvFALSE);
++ index += _State(Context, index, (0x01A40 >> 2) + 1, 0x00000000, 7, gcvFALSE, gcvFALSE);
++ }
++
++ index += _CLOSE_RANGE();
++
++ if(((((gctUINT32) (Context->hardware->identity.chipMinorFeatures4)) >> (0 ? 25:25) & ((gctUINT32) ((((1 ? 25:25) - (0 ? 25:25) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:25) - (0 ? 25:25) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 25:25) - (0 ? 25:25) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:25) - (0 ? 25:25) + 1))))))))
++ {
++ index += _State(Context, index, 0x03860 >> 2, 0x6, 1, gcvFALSE, gcvFALSE);
++ index += _CLOSE_RANGE();
++ }
++
++ if (halti3)
++ {
++ index += _State(Context, index, 0x01A80 >> 2, 0x00000000, 8, gcvFALSE, gcvTRUE);
++ index += _CLOSE_RANGE();
++ }
++
++ /* Semaphore/stall. */
++ index += _SemaphoreStall(Context, index);
++#endif
++
++ /**************************************************************************/
++ /* Link to another address. ***********************************************/
++
++ Context->linkIndex3D = (gctUINT)index;
++
++ if (buffer != gcvNULL)
++ {
++ buffer[index + 0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[index + 1]
++ = 0;
++ }
++
++ index += 2;
++
++ /* Store the end of the context buffer. */
++ Context->bufferSize = index * gcmSIZEOF(gctUINT32);
++
++
++ /**************************************************************************/
++ /* Pipe switch for the case where neither 2D nor 3D is used. **************/
++
++ /* Store the "XD from 2D" entry offset. */
++ Context->entryOffsetXDFrom2D = (gctUINT)index * gcmSIZEOF(gctUINT32);
++
++ /* Flush 2D pipe. */
++ index += _FlushPipe(Context, index, gcvPIPE_2D);
++
++ /* Switch to 3D pipe. */
++ index += _SwitchPipe(Context, index, gcvPIPE_3D);
++
++ /* Store the location of the link. */
++ Context->linkIndexXD = (gctUINT)index;
++
++ if (buffer != gcvNULL)
++ {
++ buffer[index + 0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[index + 1]
++ = 0;
++ }
++
++ index += 2;
++
++
++ /**************************************************************************/
++ /* Save size for buffer. **************************************************/
++
++ Context->totalSize = index * gcmSIZEOF(gctUINT32);
++
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++#endif
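++
++/* _InitializeContextBuffer() supports two passes: called before any buffer
++** exists (as below, from gckCONTEXT_Construct) it only computes stateCount,
++** bufferSize and totalSize, since every emitter checks Context->buffer for
++** gcvNULL; with buffers allocated it fills them in. The two trailing LINK
++** commands (opcode 0x08, target address in the following word) are written
++** as zero here; their positions are kept in linkIndex3D and linkIndexXD so
++** they can be patched later.
++*/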
++
++static gceSTATUS
++_DestroyContext(
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ if (Context != gcvNULL)
++ {
++ gcsCONTEXT_PTR bufferHead;
++
++ /* Free context buffers. */
++ for (bufferHead = Context->buffer; Context->buffer != gcvNULL;)
++ {
++ /* Get a shortcut to the current buffer. */
++ gcsCONTEXT_PTR buffer = Context->buffer;
++
++ /* Get the next buffer. */
++ gcsCONTEXT_PTR next = buffer->next;
++
++ /* Last item? */
++ if (next == bufferHead)
++ {
++ next = gcvNULL;
++ }
++
++ /* Destroy the signal. */
++ if (buffer->signal != gcvNULL)
++ {
++ gcmkONERROR(gckOS_DestroySignal(
++ Context->os, buffer->signal
++ ));
++
++ buffer->signal = gcvNULL;
++ }
++
++ /* Free state delta map. */
++ if (buffer->logical != gcvNULL)
++ {
++ if (Context->hardware->kernel->virtualCommandBuffer)
++ {
++ gcmkONERROR(gckEVENT_DestroyVirtualCommandBuffer(
++ Context->hardware->kernel->eventObj,
++ Context->totalSize,
++ buffer->physical,
++ buffer->logical,
++ gcvKERNEL_PIXEL
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckEVENT_FreeContiguousMemory(
++ Context->hardware->kernel->eventObj,
++ Context->totalSize,
++ buffer->physical,
++ buffer->logical,
++ gcvKERNEL_PIXEL
++ ));
++ }
++
++ buffer->logical = gcvNULL;
++ }
++
++ /* Free context buffer. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, buffer));
++
++ /* Remove from the list. */
++ Context->buffer = next;
++ }
++
++#if gcdSECURE_USER
++ /* Free the hint array. */
++ if (Context->hint != gcvNULL)
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->hint));
++ }
++#endif
++ /* Free record array copy. */
++#if REMOVE_DUPLICATED_COPY_FROM_USER
++ if (Context->recordArrayMap != gcvNULL)
++ {
++ gcsRECORD_ARRAY_MAP_PTR map = Context->recordArrayMap;
++
++ do
++ {
++ /* Free record array. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, map->kData));
++ map = map->next;
++ }
++ while (map != Context->recordArrayMap);
++
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->recordArrayMap));
++ }
++#else
++ if (Context->recordArray != gcvNULL)
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->recordArray));
++ }
++#endif
++
++ /* Free the state mapping. */
++ if (Context->map != gcvNULL)
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context->map));
++ }
++
++ /* Mark the gckCONTEXT object as unknown. */
++ Context->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckCONTEXT object. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Context->os, Context));
++ }
++
++OnError:
++ return status;
++}
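++
++/* The context buffers form a circular singly-linked list (each ->next wraps
++** around to the head), so the loop above captures bufferHead first and uses
++** the wrap-around as its end-of-list marker.
++*/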
++
++
++/******************************************************************************\
++**************************** Context Management API ****************************
++\******************************************************************************/
++
++/******************************************************************************\
++**
++** gckCONTEXT_Construct
++**
++** Construct a new gckCONTEXT object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** gckHARDWARE Hardware
++** Pointer to gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gckCONTEXT * Context
++** Pointer to a variable that will receive the gckCONTEXT object
++** pointer.
++*/
++#if (gcdENABLE_3D || gcdENABLE_2D)
++gceSTATUS
++gckCONTEXT_Construct(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ OUT gckCONTEXT * Context
++ )
++{
++ gceSTATUS status;
++ gckCONTEXT context = gcvNULL;
++ gctUINT32 allocationSize;
++ gctUINT i;
++ gctPOINTER pointer = gcvNULL;
++ gctUINT32 address;
++
++ gcmkHEADER_ARG("Os=0x%08X Hardware=0x%08X", Os, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Context != gcvNULL);
++
++
++ /**************************************************************************/
++ /* Allocate and initialize basic fields of gckCONTEXT. ********************/
++
++ /* The context object size. */
++ allocationSize = gcmSIZEOF(struct _gckCONTEXT);
++
++ /* Allocate the object. */
++ gcmkONERROR(gckOS_Allocate(
++ Os, allocationSize, &pointer
++ ));
++
++ context = pointer;
++
++ /* Reset the entire object. */
++ gcmkONERROR(gckOS_ZeroMemory(context, allocationSize));
++
++ /* Initialize the gckCONTEXT object. */
++ context->object.type = gcvOBJ_CONTEXT;
++ context->os = Os;
++ context->hardware = Hardware;
++
++
++#if !gcdENABLE_3D
++ context->entryPipe = gcvPIPE_2D;
++ context->exitPipe = gcvPIPE_2D;
++#elif gcdCMD_NO_2D_CONTEXT
++ context->entryPipe = gcvPIPE_3D;
++ context->exitPipe = gcvPIPE_3D;
++#else
++ context->entryPipe
++ = (((((gctUINT32) (context->hardware->identity.chipFeatures)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) )
++ ? gcvPIPE_2D
++ : gcvPIPE_3D;
++ context->exitPipe = gcvPIPE_3D;
++#endif
++
++ /* Get the command buffer requirements. */
++ gcmkONERROR(gckHARDWARE_QueryCommandBuffer(
++ Hardware,
++ &context->alignment,
++ &context->reservedHead,
++ &context->reservedTail
++ ));
++
++ /* Mark the context as dirty to force loading of the entire state table
++ the first time. */
++ context->dirty = gcvTRUE;
++
++
++ /**************************************************************************/
++ /* Get the size of the context buffer. ************************************/
++
++ gcmkONERROR(_InitializeContextBuffer(context));
++
++
++ /**************************************************************************/
++ /* Compute the size of the record array. **********************************/
++
++ context->recordArraySize
++ = gcmSIZEOF(gcsSTATE_DELTA_RECORD) * (gctUINT)context->stateCount;
++
++
++ if (context->stateCount > 0)
++ {
++ /**************************************************************************/
++ /* Allocate and reset the state mapping table. ****************************/
++
++ /* Allocate the state mapping table. */
++ gcmkONERROR(gckOS_Allocate(
++ Os,
++ gcmSIZEOF(gcsSTATE_MAP) * context->stateCount,
++ &pointer
++ ));
++
++ context->map = pointer;
++
++ /* Zero the state mapping table. */
++ gcmkONERROR(gckOS_ZeroMemory(
++ context->map, gcmSIZEOF(gcsSTATE_MAP) * context->stateCount
++ ));
++
++
++ /**************************************************************************/
++ /* Allocate the hint array. ***********************************************/
++
++#if gcdSECURE_USER
++ /* Allocate hints. */
++ gcmkONERROR(gckOS_Allocate(
++ Os,
++ gcmSIZEOF(gctBOOL) * context->stateCount,
++ &pointer
++ ));
++
++ context->hint = pointer;
++#endif
++ }
++
++ /**************************************************************************/
++ /* Allocate the context and state delta buffers. **************************/
++
++ for (i = 0; i < gcdCONTEXT_BUFFER_COUNT; i += 1)
++ {
++ /* Allocate a context buffer. */
++ gcsCONTEXT_PTR buffer;
++
++ gctSIZE_T totalSize = context->totalSize;
++
++ /* Allocate the context buffer structure. */
++ gcmkONERROR(gckOS_Allocate(
++ Os,
++ gcmSIZEOF(gcsCONTEXT),
++ &pointer
++ ));
++
++ buffer = pointer;
++
++ /* Reset the context buffer structure. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ buffer, gcmSIZEOF(gcsCONTEXT)
++ ));
++
++ /* Append to the list. */
++ if (context->buffer == gcvNULL)
++ {
++ buffer->next = buffer;
++ context->buffer = buffer;
++ }
++ else
++ {
++ buffer->next = context->buffer->next;
++ context->buffer->next = buffer;
++ }
++
++ /* Set the number of delta in the order of creation. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ buffer->num = i;
++#endif
++
++ /* Create the busy signal. */
++ gcmkONERROR(gckOS_CreateSignal(
++ Os, gcvFALSE, &buffer->signal
++ ));
++
++ /* Set the signal, buffer is currently not busy. */
++ gcmkONERROR(gckOS_Signal(
++ Os, buffer->signal, gcvTRUE
++ ));
++
++ /* Create a new physical context buffer. */
++ if (context->hardware->kernel->virtualCommandBuffer)
++ {
++ gcmkONERROR(gckKERNEL_AllocateVirtualCommandBuffer(
++ context->hardware->kernel,
++ gcvFALSE,
++ &totalSize,
++ &buffer->physical,
++ &pointer
++ ));
++
++ gcmkONERROR(gckKERNEL_GetGPUAddress(
++ context->hardware->kernel,
++ pointer,
++ gcvFALSE,
++ &address
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckOS_AllocateContiguous(
++ Os,
++ gcvFALSE,
++ &totalSize,
++ &buffer->physical,
++ &pointer
++ ));
++
++ gcmkONERROR(gckHARDWARE_ConvertLogical(
++ context->hardware,
++ pointer,
++ gcvFALSE,
++ &address
++ ));
++ }
++
++ buffer->logical = pointer;
++ buffer->address = address;
++
++ /* Set gckEVENT object pointer. */
++ buffer->eventObj = Hardware->kernel->eventObj;
++
++ /* Set the pointers to the LINK commands. */
++ if (context->linkIndex2D != 0)
++ {
++ buffer->link2D = &buffer->logical[context->linkIndex2D];
++ }
++
++ if (context->linkIndex3D != 0)
++ {
++ buffer->link3D = &buffer->logical[context->linkIndex3D];
++ }
++
++ if (context->linkIndexXD != 0)
++ {
++ gctPOINTER xdLink;
++ gctUINT32 xdEntryAddress;
++ gctUINT32 xdEntrySize;
++ gctUINT32 linkBytes;
++
++ /* Determine LINK parameters. */
++ xdLink
++ = &buffer->logical[context->linkIndexXD];
++
++ xdEntryAddress
++ = buffer->address
++ + context->entryOffsetXDFrom3D;
++
++ xdEntrySize
++ = context->bufferSize
++ - context->entryOffsetXDFrom3D;
++
++ /* Query LINK size. */
++ gcmkONERROR(gckHARDWARE_Link(
++ Hardware, gcvNULL, 0, 0, &linkBytes
++ ));
++
++ /* Generate a LINK. */
++ gcmkONERROR(gckHARDWARE_Link(
++ Hardware,
++ xdLink,
++ xdEntryAddress,
++ xdEntrySize,
++ &linkBytes
++ ));
++ }
++ }
++
++
++ /**************************************************************************/
++ /* Initialize the context buffers. ****************************************/
++
++ /* Initialize the current context buffer. */
++ gcmkONERROR(_InitializeContextBuffer(context));
++
++ /* Make all created contexts equal. */
++ {
++ gcsCONTEXT_PTR currContext, tempContext;
++
++ /* Set the current context buffer. */
++ currContext = context->buffer;
++
++ /* Get the next context buffer. */
++ tempContext = currContext->next;
++
++ /* Loop through all buffers. */
++ while (tempContext != currContext)
++ {
++ if (tempContext == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ /* Copy the current context. */
++ gckOS_MemCopy(
++ tempContext->logical,
++ currContext->logical,
++ context->totalSize
++ );
++
++ /* Get the next context buffer. */
++ tempContext = tempContext->next;
++ }
++ }
++
++ /* Return pointer to the gckCONTEXT object. */
++ *Context = context;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Context=0x%08X", *Context);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back on error. */
++ gcmkVERIFY_OK(_DestroyContext(context));
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
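++
++/* A minimal usage sketch (illustrative only, not part of the driver): a
++   context is typically built once per process and torn down on cleanup:
++
++       gckCONTEXT context;
++       gcmkONERROR(gckCONTEXT_Construct(Os, Hardware, ProcessID, &context));
++       ...
++       gcmkVERIFY_OK(gckCONTEXT_Destroy(context));
++
++   On any construction error the function rolls itself back through
++   _DestroyContext before returning. */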
++
++/******************************************************************************\
++**
++** gckCONTEXT_Destroy
++**
++** Destroy a gckCONTEXT object.
++**
++** INPUT:
++**
++** gckCONTEXT Context
++**          Pointer to a gckCONTEXT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCONTEXT_Destroy(
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Context=0x%08X", Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ /* Destroy the context and all related objects. */
++ status = _DestroyContext(Context);
++
++    /* Return the status. */
++ gcmkFOOTER_NO();
++ return status;
++}
++
++/******************************************************************************\
++**
++** gckCONTEXT_Update
++**
++** Merge all pending state delta buffers into the current context buffer.
++**
++** INPUT:
++**
++** gckCONTEXT Context
++**          Pointer to a gckCONTEXT object.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** gcsSTATE_DELTA_PTR StateDelta
++** Pointer to the state delta.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCONTEXT_Update(
++ IN gckCONTEXT Context,
++ IN gctUINT32 ProcessID,
++ IN gcsSTATE_DELTA_PTR StateDelta
++ )
++{
++#if gcdENABLE_3D
++ gceSTATUS status = gcvSTATUS_OK;
++ gcsSTATE_DELTA _stateDelta;
++ gckKERNEL kernel;
++ gcsCONTEXT_PTR buffer;
++ gcsSTATE_MAP_PTR map;
++ gctBOOL needCopy = gcvFALSE;
++ gcsSTATE_DELTA_PTR nDelta;
++ gcsSTATE_DELTA_PTR uDelta = gcvNULL;
++ gcsSTATE_DELTA_PTR kDelta = gcvNULL;
++ gcsSTATE_DELTA_RECORD_PTR record;
++ gcsSTATE_DELTA_RECORD_PTR recordArray = gcvNULL;
++#if REMOVE_DUPLICATED_COPY_FROM_USER
++ gcsRECORD_ARRAY_MAP_PTR recordArrayMap = gcvNULL;
++#endif
++ gctUINT elementCount;
++ gctUINT address;
++ gctUINT32 mask;
++ gctUINT32 data;
++ gctUINT index;
++ gctUINT i, j;
++
++#if gcdSECURE_USER
++ gcskSECURE_CACHE_PTR cache;
++#endif
++
++ gcmkHEADER_ARG(
++ "Context=0x%08X ProcessID=%d StateDelta=0x%08X",
++ Context, ProcessID, StateDelta
++ );
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ /* Get a shortcut to the kernel object. */
++ kernel = Context->hardware->kernel;
++
++    /* Check whether we need to copy the structures or not. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Context->os, ProcessID, &needCopy));
++
++ /* Allocate the copy buffer for the user record array. */
++#if REMOVE_DUPLICATED_COPY_FROM_USER
++ if (needCopy && (Context->recordArrayMap == gcvNULL))
++ {
++ /* Allocate enough maps. */
++ gcmkONERROR(gckOS_Allocate(
++ Context->os,
++ gcmSIZEOF(gcsRECORD_ARRAY_MAP_PTR) * gcdCONTEXT_BUFFER_COUNT,
++ (gctPOINTER *) &Context->recordArrayMap
++ ));
++
++ for (i = 0; i < gcdCONTEXT_BUFFER_COUNT; i++)
++ {
++ /* Next mapping id. */
++ gctUINT n = (i + 1) % gcdCONTEXT_BUFFER_COUNT;
++
++ recordArrayMap = &Context->recordArrayMap[i];
++
++ /* Allocate the buffer. */
++ gcmkONERROR(gckOS_Allocate(
++ Context->os,
++ Context->recordArraySize,
++ (gctPOINTER *) &recordArrayMap->kData
++ ));
++
++ /* Initialize fields. */
++ recordArrayMap->key = 0;
++ recordArrayMap->next = &Context->recordArrayMap[n];
++ }
++ }
++#else
++ if (needCopy && (Context->recordArray == gcvNULL))
++ {
++ /* Allocate the buffer. */
++ gcmkONERROR(gckOS_Allocate(
++ Context->os,
++ Context->recordArraySize,
++ (gctPOINTER *) &Context->recordArray
++ ));
++ }
++#endif
++
++ /* Get the current context buffer. */
++ buffer = Context->buffer;
++
++ /* Wait until the context buffer becomes available; this will
++ also reset the signal and mark the buffer as busy. */
++ gcmkONERROR(gckOS_WaitSignal(
++ Context->os, buffer->signal, gcvINFINITE
++ ));
++
++#if gcdSECURE_USER
++    /* Get the cache from the database. */
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(kernel, ProcessID, &cache));
++#endif
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE) && 1 && gcdENABLE_3D
++ /* Update current context token. */
++ buffer->logical[Context->map[0x0E14].index]
++ = (gctUINT32)gcmPTR2INT32(Context);
++#endif
++
++ /* Are there any pending deltas? */
++ if (buffer->deltaCount != 0)
++ {
++ /* Get the state map. */
++ map = Context->map;
++
++ /* Get the first delta item. */
++ uDelta = buffer->delta;
++
++ /* Reset the vertex stream count. */
++ elementCount = 0;
++
++ /* Merge all pending deltas. */
++ for (i = 0; i < buffer->deltaCount; i += 1)
++ {
++ /* Get access to the state delta. */
++ gcmkONERROR(gckKERNEL_OpenUserData(
++ kernel, needCopy,
++ &_stateDelta,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++#if REMOVE_DUPLICATED_COPY_FROM_USER
++ if (needCopy)
++ {
++ recordArray = gcvNULL;
++ recordArrayMap = Context->recordArrayMap;
++
++ do
++ {
++                    /* Check if recordArray is already opened. */
++ if (recordArrayMap->key == kDelta->recordArray)
++ {
++ /* Found. */
++ recordArray = recordArrayMap->kData;
++ break;
++ }
++
++ recordArrayMap = recordArrayMap->next;
++ }
++ while (recordArrayMap != Context->recordArrayMap);
++
++ if (recordArray == gcvNULL)
++ {
++                    /* Walk the ring until an empty slot is found. */
++                    while (recordArrayMap->key != 0)
++                    {
++                        recordArrayMap = recordArrayMap->next;
++                    }
++
++ /* Get access to the state records. */
++ gcmkONERROR(gckOS_CopyFromUserData(
++ kernel->os,
++ recordArrayMap->kData,
++ gcmUINT64_TO_PTR(kDelta->recordArray),
++ Context->recordArraySize
++ ));
++
++ /* Save user pointer as key. */
++ recordArrayMap->key = kDelta->recordArray;
++ recordArray = recordArrayMap->kData;
++ }
++ }
++ else
++ {
++ /* Get access to the state records. */
++ gcmkONERROR(gckOS_MapUserPointer(
++ kernel->os,
++ gcmUINT64_TO_PTR(kDelta->recordArray),
++ Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++ }
++#else
++ /* Get access to the state records. */
++ gcmkONERROR(gckKERNEL_OpenUserData(
++ kernel, needCopy,
++ Context->recordArray,
++ gcmUINT64_TO_PTR(kDelta->recordArray), Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++#endif
++
++ /* Merge all pending states. */
++ for (j = 0; j < kDelta->recordCount; j += 1)
++ {
++ if (j >= Context->stateCount)
++ {
++ break;
++ }
++
++ /* Get the current state record. */
++ record = &recordArray[j];
++
++ /* Get the state address. */
++ address = record->address;
++
++ /* Make sure the state is a part of the mapping table. */
++ if (address >= Context->stateCount)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): State 0x%04X is not mapped.\n",
++ __FUNCTION__, __LINE__,
++ address
++ );
++
++ continue;
++ }
++
++ /* Get the state index. */
++ index = map[address].index;
++
++ /* Skip the state if not mapped. */
++ if (index == 0)
++ {
++ continue;
++ }
++
++ /* Get the data mask. */
++ mask = record->mask;
++
++                /* Masked states that are being completely reset, or regular states. */
++ if ((mask == 0) || (mask == ~0U))
++ {
++ /* Get the new data value. */
++ data = record->data;
++
++ /* Process special states. */
++ if (address == 0x0595)
++ {
++ /* Force auto-disable to be disabled. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1))))))) << (0 ? 13:13))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1))))))) << (0 ? 13:13)));
++ }
++
++#if gcdSECURE_USER
++ /* Do we need to convert the logical address? */
++ if (Context->hint[address])
++ {
++ /* Map handle into physical address. */
++ gcmkONERROR(gckKERNEL_MapLogicalToPhysical(
++ kernel, cache, (gctPOINTER) &data
++ ));
++ }
++#endif
++
++ /* Set new data. */
++ buffer->logical[index] = data;
++ }
++
++ /* Masked states that are being set partially. */
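++                /* For example, with mask = 0x0000FF00 only bits 15:8 of
++                   record->data are applied below; all other bits of the
++                   state keep their current value. */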
++ else
++ {
++ buffer->logical[index]
++ = (~mask & buffer->logical[index])
++ | (mask & record->data);
++ }
++ }
++
++ /* Get the element count. */
++ if (kDelta->elementCount != 0)
++ {
++ elementCount = kDelta->elementCount;
++ }
++
++ /* Dereference delta. */
++ kDelta->refCount -= 1;
++ gcmkASSERT(kDelta->refCount >= 0);
++
++ /* Get the next state delta. */
++ nDelta = gcmUINT64_TO_PTR(kDelta->next);
++
++#if REMOVE_DUPLICATED_COPY_FROM_USER
++ if (needCopy)
++ {
++ if (kDelta->refCount == 0)
++ {
++ /* No other reference, reset the mapping. */
++ recordArrayMap->key = 0;
++ }
++ }
++ else
++ {
++ /* Close access to the state records. */
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ kernel->os,
++ gcmUINT64_TO_PTR(kDelta->recordArray),
++ Context->recordArraySize,
++ (gctPOINTER *) recordArray
++ ));
++
++ recordArray = gcvNULL;
++ }
++#else
++            /* Close access to the state records. */
++ gcmkONERROR(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvFALSE,
++ gcmUINT64_TO_PTR(kDelta->recordArray), Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++#endif
++
++ /* Close access to the current state delta. */
++ gcmkONERROR(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvTRUE,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Update the user delta pointer. */
++ uDelta = nDelta;
++ }
++
++        /* The hardware disables all input streams when stream 0 is
++           programmed; it then re-enables only those streams that were
++           explicitly programmed by the software. Because of this we cannot
++           program the entire array of values, or all streams would be
++           re-enabled; instead we program only those actually needed. */
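++        /* For example, with fe2vsCount = 16 and elementCount = 5, nopCount
++           is 8 - 2 = 6 and the first NOP goes to logical[base + 5], since
++           (5 | 1) == 5; each NOP occupies two slots. */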
++ if (elementCount != 0)
++ {
++ gctUINT base;
++ gctUINT nopCount;
++ gctUINT32_PTR nop;
++ gctUINT fe2vsCount = 12;
++
++ if ((((((gctUINT32) (Context->hardware->identity.chipMinorFeatures1)) >> (0 ? 23:23)) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) ))
++ {
++ fe2vsCount = 16;
++ }
++
++ /* Determine the base index of the vertex stream array. */
++ base = map[0x0180].index;
++
++ /* Set the proper state count. */
++ buffer->logical[base - 1]
++ = ((((gctUINT32) (buffer->logical[base - 1])) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (elementCount ) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ /* Determine the number of NOP commands. */
++ nopCount
++ = (fe2vsCount / 2)
++ - (elementCount / 2);
++
++ /* Determine the location of the first NOP. */
++ nop = &buffer->logical[base + (elementCount | 1)];
++
++ /* Fill the unused space with NOPs. */
++ for (i = 0; i < nopCount; i += 1)
++ {
++ if (nop >= buffer->logical + Context->totalSize)
++ {
++ break;
++ }
++
++ /* Generate a NOP command. */
++ *nop = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x03 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ /* Advance. */
++ nop += 2;
++ }
++ }
++
++ /* Reset pending deltas. */
++ buffer->deltaCount = 0;
++ buffer->delta = gcvNULL;
++ }
++
++ /* Set state delta user pointer. */
++ uDelta = StateDelta;
++
++ /* Get access to the state delta. */
++ gcmkONERROR(gckKERNEL_OpenUserData(
++ kernel, needCopy,
++ &_stateDelta,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* State delta cannot be attached to anything yet. */
++ if (kDelta->refCount != 0)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): kDelta->refCount = %d (has to be 0).\n",
++ __FUNCTION__, __LINE__,
++ kDelta->refCount
++ );
++ }
++
++ /* Attach to all contexts. */
++ buffer = Context->buffer;
++
++ do
++ {
++ /* Attach to the context if nothing is attached yet. If a delta
++           is already attached, all we need to do is increment
++ the number of deltas in the context. */
++ if (buffer->delta == gcvNULL)
++ {
++ buffer->delta = uDelta;
++ }
++
++ /* Update reference count. */
++ kDelta->refCount += 1;
++
++ /* Update counters. */
++ buffer->deltaCount += 1;
++
++ /* Get the next context buffer. */
++ buffer = buffer->next;
++
++ if (buffer == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++ }
++ while (Context->buffer != buffer);
++
++ /* Close access to the current state delta. */
++ gcmkONERROR(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvTRUE,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Schedule an event to mark the context buffer as available. */
++ gcmkONERROR(gckEVENT_Signal(
++ buffer->eventObj, buffer->signal, gcvKERNEL_PIXEL
++ ));
++
++ /* Advance to the next context buffer. */
++ Context->buffer = buffer->next;
++
++    /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++    /* Close access to the state records. */
++ if (kDelta != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvFALSE,
++ gcmUINT64_TO_PTR(kDelta->recordArray), Context->recordArraySize,
++ (gctPOINTER *) &recordArray
++ ));
++ }
++
++ /* Close access to the current state delta. */
++ gcmkVERIFY_OK(gckKERNEL_CloseUserData(
++ kernel, needCopy,
++ gcvTRUE,
++ uDelta, gcmSIZEOF(gcsSTATE_DELTA),
++ (gctPOINTER *) &kDelta
++ ));
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ return gcvSTATUS_OK;
++#endif
++}
++
++gceSTATUS
++gckCONTEXT_MapBuffer(
++ IN gckCONTEXT Context,
++ OUT gctUINT32 *Physicals,
++ OUT gctUINT64 *Logicals,
++ OUT gctUINT32 *Bytes
++ )
++{
++ gceSTATUS status;
++ int i = 0;
++ gctSIZE_T pageCount;
++ gckVIRTUAL_COMMAND_BUFFER_PTR commandBuffer;
++ gckKERNEL kernel = Context->hardware->kernel;
++ gctPOINTER logical;
++ gctPHYS_ADDR physical;
++
++ gcsCONTEXT_PTR buffer;
++
++ gcmkHEADER();
++
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ buffer = Context->buffer;
++
++ for (i = 0; i < gcdCONTEXT_BUFFER_COUNT; i++)
++ {
++ if (kernel->virtualCommandBuffer)
++ {
++ commandBuffer = (gckVIRTUAL_COMMAND_BUFFER_PTR)buffer->physical;
++ physical = commandBuffer->physical;
++
++ gcmkONERROR(gckOS_CreateUserVirtualMapping(
++ kernel->os,
++ physical,
++ Context->totalSize,
++ &logical,
++ &pageCount));
++ }
++ else
++ {
++ physical = buffer->physical;
++
++ gcmkONERROR(gckOS_MapMemory(
++ kernel->os,
++ physical,
++ Context->totalSize,
++ &logical));
++ }
++
++ Physicals[i] = gcmPTR_TO_NAME(physical);
++
++ Logicals[i] = gcmPTR_TO_UINT64(logical);
++
++ buffer = buffer->next;
++ }
++
++ *Bytes = (gctUINT)Context->totalSize;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
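++
++/* A usage sketch (illustrative only): callers typically map all context
++   buffers at once:
++
++       gctUINT32 physicals[gcdCONTEXT_BUFFER_COUNT];
++       gctUINT64 logicals[gcdCONTEXT_BUFFER_COUNT];
++       gctUINT32 bytes;
++       gcmkONERROR(gckCONTEXT_MapBuffer(context, physicals, logicals, &bytes));
++
++   Each physical is returned as a name (gcmPTR_TO_NAME) rather than as a
++   raw pointer. */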
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/arch/gc_hal_kernel_context.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/arch/gc_hal_kernel_context.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/arch/gc_hal_kernel_context.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/arch/gc_hal_kernel_context.h 2015-07-27 23:13:06.186908111 +0200
+@@ -0,0 +1,183 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_context_h_
++#define __gc_hal_kernel_context_h_
++
++#include "gc_hal_kernel_buffer.h"
++
++/* Experimental optimization. */
++#define REMOVE_DUPLICATED_COPY_FROM_USER 1
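++/* When set, gckCONTEXT_Update caches one kernel-side copy of each user
++   record array (keyed by its user pointer) instead of repeating the
++   copy-from-user for a delta that has already been copied. */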
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* Maps state locations within the context buffer. */
++typedef struct _gcsSTATE_MAP * gcsSTATE_MAP_PTR;
++typedef struct _gcsSTATE_MAP
++{
++ /* Index of the state in the context buffer. */
++ gctUINT index;
++
++ /* State mask. */
++ gctUINT32 mask;
++}
++gcsSTATE_MAP;
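++
++/* For a state at address A, map[A].index is the offset of that state's
++   value inside buffer->logical; an index of 0 means the state is not
++   part of the context buffer and is skipped during merging. */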
++
++/* Context buffer. */
++typedef struct _gcsCONTEXT * gcsCONTEXT_PTR;
++typedef struct _gcsCONTEXT
++{
++    /* For debugging: the number of the context buffer, in order of creation. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT num;
++#endif
++
++ /* Pointer to gckEVENT object. */
++ gckEVENT eventObj;
++
++ /* Context busy signal. */
++ gctSIGNAL signal;
++
++ /* Physical address of the context buffer. */
++ gctPHYS_ADDR physical;
++
++ /* Logical address of the context buffer. */
++ gctUINT32_PTR logical;
++
++ /* Hardware address of the context buffer. */
++ gctUINT32 address;
++
++ /* Pointer to the LINK commands. */
++ gctPOINTER link2D;
++ gctPOINTER link3D;
++
++ /* The number of pending state deltas. */
++ gctUINT deltaCount;
++
++ /* Pointer to the first delta to be applied. */
++ gcsSTATE_DELTA_PTR delta;
++
++ /* Next context buffer. */
++ gcsCONTEXT_PTR next;
++}
++gcsCONTEXT;
++
++typedef struct _gcsRECORD_ARRAY_MAP * gcsRECORD_ARRAY_MAP_PTR;
++struct _gcsRECORD_ARRAY_MAP
++{
++ /* User pointer key. */
++ gctUINT64 key;
++
++ /* Kernel memory buffer. */
++ gcsSTATE_DELTA_RECORD_PTR kData;
++
++ /* Next map. */
++ gcsRECORD_ARRAY_MAP_PTR next;
++
++};
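++
++/* The maps form a fixed-size circular singly-linked list of
++   gcdCONTEXT_BUFFER_COUNT entries; lookups walk the ring comparing the
++   user-pointer key, and a key of 0 marks a free slot. */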
++
++/* gckCONTEXT structure that holds the current context. */
++struct _gckCONTEXT
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckHARDWARE hardware;
++
++ /* Command buffer alignment. */
++ gctUINT32 alignment;
++ gctUINT32 reservedHead;
++ gctUINT32 reservedTail;
++
++ /* Context buffer metrics. */
++ gctSIZE_T stateCount;
++ gctUINT32 totalSize;
++ gctUINT32 bufferSize;
++ gctUINT32 linkIndex2D;
++ gctUINT32 linkIndex3D;
++ gctUINT32 linkIndexXD;
++ gctUINT32 entryOffset3D;
++ gctUINT32 entryOffsetXDFrom2D;
++ gctUINT32 entryOffsetXDFrom3D;
++
++ /* Dirty flags. */
++ gctBOOL dirty;
++ gctBOOL dirty2D;
++ gctBOOL dirty3D;
++ gcsCONTEXT_PTR dirtyBuffer;
++
++ /* State mapping. */
++ gcsSTATE_MAP_PTR map;
++
++ /* List of context buffers. */
++ gcsCONTEXT_PTR buffer;
++
++ /* A copy of the user record array. */
++ gctUINT recordArraySize;
++#if REMOVE_DUPLICATED_COPY_FROM_USER
++ gcsRECORD_ARRAY_MAP_PTR recordArrayMap;
++#else
++ gcsSTATE_DELTA_RECORD_PTR recordArray;
++#endif
++
++ /* Requested pipe select for context. */
++ gcePIPE_SELECT entryPipe;
++ gcePIPE_SELECT exitPipe;
++
++ /* Variables used for building state buffer. */
++ gctUINT32 lastAddress;
++ gctSIZE_T lastSize;
++ gctUINT32 lastIndex;
++ gctBOOL lastFixed;
++
++ gctUINT32 pipeSelectBytes;
++
++ /* Hint array. */
++#if gcdSECURE_USER
++ gctBOOL_PTR hint;
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
++ gcsPROFILER_COUNTERS latestProfiler;
++ gcsPROFILER_COUNTERS histroyProfiler;
++ gctUINT32 prevVSInstCount;
++ gctUINT32 prevVSBranchInstCount;
++ gctUINT32 prevVSTexInstCount;
++ gctUINT32 prevVSVertexCount;
++ gctUINT32 prevPSInstCount;
++ gctUINT32 prevPSBranchInstCount;
++ gctUINT32 prevPSTexInstCount;
++ gctUINT32 prevPSPixelCount;
++#endif
++};
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_context_h_ */
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/arch/gc_hal_kernel_hardware.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/arch/gc_hal_kernel_hardware.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/arch/gc_hal_kernel_hardware.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/arch/gc_hal_kernel_hardware.c 2015-07-27 23:13:06.186908111 +0200
+@@ -0,0 +1,8036 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#if VIVANTE_PROFILER_CONTEXT
++#include "gc_hal_kernel_context.h"
++#endif
++
++#define gcdDISABLE_FE_L2 1
++
++#define _GC_OBJ_ZONE gcvZONE_HARDWARE
++
++#define gcmSEMAPHORESTALL(buffer) \
++ do \
++ { \
++ /* Arm the PE-FE Semaphore. */ \
++ *buffer++ \
++ = gcmSETFIELDVALUE(0, AQ_COMMAND_LOAD_STATE_COMMAND, OPCODE, LOAD_STATE) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, COUNT, 1) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, ADDRESS, 0x0E02); \
++ \
++ *buffer++ \
++ = gcmSETFIELDVALUE(0, AQ_SEMAPHORE, SOURCE, FRONT_END) \
++ | gcmSETFIELDVALUE(0, AQ_SEMAPHORE, DESTINATION, PIXEL_ENGINE);\
++ \
++ /* STALL FE until PE is done flushing. */ \
++ *buffer++ \
++ = gcmSETFIELDVALUE(0, STALL_COMMAND, OPCODE, STALL); \
++ \
++ *buffer++ \
++ = gcmSETFIELDVALUE(0, STALL_STALL, SOURCE, FRONT_END) \
++ | gcmSETFIELDVALUE(0, STALL_STALL, DESTINATION, PIXEL_ENGINE); \
++ } while(0)
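++
++/* Note: the macro above emits four command words (a LOAD_STATE that arms
++   the FE-PE semaphore, followed by a STALL of the front end), advancing
++   the buffer pointer by four entries; callers must reserve space for all
++   four. */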
++
++typedef struct _gcsiDEBUG_REGISTERS * gcsiDEBUG_REGISTERS_PTR;
++typedef struct _gcsiDEBUG_REGISTERS
++{
++ gctSTRING module;
++ gctUINT index;
++ gctUINT shift;
++ gctUINT data;
++ gctUINT count;
++ gctUINT32 signature;
++}
++gcsiDEBUG_REGISTERS;
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++static gctBOOL
++_IsHardwareMatch(
++ IN gckHARDWARE Hardware,
++ IN gctINT32 ChipModel,
++ IN gctUINT32 ChipRevision
++ )
++{
++ return ((Hardware->identity.chipModel == ChipModel) &&
++ (Hardware->identity.chipRevision == ChipRevision));
++}
++
++static gceSTATUS
++_ResetGPU(
++ IN gckHARDWARE Hardware,
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++static gceSTATUS
++_IdentifyHardware(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++ )
++{
++ gceSTATUS status;
++
++ gctUINT32 chipIdentity;
++
++ gctUINT32 streamCount = 0;
++ gctUINT32 registerMax = 0;
++ gctUINT32 threadCount = 0;
++ gctUINT32 shaderCoreCount = 0;
++ gctUINT32 vertexCacheSize = 0;
++ gctUINT32 vertexOutputBufferSize = 0;
++ gctUINT32 pixelPipes = 0;
++ gctUINT32 instructionCount = 0;
++ gctUINT32 numConstants = 0;
++ gctUINT32 bufferSize = 0;
++ gctUINT32 varyingsCount = 0;
++#if gcdMULTI_GPU
++ gctUINT32 gpuCoreCount = 0;
++#endif
++
++ gcmkHEADER_ARG("Os=0x%x", Os);
++
++ /***************************************************************************
++ ** Get chip ID and revision.
++ */
++
++ /* Read chip identity register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00018,
++ &chipIdentity));
++
++    /* Special case for older graphics cores. */
++ if (((((gctUINT32) (chipIdentity)) >> (0 ? 31:24) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))))
++ {
++ Identity->chipModel = gcv500;
++ Identity->chipRevision = (((((gctUINT32) (chipIdentity)) >> (0 ? 15:12)) & ((gctUINT32) ((((1 ? 15:12) - (0 ? 15:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:12) - (0 ? 15:12) + 1)))))) );
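++        /* The expressions above are pre-expanded bit-field macros: the
++           first compares bits 31:24 of chipIdentity with 0x01, and the
++           revision is then read from bits 15:12. */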
++ }
++
++ else
++ {
++ /* Read chip identity register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00020,
++ (gctUINT32_PTR) &Identity->chipModel));
++
++ if (((Identity->chipModel & 0xFF00) == 0x0400)
++ && (Identity->chipModel != 0x0420)
++ && (Identity->chipModel != 0x0428))
++ {
++ Identity->chipModel = (gceCHIPMODEL) (Identity->chipModel & 0x0400);
++ }
++
++ /* Read CHIP_REV register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00024,
++ &Identity->chipRevision));
++
++ if ((Identity->chipModel == gcv300)
++ && (Identity->chipRevision == 0x2201)
++ )
++ {
++ gctUINT32 chipDate;
++ gctUINT32 chipTime;
++
++ /* Read date and time registers. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00028,
++ &chipDate));
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0002C,
++ &chipTime));
++
++ if ((chipDate == 0x20080814) && (chipTime == 0x12051100))
++ {
++ /* This IP has an ECO; put the correct revision in it. */
++ Identity->chipRevision = 0x1051;
++ }
++ }
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x000A8,
++ &Identity->productID));
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipModel=%X",
++ Identity->chipModel);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipRevision=%X",
++ Identity->chipRevision);
++
++
++ /***************************************************************************
++ ** Get chip features.
++ */
++
++ /* Read chip feature register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0001C,
++ &Identity->chipFeatures));
++
++#if gcdENABLE_3D
++ /* Disable fast clear on GC700. */
++ if (Identity->chipModel == gcv700)
++ {
++ Identity->chipFeatures
++ = ((((gctUINT32) (Identity->chipFeatures)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++#endif
++
++ if (((Identity->chipModel == gcv500) && (Identity->chipRevision < 2))
++ || ((Identity->chipModel == gcv300) && (Identity->chipRevision < 0x2000))
++ )
++ {
++        /* GC500 rev 1.x and GC300 rev < 2.0 don't have these registers. */
++ Identity->chipMinorFeatures = 0;
++ Identity->chipMinorFeatures1 = 0;
++ Identity->chipMinorFeatures2 = 0;
++ Identity->chipMinorFeatures3 = 0;
++ Identity->chipMinorFeatures4 = 0;
++ Identity->chipMinorFeatures5 = 0;
++ }
++ else
++ {
++        /* Read chip minor features register #0. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00034,
++ &Identity->chipMinorFeatures));
++
++ if (((((gctUINT32) (Identity->chipMinorFeatures)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))))
++ )
++ {
++            /* Read chip minor features register #1. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00074,
++ &Identity->chipMinorFeatures1));
++
++            /* Read chip minor features register #2. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00084,
++ &Identity->chipMinorFeatures2));
++
++ /*Identity->chipMinorFeatures2 &= ~(0x1 << 3);*/
++
++            /* Read chip minor features register #3. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00088,
++ &Identity->chipMinorFeatures3));
++
++
++            /* Read chip minor features register #4. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00094,
++ &Identity->chipMinorFeatures4));
++
++            /* Read chip minor features register #5. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x000A0,
++ &Identity->chipMinorFeatures5));
++ }
++ else
++ {
++            /* Chip doesn't have minor features registers #1 through #5. */
++ Identity->chipMinorFeatures1 = 0;
++ Identity->chipMinorFeatures2 = 0;
++ Identity->chipMinorFeatures3 = 0;
++ Identity->chipMinorFeatures4 = 0;
++ Identity->chipMinorFeatures5 = 0;
++ }
++ }
++
++ /* Get the Supertile layout in the hardware. */
++ if (((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 26:26) & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))))
++ || ((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 8:8) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))))
++ {
++ Identity->superTileMode = 2;
++ }
++ else if (((((gctUINT32) (Identity->chipMinorFeatures)) >> (0 ? 27:27) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))))
++ {
++ Identity->superTileMode = 1;
++ }
++ else
++ {
++ Identity->superTileMode = 0;
++ }
++
++ /* Exception for GC1000, revision 5035 & GC800, revision 4612 */
++ if (((Identity->chipModel == gcv1000) && ((Identity->chipRevision == 0x5035)
++ || (Identity->chipRevision == 0x5036)
++ || (Identity->chipRevision == 0x5037)
++ || (Identity->chipRevision == 0x5039)
++ || (Identity->chipRevision >= 0x5040)))
++ || ((Identity->chipModel == gcv800) && (Identity->chipRevision == 0x4612))
++ || ((Identity->chipModel == gcv600) && (Identity->chipRevision >= 0x4650))
++ || ((Identity->chipModel == gcv860) && (Identity->chipRevision == 0x4647))
++ || ((Identity->chipModel == gcv400) && (Identity->chipRevision >= 0x4633)))
++ {
++ Identity->superTileMode = 1;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipFeatures=0x%08X",
++ Identity->chipFeatures);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures=0x%08X",
++ Identity->chipMinorFeatures);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures1=0x%08X",
++ Identity->chipMinorFeatures1);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures2=0x%08X",
++ Identity->chipMinorFeatures2);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures3=0x%08X",
++ Identity->chipMinorFeatures3);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures4=0x%08X",
++ Identity->chipMinorFeatures4);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipMinorFeatures5=0x%08X",
++ Identity->chipMinorFeatures5);
++
++ /***************************************************************************
++ ** Get chip specs.
++ */
++
++ if (((((gctUINT32) (Identity->chipMinorFeatures)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))))
++ {
++ gctUINT32 specs, specs2, specs3, specs4;
++
++ /* Read gcChipSpecs register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00048,
++ &specs));
++
++ /* Extract the fields. */
++ registerMax = (((((gctUINT32) (specs)) >> (0 ? 7:4)) & ((gctUINT32) ((((1 ? 7:4) - (0 ? 7:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:4) - (0 ? 7:4) + 1)))))) );
++ threadCount = (((((gctUINT32) (specs)) >> (0 ? 11:8)) & ((gctUINT32) ((((1 ? 11:8) - (0 ? 11:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:8) - (0 ? 11:8) + 1)))))) );
++ shaderCoreCount = (((((gctUINT32) (specs)) >> (0 ? 24:20)) & ((gctUINT32) ((((1 ? 24:20) - (0 ? 24:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:20) - (0 ? 24:20) + 1)))))) );
++ vertexCacheSize = (((((gctUINT32) (specs)) >> (0 ? 16:12)) & ((gctUINT32) ((((1 ? 16:12) - (0 ? 16:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:12) - (0 ? 16:12) + 1)))))) );
++ vertexOutputBufferSize = (((((gctUINT32) (specs)) >> (0 ? 31:28)) & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1)))))) );
++ pixelPipes = (((((gctUINT32) (specs)) >> (0 ? 27:25)) & ((gctUINT32) ((((1 ? 27:25) - (0 ? 27:25) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:25) - (0 ? 27:25) + 1)))))) );
++
++ /* Read gcChipSpecs2 register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x00080,
++ &specs2));
++
++ instructionCount = (((((gctUINT32) (specs2)) >> (0 ? 15:8)) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1)))))) );
++ numConstants = (((((gctUINT32) (specs2)) >> (0 ? 31:16)) & ((gctUINT32) ((((1 ? 31:16) - (0 ? 31:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:16) - (0 ? 31:16) + 1)))))) );
++ bufferSize = (((((gctUINT32) (specs2)) >> (0 ? 7:0)) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1)))))) );
++
++ /* Read gcChipSpecs3 register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0008C,
++ &specs3));
++
++ varyingsCount = (((((gctUINT32) (specs3)) >> (0 ? 8:4)) & ((gctUINT32) ((((1 ? 8:4) - (0 ? 8:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:4) - (0 ? 8:4) + 1)))))) );
++#if gcdMULTI_GPU
++ gpuCoreCount = (((((gctUINT32) (specs3)) >> (0 ? 2:0)) & ((gctUINT32) ((((1 ? 2:0) - (0 ? 2:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:0) - (0 ? 2:0) + 1)))))) );
++#endif
++
++ /* Read gcChipSpecs4 register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os, Core,
++ 0x0009C,
++ &specs4));
++
++
++ streamCount = (((((gctUINT32) (specs4)) >> (0 ? 16:12)) & ((gctUINT32) ((((1 ? 16:12) - (0 ? 16:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:12) - (0 ? 16:12) + 1)))))) );
++ if (streamCount == 0)
++ {
++ /* Extract stream count from older register. */
++ streamCount = (((((gctUINT32) (specs)) >> (0 ? 3:0)) & ((gctUINT32) ((((1 ? 3:0) - (0 ? 3:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:0) - (0 ? 3:0) + 1)))))) );
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipSpecs1=0x%08X",
++ specs);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipSpecs2=0x%08X",
++ specs2);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipSpecs3=0x%08X",
++ specs3);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Identity: chipSpecs4=0x%08X",
++ specs4);
++ }
++
++ /* Get the number of pixel pipes. */
++ Identity->pixelPipes = gcmMAX(pixelPipes, 1);
++
++ /* Get the stream count. */
++ Identity->streamCount = (streamCount != 0)
++ ? streamCount
++ : (Identity->chipModel >= gcv1000) ? 4 : 1;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: streamCount=%u%s",
++ Identity->streamCount,
++ (streamCount == 0) ? " (default)" : "");
++
++ /* Get the vertex output buffer size. */
++ Identity->vertexOutputBufferSize = (vertexOutputBufferSize != 0)
++ ? 1 << vertexOutputBufferSize
++ : (Identity->chipModel == gcv400)
++ ? (Identity->chipRevision < 0x4000) ? 512
++ : (Identity->chipRevision < 0x4200) ? 256
++ : 128
++ : (Identity->chipModel == gcv530)
++ ? (Identity->chipRevision < 0x4200) ? 512
++ : 128
++ : 512;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: vertexOutputBufferSize=%u%s",
++ Identity->vertexOutputBufferSize,
++ (vertexOutputBufferSize == 0) ? " (default)" : "");
++
++ /* Get the maximum number of threads. */
++ Identity->threadCount = (threadCount != 0)
++ ? 1 << threadCount
++ : (Identity->chipModel == gcv400) ? 64
++ : (Identity->chipModel == gcv500) ? 128
++ : (Identity->chipModel == gcv530) ? 128
++ : 256;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: threadCount=%u%s",
++ Identity->threadCount,
++ (threadCount == 0) ? " (default)" : "");
++
++ /* Get the number of shader cores. */
++ Identity->shaderCoreCount = (shaderCoreCount != 0)
++ ? shaderCoreCount
++ : (Identity->chipModel >= gcv1000) ? 2
++ : 1;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: shaderCoreCount=%u%s",
++ Identity->shaderCoreCount,
++ (shaderCoreCount == 0) ? " (default)" : "");
++
++ /* Get the vertex cache size. */
++ Identity->vertexCacheSize = (vertexCacheSize != 0)
++ ? vertexCacheSize
++ : 8;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: vertexCacheSize=%u%s",
++ Identity->vertexCacheSize,
++ (vertexCacheSize == 0) ? " (default)" : "");
++
++ /* Get the maximum number of temporary registers. */
++ Identity->registerMax = (registerMax != 0)
++ /* Maximum of registerMax/4 registers are accessible to 1 shader */
++ ? 1 << registerMax
++ : (Identity->chipModel == gcv400) ? 32
++ : 64;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: registerMax=%u%s",
++ Identity->registerMax,
++ (registerMax == 0) ? " (default)" : "");
++
++ /* Get the instruction count. */
++ Identity->instructionCount = (instructionCount == 0) ? 256
++ : (instructionCount == 1) ? 1024
++ : (instructionCount == 2) ? 2048
++ : (instructionCount == 0xFF) ? 512
++ : 256;
++
++ if (Identity->instructionCount == 256)
++ {
++ if ((Identity->chipModel == gcv2000 && Identity->chipRevision == 0x5108)
++ || Identity->chipModel == gcv880)
++ {
++ Identity->instructionCount = 512;
++ }
++ else if (((((gctUINT32) (Identity->chipMinorFeatures3)) >> (0 ? 3:3) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))))
++ {
++ Identity->instructionCount = 512;
++ }
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: instructionCount=%u%s",
++ Identity->instructionCount,
++ (instructionCount == 0) ? " (default)" : "");
++
++ /* Get the number of constants. */
++ Identity->numConstants = (numConstants == 0) ? 168 : numConstants;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: numConstants=%u%s",
++ Identity->numConstants,
++ (numConstants == 0) ? " (default)" : "");
++
++ /* Get the buffer size. */
++ Identity->bufferSize = bufferSize;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Specs: bufferSize=%u%s",
++ Identity->bufferSize,
++ (bufferSize == 0) ? " (default)" : "");
++
++
++ if (varyingsCount != 0)
++ {
++ Identity->varyingsCount = varyingsCount;
++ }
++ else if (((((gctUINT32) (Identity->chipMinorFeatures1)) >> (0 ? 23:23) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))))
++ {
++ Identity->varyingsCount = 12;
++ }
++ else
++ {
++ Identity->varyingsCount = 8;
++ }
++
++    /* Some cores consume two varyings for position, so the maximum number of varying vectors must be reduced by one. */
++ if ((Identity->chipModel == gcv5000 && Identity->chipRevision == 0x5434) ||
++ (Identity->chipModel == gcv4000 && Identity->chipRevision == 0x5222) ||
++ (Identity->chipModel == gcv4000 && Identity->chipRevision == 0x5208) ||
++ (Identity->chipModel == gcv4000 && Identity->chipRevision == 0x5245) ||
++ (Identity->chipModel == gcv3000 && Identity->chipRevision == 0x5435) ||
++ (Identity->chipModel == gcv2200 && Identity->chipRevision == 0x5244) ||
++ (Identity->chipModel == gcv1500 && Identity->chipRevision == 0x5246) ||
++ ((Identity->chipModel == gcv2100 || Identity->chipModel == gcv2000) && Identity->chipRevision == 0x5108) ||
++ (Identity->chipModel == gcv880 && (Identity->chipRevision == 0x5107 || Identity->chipRevision == 0x5106)))
++ {
++ Identity->varyingsCount -= 1;
++ }
++
++ Identity->chip2DControl = 0;
++ if (Identity->chipModel == gcv320)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Os,
++ Core,
++ 0x0002C,
++ &data));
++
++ if ((data != 33956864) &&
++ ((Identity->chipRevision == 0x5007) ||
++ (Identity->chipRevision == 0x5220)))
++ {
++ Identity->chip2DControl |= 0xFF &
++ (Identity->chipRevision == 0x5220 ? 8 :
++ (Identity->chipRevision == 0x5007 ? 12 : 0));
++ }
++
++ if (Identity->chipRevision == 0x5007)
++ {
++            /* Disable rectangle splitting. */
++ Identity->chip2DControl |= 0x100;
++
++ /* Enable 2D Flush. */
++ Identity->chip2DControl |= 0x200;
++ }
++ }
++
++#if gcdMULTI_GPU
++#if gcdMULTI_GPU > 1
++ Identity->gpuCoreCount = gpuCoreCount + 1;
++#else
++ Identity->gpuCoreCount = 1;
++#endif
++#endif
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#define gcdDEBUG_MODULE_CLOCK_GATING 0
++#define gcdDISABLE_MODULE_CLOCK_GATING 0
++#define gcdDISABLE_FE_CLOCK_GATING 0
++#define gcdDISABLE_PE_CLOCK_GATING 0
++#define gcdDISABLE_SH_CLOCK_GATING 0
++#define gcdDISABLE_PA_CLOCK_GATING 0
++#define gcdDISABLE_SE_CLOCK_GATING 0
++#define gcdDISABLE_RA_CLOCK_GATING 0
++#define gcdDISABLE_RA_EZ_CLOCK_GATING 0
++#define gcdDISABLE_RA_HZ_CLOCK_GATING 0
++#define gcdDISABLE_TX_CLOCK_GATING 0
++
++#if gcdDEBUG_MODULE_CLOCK_GATING
++gceSTATUS
++_ConfigureModuleLevelClockGating(
++ gckHARDWARE Hardware
++ )
++{
++ gctUINT32 data;
++
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &data));
++
++#if gcdDISABLE_FE_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++#endif
++
++#if gcdDISABLE_PE_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)));
++#endif
++
++#if gcdDISABLE_SH_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++#endif
++
++#if gcdDISABLE_PA_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++#endif
++
++#if gcdDISABLE_SE_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++#endif
++
++#if gcdDISABLE_RA_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++#endif
++
++#if gcdDISABLE_TX_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)));
++#endif
++
++#if gcdDISABLE_RA_EZ_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)));
++#endif
++
++#if gcdDISABLE_RA_HZ_CLOCK_GATING
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17)));
++#endif
++
++ gcmkVERIFY_OK(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ data));
++
++#if gcdDISABLE_MODULE_CLOCK_GATING
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress +
++ 0x00100,
++ &data));
++
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++
++ gcmkVERIFY_OK(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00100,
++ data));
++#endif
++
++ return gcvSTATUS_OK;
++}
++#endif
++
++#if gcdPOWEROFF_TIMEOUT
++void
++_PowerTimerFunction(
++ gctPOINTER Data
++ )
++{
++ gckHARDWARE hardware = (gckHARDWARE)Data;
++ gcmkVERIFY_OK(
++ gckHARDWARE_SetPowerManagementState(hardware, gcvPOWER_OFF_TIMEOUT));
++}
++#endif
++
++static gceSTATUS
++_VerifyDMA(
++ IN gckOS Os,
++ IN gceCORE Core,
++ gctUINT32_PTR Address1,
++ gctUINT32_PTR Address2,
++ gctUINT32_PTR State1,
++ gctUINT32_PTR State2
++ )
++{
++ gceSTATUS status;
++ gctUINT32 i;
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x660, State1));
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x664, Address1));
++
++ for (i = 0; i < 500; i += 1)
++ {
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x660, State2));
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x664, Address2));
++
++ if (*Address1 != *Address2)
++ {
++ break;
++ }
++
++ if (*State1 != *State2)
++ {
++ break;
++ }
++ }
++
++OnError:
++ return status;
++}
++
++static gceSTATUS
++_DumpDebugRegisters(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gcsiDEBUG_REGISTERS_PTR Descriptor
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctUINT32 select;
++ gctUINT32 data = 0;
++ gctUINT i;
++
++ gcmkHEADER_ARG("Os=0x%X Descriptor=0x%X", Os, Descriptor);
++
++ gcmkPRINT_N(4, " %s debug registers:\n", Descriptor->module);
++
++ for (i = 0; i < Descriptor->count; i += 1)
++ {
++ select = i << Descriptor->shift;
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, Descriptor->index, select));
++#if gcdFPGA_BUILD
++ gcmkONERROR(gckOS_Delay(Os, 1000));
++#endif
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, Descriptor->data, &data));
++
++ gcmkPRINT_N(12, " [0x%02X] 0x%08X\n", i, data);
++ }
++
++ select = 0xF << Descriptor->shift;
++
++ for (i = 0; i < 500; i += 1)
++ {
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, Descriptor->index, select));
++#if gcdFPGA_BUILD
++ gcmkONERROR(gckOS_Delay(Os, 1000));
++#endif
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, Descriptor->data, &data));
++
++ if (data == Descriptor->signature)
++ {
++ break;
++ }
++ }
++
++ if (i == 500)
++ {
++ gcmkPRINT_N(4, " failed to obtain the signature (read 0x%08X).\n", data);
++ }
++ else
++ {
++ gcmkPRINT_N(8, " signature = 0x%08X (%d read attempt(s))\n", data, i + 1);
++ }
++
++OnError:
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_IsGPUPresent(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gcsHAL_QUERY_CHIP_IDENTITY identity;
++ gctUINT32 control;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &control));
++
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)));
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ control));
++
++ /* Identify the hardware. */
++ gcmkONERROR(_IdentifyHardware(Hardware->os,
++ Hardware->core,
++ &identity));
++
++ /* Check if these are the same values as saved before. */
++ if ((Hardware->identity.chipModel != identity.chipModel)
++ || (Hardware->identity.chipRevision != identity.chipRevision)
++ || (Hardware->identity.chipFeatures != identity.chipFeatures)
++ || (Hardware->identity.chipMinorFeatures != identity.chipMinorFeatures)
++ || (Hardware->identity.chipMinorFeatures1 != identity.chipMinorFeatures1)
++ || (Hardware->identity.chipMinorFeatures2 != identity.chipMinorFeatures2)
++ )
++ {
++ gcmkPRINT("[galcore]: GPU is not present.");
++ gcmkONERROR(gcvSTATUS_GPU_NOT_RESPONDING);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++_FlushCache(
++ gckHARDWARE Hardware,
++ gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gctUINT32 bytes, requested;
++ gctPOINTER buffer;
++
++ /* Get the size of the flush command. */
++ gcmkONERROR(gckHARDWARE_Flush(Hardware,
++ gcvFLUSH_ALL,
++ gcvNULL,
++ &requested));
++
++ /* Reserve space in the command queue. */
++ gcmkONERROR(gckCOMMAND_Reserve(Command,
++ requested,
++ &buffer,
++ &bytes));
++
++ /* Append a flush. */
++ gcmkONERROR(gckHARDWARE_Flush(
++ Hardware, gcvFLUSH_ALL, buffer, &bytes
++ ));
++
++ /* Execute the command queue. */
++ gcmkONERROR(gckCOMMAND_Execute(Command, requested));
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
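++
++/* _FlushCache uses the driver's usual two-pass command emission: the first
++** gckHARDWARE_Flush call with a gcvNULL buffer only queries the size in
++** bytes, gckCOMMAND_Reserve then reserves that much space in the command
++** queue, and the second gckHARDWARE_Flush call writes the actual flush
++** commands into the reserved buffer before gckCOMMAND_Execute kicks off the
++** queue. */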
++
++gctBOOL
++_IsGPUIdle(
++ IN gctUINT32 Idle
++ )
++{
++ return (((((gctUINT32) (Idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 1:1)) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 3:3)) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 4:4)) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 6:6)) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 7:7)) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1)))))) )
++ && (((((gctUINT32) (Idle)) >> (0 ? 2:2)) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) )
++ ;
++}
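++
++/* The expanded bit-field tests above simply check that bits 0 through 7 of
++** the idle register are all set; an equivalent readable form would be:
++**
++**     return (Idle & 0xFF) == 0xFF;
++*/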
++
++/******************************************************************************\
++****************************** gckHARDWARE API code *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckHARDWARE_Construct
++**
++** Construct a new gckHARDWARE object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an initialized gckOS object.
++**
++** gceCORE Core
++** Specified core.
++**
++** OUTPUT:
++**
++** gckHARDWARE * Hardware
++** Pointer to a variable that will hold the pointer to the gckHARDWARE
++** object.
++*/
++gceSTATUS
++gckHARDWARE_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gckHARDWARE * Hardware
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware = gcvNULL;
++ gctUINT16 data = 0xff00;
++ gctPOINTER pointer = gcvNULL;
++#if gcdMULTI_GPU_AFFINITY
++ gctUINT32 control;
++#endif
++
++ gcmkHEADER_ARG("Os=0x%x", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Hardware != gcvNULL);
++
++ /* Enable the GPU. */
++ gcmkONERROR(gckOS_SetGPUPower(Os, Core, gcvTRUE, gcvTRUE));
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ 0x00000900));
++
++ /* Allocate the gckHARDWARE object. */
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckHARDWARE),
++ &pointer));
++
++ hardware = (gckHARDWARE) pointer;
++
++ /* Initialize the gckHARDWARE object. */
++ hardware->object.type = gcvOBJ_HARDWARE;
++ hardware->os = Os;
++ hardware->core = Core;
++
++ /* Identify the hardware. */
++ gcmkONERROR(_IdentifyHardware(Os, Core, &hardware->identity));
++
++ /* Determine the hardware type */
++ switch (hardware->identity.chipModel)
++ {
++ case gcv350:
++ case gcv355:
++ hardware->type = gcvHARDWARE_VG;
++ break;
++
++ case gcv200:
++ case gcv300:
++ case gcv320:
++ case gcv328:
++ case gcv420:
++ case gcv428:
++ hardware->type = gcvHARDWARE_2D;
++ break;
++
++ default:
++#if gcdMULTI_GPU_AFFINITY
++ hardware->type = (Core == gcvCORE_MAJOR) ? gcvHARDWARE_3D : gcvHARDWARE_OCL;
++#else
++ hardware->type = gcvHARDWARE_3D;
++#endif
++
++ if(hardware->identity.chipModel == gcv880 && hardware->identity.chipRevision == 0x5107)
++ {
++			/* Set the outstanding limit. */
++ gctUINT32 axi_ot;
++ gcmkONERROR(gckOS_ReadRegisterEx(Os, Core, 0x00414, &axi_ot));
++ axi_ot = (axi_ot & (~0xFF)) | 0x00010;
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, 0x00414, axi_ot));
++ }
++
++
++ if ((((((gctUINT32) (hardware->identity.chipFeatures)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) ))
++ {
++ hardware->type = (gceHARDWARE_TYPE) (hardware->type | gcvHARDWARE_2D);
++ }
++ }
++
++ hardware->powerBaseAddress
++ = ((hardware->identity.chipModel == gcv300)
++ && (hardware->identity.chipRevision < 0x2000))
++ ? 0x0100
++ : 0x0000;
++
++ /* _ResetGPU need powerBaseAddress. */
++ status = _ResetGPU(hardware, Os, Core);
++
++ if (status != gcvSTATUS_OK)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "_ResetGPU failed: status=%d\n", status);
++ }
++
++#if gcdMULTI_GPU
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0055C,
++#if gcdDISABLE_FE_L2
++ 0x00FFFFFF));
++#else
++ 0x00FFFF05));
++#endif
++
++#elif gcdMULTI_GPU_AFFINITY
++ control = ((((gctUINT32) (0x00FF0A05)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) << (0 ? 27:27))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) << (0 ? 27:27)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0055C,
++ control));
++#endif
++
++ hardware->powerMutex = gcvNULL;
++
++ hardware->mmuVersion
++ = (((((gctUINT32) (hardware->identity.chipMinorFeatures1)) >> (0 ? 28:28)) & ((gctUINT32) ((((1 ? 28:28) - (0 ? 28:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 28:28) - (0 ? 28:28) + 1)))))) );
++
++ /* Determine whether bug fixes #1 are present. */
++ hardware->extraEventStates = ((((gctUINT32) (hardware->identity.chipMinorFeatures1)) >> (0 ? 3:3) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) == (0x0 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))));
++
++ /* Check if big endian */
++ hardware->bigEndian = (*(gctUINT8 *)&data == 0xff);
++
++ /* Initialize the fast clear. */
++ gcmkONERROR(gckHARDWARE_SetFastClear(hardware, -1, -1));
++
++#if !gcdENABLE_128B_MERGE
++
++ if (((((gctUINT32) (hardware->identity.chipMinorFeatures2)) >> (0 ? 21:21) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))))
++ {
++ /* 128B merge is turned on by default. Disable it. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os, Core, 0x00558, 0));
++ }
++
++#endif
++
++ /* Set power state to ON. */
++ hardware->chipPowerState = gcvPOWER_ON;
++ hardware->clockState = gcvTRUE;
++ hardware->powerState = gcvTRUE;
++ hardware->lastWaitLink = ~0U;
++ hardware->lastEnd = ~0U;
++ hardware->globalSemaphore = gcvNULL;
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ hardware->powerOnFscaleVal = 64;
++#endif
++
++ gcmkONERROR(gckOS_CreateMutex(Os, &hardware->powerMutex));
++ gcmkONERROR(gckOS_CreateSemaphore(Os, &hardware->globalSemaphore));
++ hardware->startIsr = gcvNULL;
++ hardware->stopIsr = gcvNULL;
++
++#if gcdPOWEROFF_TIMEOUT
++ hardware->powerOffTimeout = gcdPOWEROFF_TIMEOUT;
++
++ gcmkVERIFY_OK(gckOS_CreateTimer(Os,
++ _PowerTimerFunction,
++ (gctPOINTER)hardware,
++ &hardware->powerOffTimer));
++#endif
++
++ gcmkONERROR(gckOS_AtomConstruct(Os, &hardware->pageTableDirty));
++ gcmkONERROR(gckOS_AtomConstruct(Os, &hardware->pendingEvent));
++
++#if gcdLINK_QUEUE_SIZE
++ hardware->linkQueue.front = 0;
++ hardware->linkQueue.rear = 0;
++ hardware->linkQueue.count = 0;
++#endif
++
++ /* Enable power management by default. */
++ hardware->powerManagement = gcvTRUE;
++
++ /* Disable profiler by default */
++ hardware->gpuProfiler = gcvFALSE;
++
++#if defined(LINUX) || defined(__QNXNTO__) || defined(UNDERCE)
++ if (hardware->mmuVersion)
++ {
++ hardware->endAfterFlushMmuCache = gcvTRUE;
++ }
++ else
++#endif
++ {
++ hardware->endAfterFlushMmuCache = gcvFALSE;
++ }
++
++ gcmkONERROR(gckOS_QueryOption(Os, "mmu", (gctUINT32_PTR)&hardware->enableMMU));
++
++ hardware->minFscaleValue = 1;
++
++ /* Return pointer to the gckHARDWARE object. */
++ *Hardware = hardware;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Hardware=0x%x", *Hardware);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (hardware != gcvNULL)
++ {
++ /* Turn off the power. */
++ gcmkVERIFY_OK(gckOS_SetGPUPower(Os, Core, gcvFALSE, gcvFALSE));
++
++ if (hardware->globalSemaphore != gcvNULL)
++ {
++ /* Destroy the global semaphore. */
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(Os,
++ hardware->globalSemaphore));
++ }
++
++ if (hardware->powerMutex != gcvNULL)
++ {
++ /* Destroy the power mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, hardware->powerMutex));
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ if (hardware->powerOffTimer != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Os, hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Os, hardware->powerOffTimer));
++ }
++#endif
++
++ if (hardware->pageTableDirty != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, hardware->pageTableDirty));
++ }
++
++ if (hardware->pendingEvent != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, hardware->pendingEvent));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, hardware));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
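++
++/* A minimal construction sketch (illustrative only; error paths elided, and
++** gckHARDWARE_InitializeHardware additionally assumes the owning gckKERNEL
++** object has been attached):
++**
++**     gckHARDWARE hardware = gcvNULL;
++**     gcmkONERROR(gckHARDWARE_Construct(Os, gcvCORE_MAJOR, &hardware));
++**     gcmkONERROR(gckHARDWARE_InitializeHardware(hardware));
++**     ...
++**     gcmkVERIFY_OK(gckHARDWARE_Destroy(hardware));
++*/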
++
++/*******************************************************************************
++**
++** gckHARDWARE_Destroy
++**
++**  Destroy a gckHARDWARE object.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object that needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Destroy(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Destroy the power semaphore. */
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(Hardware->os,
++ Hardware->globalSemaphore));
++
++ /* Destroy the power mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Hardware->os, Hardware->powerMutex));
++
++#if gcdPOWEROFF_TIMEOUT
++ gcmkVERIFY_OK(gckOS_StopTimer(Hardware->os, Hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Hardware->os, Hardware->powerOffTimer));
++#endif
++
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Hardware->os, Hardware->pageTableDirty));
++
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Hardware->os, Hardware->pendingEvent));
++
++ gcmkVERIFY_OK(gckOS_FreeNonPagedMemory(
++ Hardware->os,
++ Hardware->functionBytes,
++ Hardware->functionPhysical,
++ Hardware->functionLogical
++ ));
++
++ /* Mark the object as unknown. */
++ Hardware->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the object. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Hardware->os, Hardware));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_GetType
++**
++** Get the hardware type.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gceHARDWARE_TYPE * Type
++** Pointer to a variable that receives the type of hardware object.
++*/
++gceSTATUS
++gckHARDWARE_GetType(
++ IN gckHARDWARE Hardware,
++ OUT gceHARDWARE_TYPE * Type
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++ gcmkVERIFY_ARGUMENT(Type != gcvNULL);
++
++ *Type = Hardware->type;
++
++ gcmkFOOTER_ARG("*Type=%d", *Type);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_InitializeHardware
++**
++** Initialize the hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_InitializeHardware(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gctUINT32 baseAddress;
++ gctUINT32 chipRev;
++ gctUINT32 control;
++ gctUINT32 data;
++ gctUINT32 regPMC = 0;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Read the chip revision register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00024,
++ &chipRev));
++
++ if (chipRev != Hardware->identity.chipRevision)
++ {
++ /* Chip is not there! */
++ gcmkONERROR(gcvSTATUS_CONTEXT_LOSSED);
++ }
++
++ /* Disable isolate GPU bit. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)))));
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &control));
++
++ /* Enable debug register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11)))));
++
++ /* Reset memory counters. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ ~0U));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ 0));
++
++ /* Get the system's physical base address. */
++ gcmkONERROR(gckOS_GetBaseAddress(Hardware->os, &baseAddress));
++
++    /* Program the base addresses. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0041C,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00418,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00428,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00420,
++ baseAddress));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00424,
++ baseAddress));
++
++ {
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress +
++ 0x00100,
++ &data));
++
++ /* Enable clock gating. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ if ((Hardware->identity.chipRevision == 0x4301)
++ || (Hardware->identity.chipRevision == 0x4302)
++ )
++ {
++ /* Disable stall module level clock gating for 4.3.0.1 and 4.3.0.2
++ ** revisions. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)));
++ }
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00100,
++ data));
++
++#if gcdENABLE_3D
++ /* Disable PE clock gating on revs < 5.0 when HZ is present without a
++ ** bug fix. */
++ if ((Hardware->identity.chipRevision < 0x5000)
++ && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_HZ)
++ && ((((gctUINT32) (Hardware->identity.chipMinorFeatures1)) >> (0 ? 9:9) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) == (0x0 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))))
++ )
++ {
++ if (regPMC == 0)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &regPMC));
++ }
++
++ /* Disable PE clock gating. */
++ regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)));
++ }
++
++#endif
++ }
++
++ if (Hardware->identity.chipModel == gcv4000 &&
++ ((Hardware->identity.chipRevision == 0x5208) || (Hardware->identity.chipRevision == 0x5222)))
++ {
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ ((((gctUINT32) (0x01590880)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23)))));
++ }
++
++ if (Hardware->identity.chipModel == gcv1000 &&
++ (Hardware->identity.chipRevision == 0x5039 ||
++ Hardware->identity.chipRevision == 0x5040))
++ {
++ gctUINT32 pulseEater;
++
++ pulseEater = ((((gctUINT32) (0x01590880)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ ((((gctUINT32) (pulseEater)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17)))));
++ }
++
++ if ((gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_HALTI2) == gcvSTATUS_FALSE)
++ || (Hardware->identity.chipRevision < 0x5422)
++ )
++ {
++ if (regPMC == 0)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &regPMC));
++ }
++
++ regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1))))))) << (0 ? 15:15))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:15) - (0 ? 15:15) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:15) - (0 ? 15:15) + 1))))))) << (0 ? 15:15)));
++ }
++
++ if (_IsHardwareMatch(Hardware, gcv2000, 0x5108))
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00480,
++ &data));
++
++ /* Set FE bus to one, TX bus to zero */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00480,
++ data));
++ }
++
++ gcmkONERROR(
++ gckHARDWARE_SetMMU(Hardware,
++ Hardware->kernel->mmu->pageTableLogical));
++
++ if (Hardware->identity.chipModel >= gcv400
++ && Hardware->identity.chipModel != gcv420)
++ {
++ if (regPMC == 0)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &regPMC));
++ }
++
++ /* Disable PA clock gating. */
++ regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++ }
++
++ /* Limit 2D outstanding request. */
++ if (_IsHardwareMatch(Hardware, gcv880, 0x5107))
++ {
++ gctUINT32 axi_ot;
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00414, &axi_ot));
++ axi_ot = (axi_ot & (~0xFF)) | 0x00010;
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00414, axi_ot));
++ }
++
++ if (Hardware->identity.chip2DControl & 0xFF)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ &data));
++
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (Hardware->identity.chip2DControl & 0xFF) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ data));
++ }
++
++ if (_IsHardwareMatch(Hardware, gcv1000, 0x5035))
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ &data));
++
++ /* Disable HZ-L2. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)));
++
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ data));
++ }
++
++ if (_IsHardwareMatch(Hardware, gcv4000, 0x5222))
++ {
++ if (regPMC == 0)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &regPMC));
++ }
++
++ /* Disable TX clock gating. */
++ regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)));
++ }
++
++ if (_IsHardwareMatch(Hardware, gcv880, 0x5106))
++ {
++ Hardware->kernel->timeOut = 140 * 1000;
++ }
++
++ if (regPMC == 0)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &regPMC));
++ }
++
++ /* Disable RA HZ clock gating. */
++ regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17)));
++
++ /* Disable RA EZ clock gating. */
++ regPMC = ((((gctUINT32) (regPMC)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)));
++
++ if (regPMC != 0)
++ {
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ regPMC));
++ }
++
++ if (_IsHardwareMatch(Hardware, gcv2000, 0x5108)
++ || _IsHardwareMatch(Hardware, gcv320, 0x5007)
++ || _IsHardwareMatch(Hardware, gcv880, 0x5106)
++ || _IsHardwareMatch(Hardware, gcv400, 0x4645)
++ )
++ {
++        /* Update GPU AXI cache attribute. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00008,
++ 0x00002200));
++ }
++
++
++ if ((Hardware->identity.chipRevision > 0x5420)
++ && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_PIPE_3D))
++ {
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ &data));
++
++ /* Disable internal DFS. */
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1))))))) << (0 ? 18:18))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1))))))) << (0 ? 18:18)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ data));
++ }
++
++#if gcdDEBUG_MODULE_CLOCK_GATING
++ _ConfigureModuleLevelClockGating(Hardware);
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryMemory
++**
++** Query the amount of memory available on the hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * InternalSize
++** Pointer to a variable that will hold the size of the internal video
++**          memory in bytes. If 'InternalSize' is gcvNULL, no information about
++**          the internal memory will be returned.
++**
++** gctUINT32 * InternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * InternalAlignment
++**          Pointer to a variable that will hold the alignment requirement for
++**          the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctSIZE_T * ExternalSize
++** Pointer to a variable that will hold the size of the external video
++**          memory in bytes. If 'ExternalSize' is gcvNULL, no information about
++**          the external memory will be returned.
++**
++** gctUINT32 * ExternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * ExternalAlignment
++**          Pointer to a variable that will hold the alignment requirement for
++**          the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * HorizontalTileSize
++** Number of horizontal pixels per tile. If 'HorizontalTileSize' is
++**          gcvNULL, no horizontal tile size will be returned.
++**
++** gctUINT32 * VerticalTileSize
++** Number of vertical pixels per tile. If 'VerticalTileSize' is
++**          gcvNULL, no vertical tile size will be returned.
++*/
++gceSTATUS
++gckHARDWARE_QueryMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (InternalSize != gcvNULL)
++ {
++ /* No internal memory. */
++ *InternalSize = 0;
++ }
++
++ if (ExternalSize != gcvNULL)
++ {
++ /* No external memory. */
++ *ExternalSize = 0;
++ }
++
++ if (HorizontalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *HorizontalTileSize = 4;
++ }
++
++ if (VerticalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *VerticalTileSize = 4;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*InternalSize=%lu *InternalBaseAddress=0x%08x "
++ "*InternalAlignment=0x%08x *ExternalSize=%lu "
++                   "*ExternalBaseAddress=0x%08x *ExternalAlignment=0x%08x "
++ "*HorizontalTileSize=%u *VerticalTileSize=%u",
++ gcmOPT_VALUE(InternalSize),
++ gcmOPT_VALUE(InternalBaseAddress),
++ gcmOPT_VALUE(InternalAlignment),
++ gcmOPT_VALUE(ExternalSize),
++ gcmOPT_VALUE(ExternalBaseAddress),
++ gcmOPT_VALUE(ExternalAlignment),
++ gcmOPT_VALUE(HorizontalTileSize),
++ gcmOPT_VALUE(VerticalTileSize));
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryChipIdentity
++**
++** Query the identity of the hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++** Pointer to the identity structure.
++**
++*/
++gceSTATUS
++gckHARDWARE_QueryChipIdentity(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++ )
++{
++ gctUINT32 features;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Identity != gcvNULL);
++
++ /* Return chip model and revision. */
++ Identity->chipModel = Hardware->identity.chipModel;
++ Identity->chipRevision = Hardware->identity.chipRevision;
++
++ /* Return feature set. */
++ features = Hardware->identity.chipFeatures;
++
++ if ((((((gctUINT32) (features)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ /* Override fast clear by command line. */
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (Hardware->allowFastClear) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++
++ if ((((((gctUINT32) (features)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) ))
++ {
++ /* Override compression by command line. */
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (Hardware->allowCompression) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ }
++
++ /* Mark 2D pipe as available for GC500.0 through GC500.2 and GC300,
++ ** since they did not have this bit. */
++ if (((Hardware->identity.chipModel == gcv500) && (Hardware->identity.chipRevision <= 2))
++ || (Hardware->identity.chipModel == gcv300)
++ )
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++ }
++
++ Identity->chipFeatures = features;
++
++ /* Return minor features. */
++ Identity->chipMinorFeatures = Hardware->identity.chipMinorFeatures;
++ Identity->chipMinorFeatures1 = Hardware->identity.chipMinorFeatures1;
++ Identity->chipMinorFeatures2 = Hardware->identity.chipMinorFeatures2;
++ Identity->chipMinorFeatures3 = Hardware->identity.chipMinorFeatures3;
++ Identity->chipMinorFeatures4 = Hardware->identity.chipMinorFeatures4;
++ Identity->chipMinorFeatures5 = Hardware->identity.chipMinorFeatures5;
++
++ /* Return chip specs. */
++ Identity->streamCount = Hardware->identity.streamCount;
++ Identity->registerMax = Hardware->identity.registerMax;
++ Identity->threadCount = Hardware->identity.threadCount;
++ Identity->shaderCoreCount = Hardware->identity.shaderCoreCount;
++ Identity->vertexCacheSize = Hardware->identity.vertexCacheSize;
++ Identity->vertexOutputBufferSize = Hardware->identity.vertexOutputBufferSize;
++ Identity->pixelPipes = Hardware->identity.pixelPipes;
++ Identity->instructionCount = Hardware->identity.instructionCount;
++ Identity->numConstants = Hardware->identity.numConstants;
++ Identity->bufferSize = Hardware->identity.bufferSize;
++ Identity->varyingsCount = Hardware->identity.varyingsCount;
++ Identity->superTileMode = Hardware->identity.superTileMode;
++#if gcdMULTI_GPU
++ Identity->gpuCoreCount = Hardware->identity.gpuCoreCount;
++#endif
++ Identity->chip2DControl = Hardware->identity.chip2DControl;
++
++ Identity->productID = Hardware->identity.productID;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SplitMemory
++**
++** Split a hardware specific memory address into a pool and offset.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctUINT32 Address
++** Address in hardware specific format.
++**
++** OUTPUT:
++**
++** gcePOOL * Pool
++** Pointer to a variable that will hold the pool type for the address.
++**
++** gctUINT32 * Offset
++** Pointer to a variable that will hold the offset for the address.
++*/
++gceSTATUS
++gckHARDWARE_SplitMemory(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Addres=0x%08x", Hardware, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Pool != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Offset != gcvNULL);
++
++ if (Hardware->mmuVersion == 0)
++ {
++ /* Dispatch on memory type. */
++ switch ((((((gctUINT32) (Address)) >> (0 ? 31:31)) & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1)))))) ))
++ {
++ case 0x0:
++ /* System memory. */
++ *Pool = gcvPOOL_SYSTEM;
++ break;
++
++ case 0x1:
++ /* Virtual memory. */
++ *Pool = gcvPOOL_VIRTUAL;
++ break;
++
++ default:
++ /* Invalid memory type. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Return offset of address. */
++ *Offset = (((((gctUINT32) (Address)) >> (0 ? 30:0)) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1)))))) );
++ }
++ else
++ {
++ *Pool = gcvPOOL_SYSTEM;
++ *Offset = Address;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Pool=%d *Offset=0x%08x", *Pool, *Offset);
++ return gcvSTATUS_OK;
++}
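++
++/* With the legacy MMU (mmuVersion == 0) the 32-bit hardware address packs
++** the pool into bit 31 and the offset into bits 30:0; the decoded form of
++** the field macros above is simply:
++**
++**     *Pool   = (Address >> 31) ? gcvPOOL_VIRTUAL : gcvPOOL_SYSTEM;
++**     *Offset = Address & 0x7FFFFFFF;
++*/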
++
++/*******************************************************************************
++**
++** gckHARDWARE_Execute
++**
++** Kickstart the hardware's command processor with an initialized command
++** buffer.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctUINT32 Address
++** Hardware address of command buffer.
++**
++** gctSIZE_T Bytes
++** Number of bytes for the prefetch unit (until after the first LINK).
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Execute(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Address,
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++ gctUINT32 control;
++
++ gcmkHEADER_ARG("Hardware=0x%x Address=0x%x Bytes=%lu",
++ Hardware, Address, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Enable all events. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00014, ~0U));
++
++ /* Write address register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00654, Address));
++
++ /* Build control register. */
++ control = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) ((Bytes + 7) >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ /* Set big endian */
++ if (Hardware->bigEndian)
++ {
++ control |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 21:20) - (0 ? 21:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:20) - (0 ? 21:20) + 1))))))) << (0 ? 21:20))) | (((gctUINT32) (0x2 & ((gctUINT32) ((((1 ? 21:20) - (0 ? 21:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:20) - (0 ? 21:20) + 1))))))) << (0 ? 21:20)));
++ }
++
++ /* Write control register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00658, control));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Started command buffer @ 0x%08x",
++ Address);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
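++
++/* Decoded, the command-control write above packs the prefetch count in
++** 64-bit words into bits 15:0 and the enable flag into bit 16 (bits 21:20
++** select endianness); stripped of the field macros:
++**
++**     control = (1 << 16) | ((Bytes + 7) >> 3);
++**     if (Hardware->bigEndian)
++**         control |= 0x2 << 20;
++*/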
++
++/*******************************************************************************
++**
++** gckHARDWARE_WaitLink
++**
++** Append a WAIT/LINK command sequence at the specified location in the command
++** queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** WAIT/LINK command sequence at or gcvNULL just to query the size of the
++** WAIT/LINK command sequence.
++**
++** gctUINT32 Offset
++** Offset into command buffer required for alignment.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the WAIT/LINK command
++** sequence. If 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** by the WAIT/LINK command sequence. If 'Bytes' is gcvNULL, nothing will
++** be returned.
++**
++** gctUINT32 * WaitOffset
++** Pointer to a variable that will receive the offset of the WAIT command
++** from the specified logcial pointer.
++**          from the specified logical pointer.
++**
++** gctSIZE_T * WaitSize
++** Pointer to a variable that will receive the number of bytes used by
++**          the WAIT command. If 'WaitSize' is gcvNULL nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_WaitLink(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN OUT gctUINT32 * Bytes,
++ OUT gctUINT32 * WaitOffset,
++ OUT gctUINT32 * WaitSize
++ )
++{
++ static const gctUINT waitCount = 200;
++
++ gceSTATUS status;
++ gctUINT32 address;
++ gctUINT32_PTR logical;
++ gctUINT32 bytes;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Offset=0x%08x *Bytes=%lu",
++ Hardware, Logical, Offset, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical != gcvNULL) || (Bytes != gcvNULL));
++
++#if gcdMULTI_GPU && !gcdDISABLE_FE_L2
++ bytes = gcmALIGN(Offset + 40, 8) - Offset;
++#else
++ /* Compute number of bytes required. */
++ bytes = gcmALIGN(Offset + 16, 8) - Offset;
++#endif
++ /* Cast the input pointer. */
++ logical = (gctUINT32_PTR) Logical;
++
++ if (logical != gcvNULL)
++ {
++ /* Not enough space? */
++ if (*Bytes < bytes)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Convert logical into hardware specific address. */
++ gcmkONERROR(gckHARDWARE_ConvertLogical(Hardware, logical, gcvFALSE, &address));
++
++ /* Store the WAIT/LINK address. */
++ Hardware->lastWaitLink = address;
++
++ /* Append WAIT(count). */
++ logical[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (waitCount) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++#if gcdMULTI_GPU && !gcdDISABLE_FE_L2
++ logical[2] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x0D & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | gcvCORE_3D_0_MASK;
++
++ logical[3] = 0;
++
++ /* LoadState(AQFlush, 1), flush. */
++ logical[4] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[5] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ logical[6] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x0D & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | gcvCORE_3D_ALL_MASK;
++
++ logical[7] = 0;
++
++ /* Append LINK(2, address). */
++ logical[8] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (bytes >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[9] = address;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: WAIT %u", address, waitCount
++ );
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH 0x%x", address + 8, logical[3]);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: LINK 0x%08x, #%lu",
++ address + 16, address, bytes
++ );
++#else
++
++ /* Append LINK(2, address). */
++ logical[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (bytes >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[3] = address;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: WAIT %u", address, waitCount
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: LINK 0x%08x, #%lu",
++ address + 8, address, bytes
++ );
++#endif
++ if (WaitOffset != gcvNULL)
++ {
++ /* Return the offset pointer to WAIT command. */
++ *WaitOffset = 0;
++ }
++
++ if (WaitSize != gcvNULL)
++ {
++ /* Return number of bytes used by the WAIT command. */
++#if gcdMULTI_GPU && !gcdDISABLE_FE_L2
++ *WaitSize = 32;
++#else
++ *WaitSize = 8;
++#endif
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the WAIT/LINK command
++ ** sequence. */
++ *Bytes = bytes;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu *WaitOffset=0x%x *WaitSize=%lu",
++ gcmOPT_VALUE(Bytes), gcmOPT_VALUE(WaitOffset),
++ gcmOPT_VALUE(WaitSize));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
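++
++/* In the default single-GPU path the emitted sequence decodes to two 64-bit
++** commands; stripped of the field macros:
++**
++**     logical[0] = (0x07 << 27) | waitCount;    (WAIT, count in bits 15:0)
++**     logical[2] = (0x08 << 27) | (bytes >> 3); (LINK, prefetch in 8-byte units)
++**     logical[3] = address;                     (LINK target)
++**
++** Because the LINK initially points back at the WAIT itself, the front end
++** spins in place here until the kernel later patches this WAIT/LINK (via the
++** saved lastWaitLink address) to chain in the next command buffer. */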
++
++/*******************************************************************************
++**
++** gckHARDWARE_End
++**
++** Append an END command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** END command at or gcvNULL just to query the size of the END command.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the END command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the END command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_End(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gctUINT32 address;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x *Bytes=%lu",
++ Hardware, Logical, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append END. */
++ logical[0] =
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x02 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: END", Logical);
++
++ /* Make sure the CPU writes out the data to memory. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, Logical));
++
++ gcmkONERROR(gckHARDWARE_ConvertLogical(Hardware, logical, gcvFALSE, &address));
++
++ Hardware->lastEnd = address;
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the END command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
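++
++/* Decoded, END is a single 64-bit command slot whose first word carries only
++** the opcode:
++**
++**     logical[0] = 0x02 << 27;    (END opcode in bits 31:27)
++*/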
++
++#if gcdMULTI_GPU
++gceSTATUS
++gckHARDWARE_ChipEnable(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gceCORE_3D_MASK ChipEnable,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x ChipEnable=0x%x *Bytes=%lu",
++ Hardware, Logical, ChipEnable, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append CHIPENABLE. */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x0D & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ChipEnable;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: CHIPENABLE 0x%x", Logical, ChipEnable);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the CHIPENABLE command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
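++
++/* Decoded, CHIPENABLE is opcode 0x0D in bits 31:27 OR-ed directly with the
++** 3D core mask, e.g. (0x0D << 27) | gcvCORE_3D_0_MASK to address core 0
++** only. */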
++
++/*******************************************************************************
++**
++** gckHARDWARE_Nop
++**
++** Append a NOP command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** NOP command at or gcvNULL just to query the size of the NOP command.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the NOP command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the NOP command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_Nop(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x *Bytes=%lu",
++ Hardware, Logical, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ /* Append NOP. */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x03 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "0x%x: NOP", Logical);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the NOP command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
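++
++/* Decoded, NOP is opcode 0x03 in bits 31:27 and fills one 64-bit slot:
++**
++**     logical[0] = 0x03 << 27;    (NOP opcode in bits 31:27)
++*/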
++
++/*******************************************************************************
++**
++** gckHARDWARE_Event
++**
++** Append an EVENT command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** the EVENT command at or gcvNULL just to query the size of the EVENT
++** command.
++**
++** gctUINT8 Event
++** Event ID to program.
++**
++** gceKERNEL_WHERE FromWhere
++** Location of the pipe to send the event.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the EVENT command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the EVENT command. If 'Bytes' is gcvNULL, nothing will be
++** returned.
++*/
++gceSTATUS
++gckHARDWARE_Event(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT8 Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gctUINT size;
++ gctUINT32 destination = 0;
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Event=%u FromWhere=%d *Bytes=%lu",
++ Hardware, Logical, Event, FromWhere, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++ gcmkVERIFY_ARGUMENT(Event < 32);
++
++#if gcdMULTI_GPU
++ if (FromWhere == gcvKERNEL_COMMAND) FromWhere = gcvKERNEL_PIXEL;
++#endif
++
++ /* Determine the size of the command. */
++
++ size = (Hardware->extraEventStates && (FromWhere == gcvKERNEL_PIXEL))
++ ? gcmALIGN(8 + (1 + 5) * 4, 8) /* EVENT + 5 STATES */
++ : 8;
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < size)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ switch (FromWhere)
++ {
++ case gcvKERNEL_COMMAND:
++ /* From command processor. */
++ destination = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ break;
++
++ case gcvKERNEL_PIXEL:
++ /* From pixel engine. */
++ destination = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++        /* Append EVENT(Event, destination). */
++ logical[0] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[1] = ((((gctUINT32) (destination)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (Event) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)));
++
++ /* Make sure the event ID gets written out before GPU can access it. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, logical + 1));
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ {
++ gctUINT32 phys;
++ gckOS_GetPhysicalAddress(Hardware->os, Logical, &phys);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%08x: EVENT %d", phys, Event);
++ }
++#endif
++
++ /* Append the extra states. These are needed for the chips that do not
++ ** support back-to-back events due to the async interface. The extra
++ ** states add the necessary delay to ensure that event IDs do not
++ ** collide. */
++ if (size > 8)
++ {
++ logical[2] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0100) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++ logical[3] = 0;
++ logical[4] = 0;
++ logical[5] = 0;
++ logical[6] = 0;
++ logical[7] = 0;
++ }
++
++#if gcdINTERRUPT_STATISTIC
++ if (Event < gcmCOUNTOF(Hardware->kernel->eventObj->queues))
++ {
++ gckOS_AtomSetMask(Hardware->pendingEvent, 1 << Event);
++ }
++#endif
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the EVENT command. */
++ *Bytes = size;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_PipeSelect
++**
++** Append a PIPESELECT command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** the PIPESELECT command at or gcvNULL just to query the size of the
++** PIPESELECT command.
++**
++** gcePIPE_SELECT Pipe
++** Pipe value to select.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the PIPESELECT command.
++** If 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the PIPESELECT command. If 'Bytes' is gcvNULL, nothing will be
++** returned.
++*/
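++/* Usage sketch (illustrative only; 'command', 'logical' and 'available'
++** are hypothetical caller-side names, not part of this patch). Per the
++** contract above, the function is typically called twice:
++**
++**     gctUINT32 bytes;
++**     // 1. Query the required size with Logical == gcvNULL.
++**     gcmkONERROR(gckHARDWARE_PipeSelect(Hardware, gcvNULL, gcvPIPE_3D, &bytes));
++**     // 2. Reserve that much space, then append for real.
++**     gcmkONERROR(gckCOMMAND_Reserve(command, bytes, &logical, &available));
++**     gcmkONERROR(gckHARDWARE_PipeSelect(Hardware, logical, gcvPIPE_3D, &bytes));
++*/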
++gceSTATUS
++gckHARDWARE_PipeSelect(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gcePIPE_SELECT Pipe,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Pipe=%d *Bytes=%lu",
++ Hardware, Logical, Pipe, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ /* Append a PipeSelect. */
++ if (Logical != gcvNULL)
++ {
++ gctUINT32 flush, stall;
++
++ if (*Bytes < 32)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ flush = (Pipe == gcvPIPE_2D)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ : ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++
++ stall = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LoadState(AQFlush, 1), flush. */
++ logical[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[1]
++ = flush;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH 0x%x", logical, flush);
++
++        /* LoadState(AQSemaphore, 1), stall. */
++ logical[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ logical[3]
++ = stall;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: SEMAPHORE 0x%x", logical + 2, stall);
++
++ /* Stall, stall. */
++ logical[4] = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++ logical[5] = stall;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: STALL 0x%x", logical + 4, stall);
++
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ logical[6]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ logical[7] = (Pipe == gcvPIPE_2D)
++ ? 0x1
++ : 0x0;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: PIPE %d", logical + 6, Pipe);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the PIPESELECT command. */
++ *Bytes = 32;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Link
++**
++** Append a LINK command at the specified location in the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** the LINK command at or gcvNULL just to query the size of the LINK
++** command.
++**
++** gctUINT32 FetchAddress
++** Hardware address of destination of LINK.
++**
++** gctSIZE_T FetchSize
++** Number of bytes in destination of LINK.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the LINK command. If
++** 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the LINK command. If 'Bytes' is gcvNULL, nothing will be returned.
++*/
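++/* Usage sketch (illustrative; 'tail', 'fetchAddress' and 'fetchSize' are
++** hypothetical caller-side names), following the same query-then-append
++** contract:
++**
++**     gctUINT32 linkBytes;
++**     gcmkONERROR(gckHARDWARE_Link(Hardware, gcvNULL, 0, 0, &linkBytes));
++**     ... reserve 'linkBytes' (8) at the end of the queue ...
++**     gcmkONERROR(gckHARDWARE_Link(Hardware, tail, fetchAddress, fetchSize,
++**                                  &linkBytes));
++*/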
++gceSTATUS
++gckHARDWARE_Link(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT32 FetchSize,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T bytes;
++ gctUINT32 link;
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x FetchAddress=0x%x FetchSize=%lu "
++ "*Bytes=%lu",
++ Hardware, Logical, FetchAddress, FetchSize,
++ gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT((Logical == gcvNULL) || (Bytes != gcvNULL));
++
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < 8)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ gcmkONERROR(
++ gckOS_WriteMemory(Hardware->os, logical + 1, FetchAddress));
++
++ /* Make sure the address got written before the LINK command. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, logical + 1));
++
++ /* Compute number of 64-byte aligned bytes to fetch. */
++ bytes = gcmALIGN(FetchAddress + FetchSize, 64) - FetchAddress;
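++        /* Worked example (hypothetical values): with FetchAddress = 0x1010
++        ** and FetchSize = 0x30, gcmALIGN(0x1040, 64) = 0x1040 and bytes =
++        ** 0x30; with FetchSize = 0x34 the end rounds up to 0x1080 and
++        ** bytes = 0x70. The LINK below encodes bytes >> 3, i.e. a prefetch
++        ** count in 64-bit words. */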
++
++ /* Append LINK(bytes / 8), FetchAddress. */
++ link = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (bytes >> 3) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ gcmkONERROR(
++ gckOS_WriteMemory(Hardware->os, logical, link));
++
++ /* Memory barrier. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, logical));
++
++#if gcdLINK_QUEUE_SIZE && !gcdPROCESS_ADDRESS_SPACE
++ if ((Hardware->kernel->virtualCommandBuffer)
++ && (Hardware->kernel->stuckDump > 2)
++ )
++ {
++ gctBOOL in;
++
++ gcmkVERIFY_OK(gckCOMMAND_AddressInKernelCommandBuffer(
++ Hardware->kernel->command, FetchAddress, &in));
++
++ if (in == gcvFALSE)
++ {
++ /* Record user command buffer and context buffer link
++ ** information for stuck dump.
++ **/
++ gckLINKQUEUE_Enqueue(
++ &Hardware->linkQueue, FetchAddress, FetchAddress + (gctUINT)bytes);
++ }
++ }
++#endif
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the LINK command. */
++ *Bytes = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_UpdateQueueTail
++**
++** Update the tail of the command queue.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of the start of the command queue.
++**
++** gctUINT32 Offset
++** Offset into the command queue of the tail (last command).
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_UpdateQueueTail(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x Offset=0x%08x",
++ Hardware, Logical, Offset);
++
++ /* Verify the hardware. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Force a barrier. */
++ gcmkONERROR(
++ gckOS_MemoryBarrier(Hardware->os, Logical));
++
++ /* Notify gckKERNEL object of change. */
++#if gcdMULTI_GPU
++ gcmkONERROR(
++ gckKERNEL_Notify(Hardware->kernel,
++ 0,
++ gcvNOTIFY_COMMAND_QUEUE,
++ gcvFALSE));
++#else
++ gcmkONERROR(
++ gckKERNEL_Notify(Hardware->kernel,
++ gcvNOTIFY_COMMAND_QUEUE,
++ gcvFALSE));
++#endif
++
++ if (status == gcvSTATUS_CHIP_NOT_READY)
++ {
++ gcmkONERROR(gcvSTATUS_DEVICE);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_ConvertLogical
++**
++** Convert a logical system address into a hardware specific address.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address to convert.
++**
++** gctBOOL InUserSpace
++**          gcvTRUE if the memory is in user space.
++**
++** gctUINT32* Address
++** Return hardware specific address.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
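++/* Worked example for the legacy-MMU branch below (hypothetical numbers):
++** a CPU physical address of 0x48100000 with baseAddress 0x40000000 yields
++** GPU address 0x08100000, with bit 31 forced to 0 by the conversion at
++** the end of the function. */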
++gceSTATUS
++gckHARDWARE_ConvertLogical(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctBOOL InUserSpace,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++ gceSTATUS status;
++ gctUINT32 baseAddress;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x InUserSpace=%d",
++ Hardware, Logical, InUserSpace);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Convert logical address into a physical address. */
++ if (InUserSpace)
++ {
++ gcmkONERROR(gckOS_UserLogicalToPhysical(Hardware->os, Logical, &address));
++ }
++ else
++ {
++ gcmkONERROR(gckOS_GetPhysicalAddress(Hardware->os, Logical, &address));
++ }
++
++ /* For old MMU, get GPU address according to baseAddress. */
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(gckOS_GetBaseAddress(Hardware->os, &baseAddress));
++
++ /* Subtract base address to get a GPU address. */
++ gcmkASSERT(address >= baseAddress);
++ address -= baseAddress;
++ }
++
++ /* Return hardware specific address. */
++ *Address = (Hardware->mmuVersion == 0)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0))) | (((gctUINT32) ((gctUINT32) (address) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0)))
++ : address;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_Interrupt
++**
++** Process an interrupt.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctBOOL InterruptValid
++**          If gcvTRUE, this function will read the interrupt acknowledge
++**          register, store the data, and return whether or not the interrupt
++**          is ours. If gcvFALSE, this function will read the interrupt
++**          acknowledge register and combine it with any stored value to
++**          handle the event notifications.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
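++/* Typical split-handler usage (a sketch; assumes the single-GPU build
++** without the gcdMULTI_GPU CoreId argument):
++**
++**     top half (ISR):      gckHARDWARE_Interrupt(Hardware, gcvTRUE);
++**                          -- may return gcvSTATUS_NOT_OUR_INTERRUPT
++**     bottom half/thread:  gckHARDWARE_Interrupt(Hardware, gcvFALSE);
++**                          -- runs gckEVENT_Notify outside IRQ context
++*/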
++gceSTATUS
++gckHARDWARE_Interrupt(
++ IN gckHARDWARE Hardware,
++#if gcdMULTI_GPU
++ IN gctUINT CoreId,
++#endif
++ IN gctBOOL InterruptValid
++ )
++{
++ gckEVENT eventObj;
++ gctUINT32 data = 0;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x InterruptValid=%d", Hardware, InterruptValid);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Extract gckEVENT object. */
++ eventObj = Hardware->kernel->eventObj;
++ gcmkVERIFY_OBJECT(eventObj, gcvOBJ_EVENT);
++
++ if (InterruptValid)
++ {
++ /* Read AQIntrAcknowledge register. */
++#if gcdMULTI_GPU
++ if (Hardware->core == gcvCORE_MAJOR)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterByCoreId(Hardware->os,
++ Hardware->core,
++ CoreId,
++ 0x00010,
++ &data));
++ }
++ else
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00010,
++ &data));
++ }
++#else
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00010,
++ &data));
++#endif
++
++ if (data == 0)
++ {
++ /* Not our interrupt. */
++ status = gcvSTATUS_NOT_OUR_INTERRUPT;
++ }
++ else
++ {
++
++#if gcdINTERRUPT_STATISTIC
++ gckOS_AtomClearMask(Hardware->pendingEvent, data);
++#endif
++
++ /* Inform gckEVENT of the interrupt. */
++ status = gckEVENT_Interrupt(eventObj,
++#if gcdMULTI_GPU
++ CoreId,
++#endif
++ data);
++ }
++ }
++ else
++ {
++ /* Handle events. */
++ status = gckEVENT_Notify(eventObj, 0);
++ }
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryCommandBuffer
++**
++** Query the command buffer alignment and number of reserved bytes.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Alignment
++** Pointer to a variable receiving the alignment for each command.
++**
++** gctSIZE_T * ReservedHead
++** Pointer to a variable receiving the number of reserved bytes at the
++** head of each command buffer.
++**
++** gctSIZE_T * ReservedTail
++** Pointer to a variable receiving the number of bytes reserved at the
++** tail of each command buffer.
++*/
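++/* Resulting command buffer layout (illustrative, using the constants
++** returned below):
++**
++**     [ 32B head: SelectPipe() ][ commands, 8-byte aligned ... ][ 8B tail: Link() ]
++*/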
++gceSTATUS
++gckHARDWARE_QueryCommandBuffer(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 * Alignment,
++ OUT gctUINT32 * ReservedHead,
++ OUT gctUINT32 * ReservedTail
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Alignment != gcvNULL)
++ {
++ /* Align every 8 bytes. */
++ *Alignment = 8;
++ }
++
++ if (ReservedHead != gcvNULL)
++ {
++ /* Reserve space for SelectPipe(). */
++ *ReservedHead = 32;
++ }
++
++ if (ReservedTail != gcvNULL)
++ {
++ /* Reserve space for Link(). */
++ *ReservedTail = 8;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Alignment=%lu *ReservedHead=%lu *ReservedTail=%lu",
++ gcmOPT_VALUE(Alignment), gcmOPT_VALUE(ReservedHead),
++ gcmOPT_VALUE(ReservedTail));
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QuerySystemMemory
++**
++**  Query the size and base address of the system memory.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * SystemSize
++** Pointer to a variable that receives the maximum size of the system
++** memory.
++**
++** gctUINT32 * SystemBaseAddress
++**          Pointer to a variable that receives the base address for system
++** memory.
++*/
++gceSTATUS
++gckHARDWARE_QuerySystemMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (SystemSize != gcvNULL)
++ {
++ /* Maximum system memory can be 2GB. */
++ *SystemSize = 1U << 31;
++ }
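++    /* Note: 1U << 31 is 0x80000000, i.e. 2 GB. Bit 31 of a legacy GPU
++    ** address is consumed by the address-type field (see the base address
++    ** constructed below), which is what caps system memory at 2 GB. */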
++
++ if (SystemBaseAddress != gcvNULL)
++ {
++ /* Set system memory base address. */
++ *SystemBaseAddress = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*SystemSize=%lu *SystemBaseAddress=%lu",
++ gcmOPT_VALUE(SystemSize), gcmOPT_VALUE(SystemBaseAddress));
++ return gcvSTATUS_OK;
++}
++
++#if gcdENABLE_3D
++/*******************************************************************************
++**
++** gckHARDWARE_QueryShaderCaps
++**
++** Query the shader capabilities.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT * VertexUniforms
++** Pointer to a variable receiving the number of uniforms in the vertex
++** shader.
++**
++** gctUINT * FragmentUniforms
++** Pointer to a variable receiving the number of uniforms in the
++** fragment shader.
++**
++** gctBOOL * UnifiedUnforms
++**          Pointer to a variable receiving whether the uniforms are unified.
++*/
++gceSTATUS
++gckHARDWARE_QueryShaderCaps(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT * VertexUniforms,
++ OUT gctUINT * FragmentUniforms,
++ OUT gctBOOL * UnifiedUnforms
++ )
++{
++ gctBOOL unifiedConst;
++ gctUINT32 vsConstMax;
++ gctUINT32 psConstMax;
++ gctUINT32 vsConstBase;
++ gctUINT32 psConstBase;
++ gctUINT32 ConstMax;
++
++ gcmkHEADER_ARG("Hardware=0x%x VertexUniforms=0x%x "
++ "FragmentUniforms=0x%x UnifiedUnforms=0x%x",
++ Hardware, VertexUniforms,
++ FragmentUniforms, UnifiedUnforms);
++
++ {if (Hardware->identity.numConstants > 256){ unifiedConst = gcvTRUE; vsConstBase = 0xC000; psConstBase = 0xC000; ConstMax = Hardware->identity.numConstants; vsConstMax = 256; psConstMax = ConstMax - vsConstMax;}else if (Hardware->identity.numConstants == 256){ if (Hardware->identity.chipModel == gcv2000 && Hardware->identity.chipRevision == 0x5118) { unifiedConst = gcvFALSE; vsConstBase = 0x1400; psConstBase = 0x1C00; vsConstMax = 256; psConstMax = 64; ConstMax = 320; } else { unifiedConst = gcvFALSE; vsConstBase = 0x1400; psConstBase = 0x1C00; vsConstMax = 256; psConstMax = 256; ConstMax = 512; }}else{ unifiedConst = gcvFALSE; vsConstBase = 0x1400; psConstBase = 0x1C00; vsConstMax = 168; psConstMax = 64; ConstMax = 232;}};
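++    /* The single-line macro expansion above decodes as (values copied
++    ** verbatim from it):
++    **
++    **   numConstants > 256                      : unified, base 0xC000, VS 256, PS rest
++    **   numConstants == 256, GC2000 rev 0x5118  : split, VS 256, PS 64
++    **   numConstants == 256, otherwise          : split, VS 256, PS 256
++    **   numConstants < 256                      : split, VS 168, PS 64
++    */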
++
++ if (VertexUniforms != gcvNULL)
++ {
++ /* Return the vs shader const count. */
++ *VertexUniforms = vsConstMax;
++ }
++
++ if (FragmentUniforms != gcvNULL)
++ {
++ /* Return the ps shader const count. */
++ *FragmentUniforms = psConstMax;
++ }
++
++ if (UnifiedUnforms != gcvNULL)
++ {
++        /* Return whether the uniforms are unified. */
++ *UnifiedUnforms = unifiedConst;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetMMU
++**
++** Set the page table base address.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of the page table.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_SetMMU(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical
++ )
++{
++ gceSTATUS status;
++ gctUINT32 address = 0;
++ gctUINT32 idle;
++ gctUINT32 timer = 0, delay = 1;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x", Hardware, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Convert the logical address into physical address. */
++ gcmkONERROR(gckOS_GetPhysicalAddress(Hardware->os, Logical, &address));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Setting page table to 0x%08X",
++ address);
++
++ /* Write the AQMemoryFePageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00400,
++ address));
++
++ /* Write the AQMemoryRaPageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00410,
++ address));
++
++ /* Write the AQMemoryTxPageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00404,
++ address));
++
++
++ /* Write the AQMemoryPePageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00408,
++ address));
++
++ /* Write the AQMemoryPezPageTable register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0040C,
++ address));
++ }
++ else if (Hardware->enableMMU == gcvTRUE)
++ {
++ /* Execute prepared command sequence. */
++ gcmkONERROR(gckHARDWARE_Execute(
++ Hardware,
++ Hardware->functions[gcvHARDWARE_FUNCTION_MMU].address,
++ Hardware->functions[gcvHARDWARE_FUNCTION_MMU].bytes
++ ));
++
++ /* Wait until MMU configure finishes. */
++ do
++ {
++ gckOS_Delay(Hardware->os, delay);
++
++ gcmkONERROR(gckOS_ReadRegisterEx(
++ Hardware->os,
++ Hardware->core,
++ 0x00004,
++ &idle));
++
++ timer += delay;
++ delay *= 2;
++
++#if gcdGPU_TIMEOUT
++ if (timer >= Hardware->kernel->timeOut)
++ {
++                /* Even if the hardware did not reset correctly, let software
++                ** continue so it does not get stuck. It will time out again
++                ** and try to recover the GPU on the next timeout.
++ */
++ gcmkONERROR(gcvSTATUS_DEVICE);
++ }
++#endif
++ }
++ while (!(((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ));
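++        /* The loop above polls with exponential backoff: 'delay' doubles
++        ** each pass (1, 2, 4, ... ms), so after n passes 'timer' has
++        ** accumulated 2^n - 1 ms before the gcdGPU_TIMEOUT check fires. */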
++
++ /* Enable MMU. */
++ gcmkONERROR(gckOS_WriteRegisterEx(
++ Hardware->os,
++ Hardware->core,
++ 0x0018C,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (gcvTRUE) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_FlushMMU
++**
++** Flush the page table.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++** Pointer to an gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
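++/* Shape of the MMU-v1 flush sequence built below (illustrative summary of
++** the sixteen buffer[] words):
++**
++**     FLUSH caches -> SEMAPHORE(FE:PE) -> STALL -> LINK (drain FE FIFO)
++**  -> FLUSH MMU cache -> SEMAPHORE(FE:PE) -> STALL -> LINK (to next slot)
++*/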
++gceSTATUS
++gckHARDWARE_FlushMMU(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command;
++ gctUINT32_PTR buffer;
++ gctUINT32 bufferSize;
++ gctPOINTER pointer = gcvNULL;
++ gctUINT32 flushSize;
++ gctUINT32 count;
++ gctUINT32 physical;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Verify the gckCOMMAND object pointer. */
++ command = Hardware->kernel->command;
++
++ /* Flush the memory controller. */
++ if (Hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(gckCOMMAND_Reserve(
++ command, 8, &pointer, &bufferSize
++ ));
++
++ buffer = (gctUINT32_PTR) pointer;
++
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E04) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++
++ gcmkONERROR(gckCOMMAND_Execute(command, 8));
++ }
++ else
++ {
++ flushSize = 16 * 4;
++
++ gcmkONERROR(gckCOMMAND_Reserve(
++ command, flushSize, &pointer, &bufferSize
++ ));
++
++ buffer = (gctUINT32_PTR) pointer;
++
++ count = ((gctUINT)bufferSize - flushSize + 7) >> 3;
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(command->os, buffer, &physical));
++
++ /* Flush cache. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ /* Arm the PE-FE Semaphore. */
++ buffer[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[3]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ buffer[4]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ buffer[5]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LINK to next slot to flush FE FIFO. */
++ buffer[6]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[7]
++ = physical + 8 * gcmSIZEOF(gctUINT32);
++
++ /* Flush MMU cache. */
++ buffer[8]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[9]
++ = (((((gctUINT32) (~0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) & ((((gctUINT32) (~0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))));
++
++ /* Arm the PE-FE Semaphore. */
++ buffer[10]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[11]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ buffer[12]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ buffer[13]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LINK to next slot to flush FE FIFO. */
++ buffer[14]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (count) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[15]
++ = physical + flushSize;
++
++ gcmkONERROR(gckCOMMAND_Execute(command, flushSize));
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_SetMMUStates(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER MtlbAddress,
++ IN gceMMU_MODE Mode,
++ IN gctPOINTER SafeAddress,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gceSTATUS status;
++ gctUINT32 config, address;
++ gctUINT32_PTR buffer;
++ gctBOOL ace;
++ gctUINT32 reserveBytes = 16 + 4 * 4;
++
++ gctBOOL config2D;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Hardware->mmuVersion != 0);
++
++ ace = gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_ACE);
++
++ if (ace)
++ {
++ reserveBytes += 8;
++ }
++
++ config2D = gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_PIPE_3D)
++ && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_PIPE_2D);
++
++ if (config2D)
++ {
++ reserveBytes +=
++ /* Pipe Select. */
++ 4 * 4
++ /* Configure MMU States. */
++ + 4 * 4
++ /* Semaphore stall */
++ + 4 * 8;
++ }
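++    /* Illustrative totals for the accounting above: the base sequence is
++    ** 16 + 4 * 4 = 32 bytes; ACE support adds 8 more; a combined 2D/3D
++    ** core adds 16 + 16 + 32 = 64 more, for a worst case of 104 bytes. */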
++
++ /* Convert logical address into physical address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Hardware->os, MtlbAddress, &config));
++
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Hardware->os, SafeAddress, &address));
++
++ if (address & 0x3F)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ switch (Mode)
++ {
++ case gcvMMU_MODE_1K:
++ if (config & 0x3FF)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ config |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ break;
++
++ case gcvMMU_MODE_4K:
++ if (config & 0xFFF)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ config |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if (Logical != gcvNULL)
++ {
++ buffer = Logical;
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = config;
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0060) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = address;
++
++ if (ace)
++ {
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0068) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = 0;
++ }
++
++ do{*buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));} while(0);;
++
++ if (config2D)
++ {
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = 0x1;
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = config;
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0060) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = address;
++
++ do{*buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));} while(0);;
++
++ /* LoadState(AQPipeSelect, 1), pipe. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E00) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++ = 0x0;
++
++ do{*buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))); *buffer++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));} while(0);;
++ }
++
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ *Bytes = reserveBytes;
++ }
++
++ /* Return the status. */
++ gcmkFOOTER_NO();
++ return status;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdPROCESS_ADDRESS_SPACE
++/*******************************************************************************
++**
++** gckHARDWARE_ConfigMMU
++**
++** Append an MMU configuration command sequence at the specified location in
++** the command queue. The sequence consists of the MMU configuration itself,
++** a LINK and a WAIT/LINK. The LINK is fetched and parsed with the new MMU
++** configuration in effect.
++**
++** If the MMU configuration has not changed between commits, change the last
++** WAIT/LINK to link to ENTRY.
++**
++** -+-----------+-----------+-----------------------------------------
++** | WAIT/LINK | WAIT/LINK |
++** -+-----------+-----------+-----------------------------------------
++** | /|\
++** \|/ |
++** +--------------------+
++** | ENTRY | ... | LINK |
++** +--------------------+
++**
++** If the MMU configuration has changed between commits, change the last
++** WAIT/LINK to link to the MMU CONFIGURATION command sequence, which ends
++** with an EVENT and an END. When the interrupt handler receives this event,
++** it restarts the FE at ENTRY to continue command buffer execution.
++**
++** -+-----------+-------------------+---------+---------+-----------+--
++** | WAIT/LINK | MMU CONFIGURATION | EVENT | END | WAIT/LINK |
++** -+-----------+-------------------+---------+---------+-----------+--
++** | /|\ /|\
++** +-------------+ |
++** +--------------------+
++** | ENTRY | ... | LINK |
++** +--------------------+
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command queue to append
++** the command sequence at, or gcvNULL to just query the size of the
++** command sequence.
++**
++** gctPOINTER MtlbLogical
++** Pointer to the current Master TLB.
++**
++** gctUINT32 Offset
++** Offset into command buffer required for alignment.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes available for the command
++** sequence. If 'Logical' is gcvNULL, this argument will be ignored.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** by the command sequence. If 'Bytes' is gcvNULL, nothing will
++** be returned.
++**
++** gctSIZE_T * WaitLinkOffset
++** Pointer to a variable that will receive the offset of the WAIT/LINK
++** command from the specified logical pointer.
++** If 'WaitLinkOffset' is gcvNULL, nothing will be returned.
++**
++** gctSIZE_T * WaitLinkBytes
++** Pointer to a variable that will receive the number of bytes used by
++** the WAIT/LINK commands.
++** If 'WaitLinkBytes' is gcvNULL, nothing will be returned.
++*/
++gceSTATUS
++gckHARDWARE_ConfigMMU(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctPOINTER MtlbLogical,
++ IN gctUINT32 Offset,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctSIZE_T * WaitLinkOffset,
++ OUT gctSIZE_T * WaitLinkBytes
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T bytes, bytesAligned;
++ gctUINT32 config;
++ gctUINT32_PTR buffer = (gctUINT32_PTR) Logical;
++ gctUINT32 physical;
++ gctUINT32 event;
++
++ gcmkHEADER_ARG("Hardware=0x%08X Logical=0x%08x MtlbLogical=0x%08X",
++ Hardware, Logical, MtlbLogical);
++
++ bytes
++ /* Flush cache states. */
++ = 18 * 4
++ /* MMU configuration states. */
++ + 6 * 4
++ /* EVENT. */
++ + 2 * 4
++ /* END. */
++ + 2 * 4
++ /* WAIT/LINK. */
++ + 4 * 4;
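++
++ /* That is 32 words in total: 128 bytes before alignment. */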
++
++ /* Compute number of bytes required. */
++ bytesAligned = gcmALIGN(Offset + bytes, 8) - Offset;
++
++ if (buffer != gcvNULL)
++ {
++ if (MtlbLogical == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Get physical address of this command buffer segment. */
++ gcmkONERROR(gckOS_GetPhysicalAddress(Hardware->os, buffer, &physical));
++
++ /* Get physical address of Master TLB. */
++ gcmkONERROR(gckOS_GetPhysicalAddress(Hardware->os, MtlbLogical, &config));
++
++ config |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++
++ /* Flush cache. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ /* Flush tile status cache. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0594) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ /* Arm the PE-FE Semaphore. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LINK to next slot to flush FE FIFO. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = physical + 10 * gcmSIZEOF(gctUINT32);
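++
++ /* Target arithmetic: the LINK pair occupies words 8-9 of this segment,
++ ** so 'physical + 10 words' is the address of the very next command (the
++ ** MMU configuration LOAD_STATE below). The second LINK further down
++ ** targets word 18 the same way. */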
++
++ /* Configure MMU. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++
++ = (((((gctUINT32) (~0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) & ((((gctUINT32) (~0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))));
++
++ /* Arm the PE-FE Semaphore. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* LINK to next slot to flush FE FIFO. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = physical + 18 * 4;
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0061) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *buffer++
++ = config;
++
++ /* Arm the PE-FE Semaphore. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* STALL FE until PE is done flushing. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Event 29. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ event = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ event = ((((gctUINT32) (event)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (29) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)));
++
++ *buffer++
++ = event;
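++
++ /* Event ID 29 (bits 4:0) with the source-select bit (bit 6) set; as
++ ** described in the function header, the interrupt handler uses this
++ ** event to restart the FE at ENTRY. */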
++
++ /* Append END. */
++ *buffer++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x02 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ *Bytes = bytesAligned;
++ }
++
++ if (WaitLinkOffset != gcvNULL)
++ {
++ *WaitLinkOffset = bytes - 4 * 4;
++ }
++
++ if (WaitLinkBytes != gcvNULL)
++ {
++#if gcdMULTI_GPU
++ *WaitLinkBytes = 40;
++#else
++ *WaitLinkBytes = 4 * 4;
++#endif
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckHARDWARE_BuildVirtualAddress
++**
++** Build a virtual address.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gctUINT32 Index
++** Index into page table.
++**
++** gctUINT32 Offset
++** Offset into page.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++** Pointer to a variable receiving the hardware address.
++*/
++gceSTATUS
++gckHARDWARE_BuildVirtualAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Index=%u Offset=%u", Hardware, Index, Offset);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Build virtual address. */
++ *Address = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1))))))) << (0 ? 31:31)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0))) | (((gctUINT32) ((gctUINT32) (Offset | (Index << 12)) & ((gctUINT32) ((((1 ? 30:0) - (0 ? 30:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 30:0) - (0 ? 30:0) + 1))))))) << (0 ? 30:0)));
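++
++ /* Resulting layout: bit 31 flags the address as virtual, bits 30:12
++ ** carry the page-table index and bits 11:0 the page offset (4 KB pages,
++ ** assuming Offset never exceeds the page size). */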
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckHARDWARE_GetIdle(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Wait,
++ OUT gctUINT32 * Data
++ )
++{
++ gceSTATUS status;
++ gctUINT32 idle = 0;
++ gctINT retry, poll, pollCount;
++ gctUINT32 address;
++
++ gcmkHEADER_ARG("Hardware=0x%x Wait=%d", Hardware, Wait);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++
++ /* If we have to wait, use 100 polls per millisecond; otherwise poll once. */
++ pollCount = Wait ? 100 : 1;
++
++ /* At most, try for 1 second. */
++ for (retry = 0; retry < 1000; ++retry)
++ {
++ /* Poll the registers up to pollCount times before sleeping. */
++ for (poll = pollCount; poll > 0; --poll)
++ {
++ /* Read register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00004, &idle));
++
++ /* Read the current FE address. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00664,
++ &address));
++
++
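++ /* lastEnd + 8 is the address just past the final END command (an END
++ ** is two 32-bit words), i.e. the FE has consumed the whole queue. */
++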
++ /* See if we have to wait for FE idle. */
++ if (_IsGPUIdle(idle)
++ && (address == Hardware->lastEnd + 8)
++ )
++ {
++ /* FE is idle. */
++ break;
++ }
++ }
++
++ /* Check if we need to wait for FE and FE is busy. */
++ if (Wait && !_IsGPUIdle(idle))
++ {
++ /* Wait a little. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "%s: Waiting for idle: 0x%08X",
++ __FUNCTION__, idle);
++
++ gcmkVERIFY_OK(gckOS_Delay(Hardware->os, 1));
++ }
++ else
++ {
++ break;
++ }
++ }
++
++ /* Return idle to caller. */
++ *Data = idle;
++
++#if defined(EMULATOR)
++ /* Wait a little while until CModel FE gets END.
++ * END is supposed to be appended by caller.
++ */
++ gckOS_Delay(gcvNULL, 100);
++#endif
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Data=0x%08x", *Data);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/* Flush the caches. */
++gceSTATUS
++gckHARDWARE_Flush(
++ IN gckHARDWARE Hardware,
++ IN gceKERNEL_FLUSH Flush,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gctUINT32 pipe;
++ gctUINT32 flush = 0;
++ gctBOOL flushTileStatus;
++ gctUINT32_PTR logical = (gctUINT32_PTR) Logical;
++ gceSTATUS status;
++ gctUINT32 reserveBytes
++ /* Semaphore/Stall */
++ = 4 * gcmSIZEOF(gctUINT32);
++
++ gcmkHEADER_ARG("Hardware=0x%x Flush=0x%x Logical=0x%x *Bytes=%lu",
++ Hardware, Flush, Logical, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Get current pipe. */
++ pipe = Hardware->kernel->command->pipeSelect;
++
++ /* Flush tile status cache. */
++ flushTileStatus = Flush & gcvFLUSH_TILE_STATUS;
++
++ /* Flush 3D color cache. */
++ if ((Flush & gcvFLUSH_COLOR) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)));
++ }
++
++ /* Flush 3D depth cache. */
++ if ((Flush & gcvFLUSH_DEPTH) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++
++ /* Flush 3D texture cache. */
++ if ((Flush & gcvFLUSH_TEXTURE) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)));
++ }
++
++ /* Flush 2D cache. */
++ if ((Flush & gcvFLUSH_2D) && (pipe == 0x1))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)));
++ }
++
++#if gcdMULTI_GPU
++ /* Flush L2 cache. */
++ if ((Flush & gcvFLUSH_L2) && (pipe == 0x0))
++ {
++ flush |= ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ }
++#endif
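++
++ /* Flush register bits gathered above (from the field positions): bit 0
++ ** depth, bit 1 color, bit 2 texture, bit 3 2D, and bit 6 L2 on
++ ** gcdMULTI_GPU builds. */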
++
++ /* Determine reserve bytes. */
++ if (flush)
++ {
++ reserveBytes += 2 * gcmSIZEOF(gctUINT32);
++ }
++
++ if (flushTileStatus)
++ {
++ reserveBytes += 2 * gcmSIZEOF(gctUINT32);
++ }
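++
++ /* Worst case: 4 + 2 + 2 = 8 words, i.e. 32 bytes reserved. */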
++
++ /* See if there is a valid flush. */
++ if ((flush == 0) && (flushTileStatus == gcvFALSE))
++ {
++ if (Bytes != gcvNULL)
++ {
++ /* No bytes required. */
++ *Bytes = 0;
++ }
++ }
++
++ else
++ {
++ /* Copy to command queue. */
++ if (Logical != gcvNULL)
++ {
++ if (*Bytes < reserveBytes)
++ {
++ /* Command queue too small. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++
++ if (flush)
++ {
++ /* Append LOAD_STATE to AQFlush. */
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E03) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *logical++
++ = flush;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH 0x%x", logical - 1, flush);
++ }
++
++ if (flushTileStatus)
++ {
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0594) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "0x%x: FLUSH TILE STATUS 0x%x", logical - 1, logical[-1]);
++ }
++
++ /* Semaphore. */
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E02) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++
++ /* Stall. */
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x09 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++
++ *logical++
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) (0x05 & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) (0x07 & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return the number of bytes required. */
++ *Bytes = reserveBytes;
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_SetFastClear(
++ IN gckHARDWARE Hardware,
++ IN gctINT Enable,
++ IN gctINT Compression
++ )
++{
++#if gcdENABLE_3D
++ gctUINT32 debug;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Enable=%d Compression=%d",
++ Hardware, Enable, Compression);
++
++ /* Only process if fast clear is available. */
++ if ((((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ if (Enable == -1)
++ {
++ /* Determine automatic value for fast clear. */
++ Enable = ((Hardware->identity.chipModel != gcv500)
++ || (Hardware->identity.chipRevision >= 3)
++ ) ? 1 : 0;
++ }
++
++ if (Compression == -1)
++ {
++ /* Determine automatic value for compression. */
++ Compression = Enable
++ & (((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) );
++ }
++
++ /* Read AQMemoryDebug register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00414, &debug));
++
++ /* Set fast clear bypass. */
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))) | (((gctUINT32) ((gctUINT32) (Enable == 0) & ((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20)));
++
++ if (
++ ((((gctUINT32) (Hardware->identity.chipMinorFeatures2)) >> (0 ? 27:27) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1))))))) ||
++ (Hardware->identity.chipModel >= gcv4000))
++ {
++ /* Set compression bypass. */
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))) << (0 ? 21:21))) | (((gctUINT32) ((gctUINT32) (Compression == 0) & ((gctUINT32) ((((1 ? 21:21) - (0 ? 21:21) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 21:21) - (0 ? 21:21) + 1))))))) << (0 ? 21:21)));
++ }
++
++ /* Write back AQMemoryDebug register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00414,
++ debug));
++
++ /* Store fast clear and compression flags. */
++ Hardware->allowFastClear = Enable;
++ Hardware->allowCompression = Compression;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "FastClear=%d Compression=%d", Enable, Compression);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ return gcvSTATUS_OK;
++#endif
++}
++
++typedef enum
++{
++ gcvPOWER_FLAG_INITIALIZE = 1 << 0,
++ gcvPOWER_FLAG_STALL = 1 << 1,
++ gcvPOWER_FLAG_STOP = 1 << 2,
++ gcvPOWER_FLAG_START = 1 << 3,
++ gcvPOWER_FLAG_RELEASE = 1 << 4,
++ gcvPOWER_FLAG_DELAY = 1 << 5,
++ gcvPOWER_FLAG_SAVE = 1 << 6,
++ gcvPOWER_FLAG_ACQUIRE = 1 << 7,
++ gcvPOWER_FLAG_POWER_OFF = 1 << 8,
++ gcvPOWER_FLAG_CLOCK_OFF = 1 << 9,
++ gcvPOWER_FLAG_CLOCK_ON = 1 << 10,
++}
++gcePOWER_FLAGS;
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++static gctCONST_STRING
++_PowerEnum(gceCHIPPOWERSTATE State)
++{
++ const gctCONST_STRING states[] =
++ {
++ gcmSTRING(gcvPOWER_ON),
++ gcmSTRING(gcvPOWER_OFF),
++ gcmSTRING(gcvPOWER_IDLE),
++ gcmSTRING(gcvPOWER_SUSPEND),
++ gcmSTRING(gcvPOWER_SUSPEND_ATPOWERON),
++ gcmSTRING(gcvPOWER_OFF_ATPOWERON),
++ gcmSTRING(gcvPOWER_IDLE_BROADCAST),
++ gcmSTRING(gcvPOWER_SUSPEND_BROADCAST),
++ gcmSTRING(gcvPOWER_OFF_BROADCAST),
++ gcmSTRING(gcvPOWER_OFF_RECOVERY),
++ gcmSTRING(gcvPOWER_OFF_TIMEOUT),
++ gcmSTRING(gcvPOWER_ON_AUTO)
++ };
++
++ if ((State >= gcvPOWER_ON) && (State <= gcvPOWER_ON_AUTO))
++ {
++ return states[State - gcvPOWER_ON];
++ }
++
++ return "unknown";
++}
++#endif
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetPowerManagementState
++**
++** Set GPU to a specified power state.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** gceCHIPPOWERSTATE State
++** Power State.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetPowerManagementState(
++ IN gckHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command = gcvNULL;
++ gckOS os;
++ gctUINT flag, clock;
++ gctPOINTER buffer;
++ gctUINT32 bytes, requested;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL mutexAcquired = gcvFALSE;
++ gctBOOL stall = gcvTRUE;
++ gctBOOL broadcast = gcvFALSE;
++#if gcdPOWEROFF_TIMEOUT
++ gctBOOL timeout = gcvFALSE;
++ gctBOOL isAfter = gcvFALSE;
++ gctUINT32 currentTime;
++#endif
++ gctUINT32 process, thread;
++ gctBOOL commitEntered = gcvFALSE;
++ gctBOOL commandStarted = gcvFALSE;
++ gctBOOL isrStarted = gcvFALSE;
++
++#if gcdENABLE_PROFILING
++ gctUINT64 time, freq, mutexTime, onTime, stallTime, stopTime, delayTime,
++ initTime, offTime, startTime, totalTime;
++#endif
++ gctBOOL global = gcvFALSE;
++ gctBOOL globalAcquired = gcvFALSE;
++ gctBOOL configMmu = gcvFALSE;
++
++ /* State transition flags. */
++ static const gctUINT flags[4][4] =
++ {
++ /* gcvPOWER_ON */
++ { /* ON */ 0,
++ /* OFF */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL,
++ /* SUSPEND */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_OFF */
++ { /* ON */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY,
++ /* OFF */ 0,
++ /* IDLE */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_DELAY,
++ /* SUSPEND */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_IDLE */
++ { /* ON */ gcvPOWER_FLAG_RELEASE,
++ /* OFF */ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ 0,
++ /* SUSPEND */ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_SUSPEND */
++ { /* ON */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* OFF */ gcvPOWER_FLAG_SAVE |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* SUSPEND */ 0,
++ },
++ };
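++
++ /* Indexed as flags[current state][target state]; see the lookup into
++ ** 'flags[Hardware->chipPowerState][State]' below. */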
++
++ /* Clocks. */
++ static const gctUINT clocks[4] =
++ {
++ /* gcvPOWER_ON */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (64) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++
++ /* gcvPOWER_OFF */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++
++ /* gcvPOWER_IDLE */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++
++ /* gcvPOWER_SUSPEND */
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))),
++ };
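++
++ /* Field layout of the clock words above, derived from the positions
++ ** used (names are descriptive, not from a register spec): bits 0 and 1
++ ** are clock-gating disables, bits 8:2 hold the 7-bit frequency scale
++ ** (64 = full speed for ON, 1 = minimum otherwise) and bit 9 loads the
++ ** scale value. gcdENABLE_FSCALE_VAL_ADJUST below patches bits 8:2. */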
++
++ gcmkHEADER_ARG("Hardware=0x%x State=%d", Hardware, State);
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Switching to power state %d(%s)",
++ State, _PowerEnum(State));
++#endif
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Get the gckOS object pointer. */
++ os = Hardware->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Get the gckCOMMAND object pointer. */
++ gcmkVERIFY_OBJECT(Hardware->kernel, gcvOBJ_KERNEL);
++ command = Hardware->kernel->command;
++ gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
++
++ /* Start profiler. */
++ gcmkPROFILE_INIT(freq, time);
++
++ /* Convert the broadcast power state. */
++ switch (State)
++ {
++ case gcvPOWER_SUSPEND_ATPOWERON:
++ /* Convert to SUSPEND and don't wait for STALL. */
++ State = gcvPOWER_SUSPEND;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_OFF_ATPOWERON:
++ /* Convert to OFF and don't wait for STALL. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_IDLE_BROADCAST:
++ /* Convert to IDLE and note we are inside broadcast. */
++ State = gcvPOWER_IDLE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_SUSPEND_BROADCAST:
++ /* Convert to SUSPEND and note we are inside broadcast. */
++ State = gcvPOWER_SUSPEND;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_BROADCAST:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_RECOVERY:
++ /* Convert to OFF and note we are inside recovery. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_ON_AUTO:
++ /* Convert to ON. */
++ State = gcvPOWER_ON;
++ break;
++
++ case gcvPOWER_ON:
++ case gcvPOWER_IDLE:
++ case gcvPOWER_SUSPEND:
++ case gcvPOWER_OFF:
++ /* Mark as global power management. */
++ global = gcvTRUE;
++ break;
++
++#if gcdPOWEROFF_TIMEOUT
++ case gcvPOWER_OFF_TIMEOUT:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ /* Check time out */
++ timeout = gcvTRUE;
++ break;
++#endif
++
++ default:
++ break;
++ }
++
++ if (Hardware->powerManagement == gcvFALSE
++ && State != gcvPOWER_ON
++ )
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Get current process and thread IDs. */
++ gcmkONERROR(gckOS_GetProcessID(&process));
++ gcmkONERROR(gckOS_GetThreadID(&thread));
++
++ if (broadcast)
++ {
++ /* Try to acquire the power mutex. */
++ status = gckOS_AcquireMutex(os, Hardware->powerMutex, 0);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ /* Check if we already own this mutex. */
++ if ((Hardware->powerProcess == process)
++ && (Hardware->powerThread == thread)
++ )
++ {
++ /* Bail out on recursive power management. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ else if (State != gcvPOWER_ON)
++ {
++ /* Called from the IST, so waiting here would deadlock
++ ** if the lock holder calls gckCOMMAND_Stall(). */
++ status = gcvSTATUS_INVALID_REQUEST;
++ goto OnError;
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os,
++ Hardware->powerMutex,
++ gcvINFINITE));
++ }
++ }
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os, Hardware->powerMutex, gcvINFINITE));
++ }
++
++ /* Get time until mutex acquired. */
++ gcmkPROFILE_QUERY(time, mutexTime);
++
++ Hardware->powerProcess = process;
++ Hardware->powerThread = thread;
++ mutexAcquired = gcvTRUE;
++
++ /* Grab control flags and clock. */
++ flag = flags[Hardware->chipPowerState][State];
++ clock = clocks[State];
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ if (State == gcvPOWER_ON)
++ {
++ clock = ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (Hardware->powerOnFscaleVal) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2)));
++ }
++#endif
++
++ if (State == gcvPOWER_SUSPEND && Hardware->chipPowerState == gcvPOWER_OFF && broadcast)
++ {
++#if gcdPOWER_SUSPEND_WHEN_IDLE
++ /* Do nothing */
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++#else
++ /* The clock should be on when switching power from off to suspend. */
++ clock = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) ;
++#endif
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ if (timeout)
++ {
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ gcmkONERROR(
++ gckOS_TicksAfter(Hardware->powerOffTime, currentTime, &isAfter));
++
++ /* powerOffTime has been pushed forward; give up. */
++ if (isAfter
++ /* Expect a transition start from IDLE or SUSPEND. */
++ || (Hardware->chipPowerState == gcvPOWER_ON)
++ || (Hardware->chipPowerState == gcvPOWER_OFF)
++ )
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Power Off GPU[%d] at %u [supposed to be at %u]",
++ Hardware->core, currentTime, Hardware->powerOffTime);
++ }
++#endif
++
++ if (flag == 0)
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* If this is internal power management, check whether we can grab the
++ ** global power semaphore. If we cannot, we have to wait until the
++ ** external world changes the power management state. */
++ if (!global)
++ {
++ /* Try to acquire the global semaphore. */
++ status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ if (State == gcvPOWER_IDLE || State == gcvPOWER_SUSPEND)
++ {
++ /* Called from thread routine which should NEVER sleep.*/
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++
++ /* Release the power mutex. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Releasing the power mutex.");
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++ mutexAcquired = gcvFALSE;
++
++ /* Wait for the semaphore. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Waiting for global semaphore.");
++ gcmkONERROR(gckOS_AcquireSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvTRUE;
++
++ /* Acquire the power mutex. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Reacquiring the power mutex.");
++ gcmkONERROR(gckOS_AcquireMutex(os,
++ Hardware->powerMutex,
++ gcvINFINITE));
++ mutexAcquired = gcvTRUE;
++
++ /* chipPowerState may have been changed by the external world while we
++ ** had given up powerMutex, so the flag must be refreshed now. */
++ flag = flags[Hardware->chipPowerState][State];
++
++ if (flag == 0)
++ {
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvFALSE;
++
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++ mutexAcquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ /* Error. */
++ gcmkONERROR(status);
++ }
++
++ /* Release the global semaphore again. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvFALSE;
++ }
++ else
++ {
++ if (State == gcvPOWER_OFF || State == gcvPOWER_SUSPEND || State == gcvPOWER_IDLE)
++ {
++ /* Acquire the global semaphore if it has not been acquired. */
++ status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
++ if (status == gcvSTATUS_OK)
++ {
++ globalAcquired = gcvTRUE;
++ }
++ else if (status != gcvSTATUS_TIMEOUT)
++ {
++ /* Other errors. */
++ gcmkONERROR(status);
++ }
++ /* Ignore gcvSTATUS_TIMEOUT and leave globalAcquired as gcvFALSE.
++ ** gcvSTATUS_TIMEOUT means the global semaphore was already acquired
++ ** before this operation, so even if we fail we must not release it
++ ** in our error handling; it will be released by the next successful
++ ** global gcvPOWER_ON. */
++ }
++
++        /* Global power management cannot be aborted, so synchronize with the
++        ** last commit still in progress. */
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++ acquired = gcvTRUE;
++
++            /* Avoid acquiring it again. */
++ flag &= ~gcvPOWER_FLAG_ACQUIRE;
++ }
++ }
++
++ if (flag & (gcvPOWER_FLAG_INITIALIZE | gcvPOWER_FLAG_CLOCK_ON))
++ {
++ /* Turn on the power. */
++ gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvTRUE, gcvTRUE));
++
++ /* Mark clock and power as enabled. */
++ Hardware->clockState = gcvTRUE;
++ Hardware->powerState = gcvTRUE;
++
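++        /* Probe the GPU in a loop: if the identity read reports
++        ** gcvSTATUS_GPU_NOT_RESPONDING, power-cycle the core and force a
++        ** full re-initialization before trying again. */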
++ for (;;)
++ {
++ /* Check if GPU is present and awake. */
++ status = _IsGPUPresent(Hardware);
++
++ /* Check if the GPU is not responding. */
++ if (status == gcvSTATUS_GPU_NOT_RESPONDING)
++ {
++ /* Turn off the power and clock. */
++ gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvFALSE, gcvFALSE));
++
++ Hardware->clockState = gcvFALSE;
++ Hardware->powerState = gcvFALSE;
++
++ /* Wait a little. */
++ gckOS_Delay(os, 1);
++
++ /* Turn on the power and clock. */
++ gcmkONERROR(gckOS_SetGPUPower(os, Hardware->core, gcvTRUE, gcvTRUE));
++
++ Hardware->clockState = gcvTRUE;
++ Hardware->powerState = gcvTRUE;
++
++ /* We need to initialize the hardware and start the command
++ * processor. */
++ flag |= gcvPOWER_FLAG_INITIALIZE | gcvPOWER_FLAG_START;
++ }
++ else
++ {
++ /* Test for error. */
++ gcmkONERROR(status);
++
++ /* Break out of loop. */
++ break;
++ }
++ }
++ }
++
++ /* Get time until powered on. */
++ gcmkPROFILE_QUERY(time, onTime);
++
++ if ((flag & gcvPOWER_FLAG_STALL) && stall)
++ {
++ gctBOOL idle;
++ gctINT32 atomValue;
++
++        /* For a global operation, all pending commits have already been
++        ** blocked by globalSemaphore or powerSemaphore. */
++ if (!global)
++ {
++ /* Check commit atom. */
++ gcmkONERROR(gckOS_AtomGet(os, command->atomCommit, &atomValue));
++
++ if (atomValue > 0)
++ {
++ /* Commits are pending - abort power management. */
++ status = broadcast ? gcvSTATUS_CHIP_NOT_READY
++ : gcvSTATUS_MORE_DATA;
++ goto OnError;
++ }
++ }
++
++ if (broadcast)
++ {
++ /* Check for idle. */
++ gcmkONERROR(gckHARDWARE_QueryIdle(Hardware, &idle));
++
++ if (!idle)
++ {
++ status = gcvSTATUS_CHIP_NOT_READY;
++ goto OnError;
++ }
++ }
++
++ else
++ {
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(command, gcvTRUE));
++ commitEntered = gcvTRUE;
++
++ /* Get the size of the flush command. */
++ gcmkONERROR(gckHARDWARE_Flush(Hardware,
++ gcvFLUSH_ALL,
++ gcvNULL,
++ &requested));
++
++ /* Reserve space in the command queue. */
++ gcmkONERROR(gckCOMMAND_Reserve(command,
++ requested,
++ &buffer,
++ &bytes));
++
++ /* Append a flush. */
++ gcmkONERROR(gckHARDWARE_Flush(
++ Hardware, gcvFLUSH_ALL, buffer, &bytes
++ ));
++
++ /* Execute the command queue. */
++ gcmkONERROR(gckCOMMAND_Execute(command, requested));
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(command, gcvTRUE));
++ commitEntered = gcvFALSE;
++
++ /* Wait to finish all commands. */
++#if gcdMULTI_GPU
++ gcmkONERROR(gckCOMMAND_Stall(command, gcvTRUE, gcvCORE_3D_ALL_MASK));
++#else
++ gcmkONERROR(gckCOMMAND_Stall(command, gcvTRUE));
++#endif
++ }
++ }
++
++ /* Get time until stalled. */
++ gcmkPROFILE_QUERY(time, stallTime);
++
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++ acquired = gcvTRUE;
++ }
++
++ if (flag & gcvPOWER_FLAG_STOP)
++ {
++ /* Stop the command parser. */
++ gcmkONERROR(gckCOMMAND_Stop(command, gcvFALSE));
++
++ /* Stop the Isr. */
++ if (Hardware->stopIsr)
++ {
++ gcmkONERROR(Hardware->stopIsr(Hardware->isrContext));
++ }
++ }
++
++    /* Flush the cache before powering off. */
++ if (flag & gcvPOWER_FLAG_POWER_OFF)
++ {
++ if (Hardware->clockState == gcvFALSE)
++ {
++            /* Turn the clock back on so the caches can be flushed. */
++ gcmkONERROR(
++ gckOS_SetGPUPower(os,
++ Hardware->core,
++ gcvTRUE,
++ gcvTRUE));
++
++ Hardware->clockState = gcvTRUE;
++
++ if (gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_DYNAMIC_FREQUENCY_SCALING) != gcvTRUE)
++ {
++ /* Write the clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ clocks[0]));
++
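++                /* The expanded SETFIELD expression below clears bit 9 of the
++                ** clock control value, presumably the frequency-scaler load
++                ** strobe referenced by the comment below. */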
++ /* Done loading the frequency scaler. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clocks[0])) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++ }
++ }
++
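++        /* Briefly restart the command parser so _FlushCache can execute,
++        ** then stop it again and force the clock off below. */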
++ gcmkONERROR(gckCOMMAND_Start(command));
++
++ gcmkONERROR(_FlushCache(Hardware, command));
++
++ gckOS_Delay(gcvNULL, 1);
++
++ /* Stop the command parser. */
++ gcmkONERROR(gckCOMMAND_Stop(command, gcvFALSE));
++
++ flag |= gcvPOWER_FLAG_CLOCK_OFF;
++ }
++
++ /* Get time until stopped. */
++ gcmkPROFILE_QUERY(time, stopTime);
++
++ /* Only process this when hardware is enabled. */
++ if (Hardware->clockState && Hardware->powerState
++ /* Don't touch clock control if dynamic frequency scaling is available. */
++ && gckHARDWARE_IsFeatureAvailable(Hardware, gcvFEATURE_DYNAMIC_FREQUENCY_SCALING) != gcvTRUE
++ )
++ {
++ if (flag & (gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF))
++ {
++ if (Hardware->identity.chipModel == gcv4000
++ && ((Hardware->identity.chipRevision == 0x5208) || (Hardware->identity.chipRevision == 0x5222)))
++ {
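++                /* Workaround for GC4000 revisions 0x5208 and 0x5222: keep
++                ** bit 1 of the clock control value cleared while gating;
++                ** the purpose of the bit is not documented here. */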
++ clock &= ~2U;
++ }
++ }
++
++ /* Write the clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Done loading the frequency scaler. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++ }
++
++ if (flag & gcvPOWER_FLAG_DELAY)
++ {
++ /* Wait for the specified amount of time to settle coming back from
++ ** power-off or suspend state. */
++ gcmkONERROR(gckOS_Delay(os, gcdPOWER_CONTROL_DELAY));
++ }
++
++ /* Get time until delayed. */
++ gcmkPROFILE_QUERY(time, delayTime);
++
++ if (flag & gcvPOWER_FLAG_INITIALIZE)
++ {
++ /* Initialize hardware. */
++ gcmkONERROR(gckHARDWARE_InitializeHardware(Hardware));
++
++ gcmkONERROR(gckHARDWARE_SetFastClear(Hardware,
++ Hardware->allowFastClear,
++ Hardware->allowCompression));
++
++ /* Force the command queue to reload the next context. */
++ command->currContext = gcvNULL;
++
++        /* Need to configure the MMU after the command processor starts. */
++ configMmu = gcvTRUE;
++ }
++
++ /* Get time until initialized. */
++ gcmkPROFILE_QUERY(time, initTime);
++
++ if (flag & (gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF))
++ {
++ /* Turn off the GPU power. */
++ gcmkONERROR(
++ gckOS_SetGPUPower(os,
++ Hardware->core,
++ (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE,
++ (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE));
++
++ /* Save current hardware power and clock states. */
++ Hardware->clockState = (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE;
++ Hardware->powerState = (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE;
++ }
++
++ /* Get time until off. */
++ gcmkPROFILE_QUERY(time, offTime);
++
++ if (flag & gcvPOWER_FLAG_START)
++ {
++ /* Start the command processor. */
++ gcmkONERROR(gckCOMMAND_Start(command));
++ commandStarted = gcvTRUE;
++
++ if (Hardware->startIsr)
++ {
++ /* Start the Isr. */
++ gcmkONERROR(Hardware->startIsr(Hardware->isrContext));
++ isrStarted = gcvTRUE;
++ }
++ }
++
++ /* Get time until started. */
++ gcmkPROFILE_QUERY(time, startTime);
++
++ if (flag & gcvPOWER_FLAG_RELEASE)
++ {
++ /* Release the power management semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, command->powerSemaphore));
++ acquired = gcvFALSE;
++
++ if (global)
++ {
++            /* Verify that the global semaphore has already been acquired
++            ** before we release it.
++            ** If it was acquired, gckOS_TryAcquireSemaphore will return
++            ** gcvSTATUS_TIMEOUT and we release it. Otherwise, the global
++            ** semaphore will be acquired now, but it is still released
++ ** immediately. */
++ status = gckOS_TryAcquireSemaphore(os, Hardware->globalSemaphore);
++ if (status != gcvSTATUS_TIMEOUT)
++ {
++ gcmkONERROR(status);
++ }
++
++ /* Release the global semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, Hardware->globalSemaphore));
++ globalAcquired = gcvFALSE;
++ }
++ }
++
++ /* Save the new power state. */
++ Hardware->chipPowerState = State;
++
++#if gcdDVFS
++ if (State == gcvPOWER_ON && Hardware->kernel->dvfs)
++ {
++ gckDVFS_Start(Hardware->kernel->dvfs);
++ }
++#endif
++
++#if gcdPOWEROFF_TIMEOUT
++    /* Reset the power-off time. */
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ Hardware->powerOffTime = currentTime + Hardware->powerOffTimeout;
++
++ if (State == gcvPOWER_IDLE || State == gcvPOWER_SUSPEND)
++ {
++ /* Start a timer to power off GPU when GPU enters IDLE or SUSPEND. */
++ gcmkVERIFY_OK(gckOS_StartTimer(os,
++ Hardware->powerOffTimer,
++ Hardware->powerOffTimeout));
++ }
++ else
++ {
++        gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "Cancel powerOffTimer");
++
++ /* Cancel running timer when GPU enters ON or OFF. */
++ gcmkVERIFY_OK(gckOS_StopTimer(os, Hardware->powerOffTimer));
++ }
++#endif
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* Get total time. */
++ gcmkPROFILE_QUERY(time, totalTime);
++#if gcdENABLE_PROFILING
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "PROF(%llu): mutex:%llu on:%llu stall:%llu stop:%llu",
++ freq, mutexTime, onTime, stallTime, stopTime);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ " delay:%llu init:%llu off:%llu start:%llu total:%llu",
++ delayTime, initTime, offTime, startTime, totalTime);
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (commandStarted)
++ {
++ gcmkVERIFY_OK(gckCOMMAND_Stop(command, gcvFALSE));
++ }
++
++ if (isrStarted)
++ {
++ gcmkVERIFY_OK(Hardware->stopIsr(Hardware->isrContext));
++ }
++
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(command, gcvTRUE));
++ }
++
++ if (acquired)
++ {
++ /* Release semaphore. */
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Hardware->os,
++ command->powerSemaphore));
++ }
++
++ if (globalAcquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Hardware->os,
++ Hardware->globalSemaphore));
++ }
++
++ if (mutexAcquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_QueryPowerManagementState
++**
++** Get GPU power state.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++**  OUTPUT:
++**
++**      gceCHIPPOWERSTATE* State
++**          The current power state.
++**
++*/
++gceSTATUS
++gckHARDWARE_QueryPowerManagementState(
++ IN gckHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(State != gcvNULL);
++
++    /* Return the state. */
++ *State = Hardware->chipPowerState;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*State=%d", *State);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetPowerManagement
++**
++** Configure GPU power management function.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++**      gctBOOL PowerManagement
++**          Power management state.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetPowerManagement(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++    if (!Hardware->powerManagementLock)
++ {
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE));
++
++ Hardware->powerManagement = PowerManagement;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetPowerManagementLock
++**
++**  Disable the dynamic GPU power management switch.
++**  Only used during the driver initialization stage.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++**      gctBOOL Lock
++**          Power management lock state.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetPowerManagementLock(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Lock
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ Hardware->powerManagementLock = Lock;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_SetGpuProfiler
++**
++**  Configure the GPU profiler function.
++**  Only used during the driver initialization stage.
++**
++** INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++**      gctBOOL GpuProfiler
++**          GPU profiler state.
++**
++*/
++gceSTATUS
++gckHARDWARE_SetGpuProfiler(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL GpuProfiler
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (GpuProfiler == gcvTRUE)
++ {
++ gctUINT32 data = 0;
++
++ /* Need to disable clock gating when doing profiling. */
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress +
++ 0x00100,
++ &data));
++
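++        /* The expanded SETFIELD below clears bit 0 of the module clock
++        ** gating register, presumably the gating enable bit. */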
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++
++ gcmkVERIFY_OK(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00100,
++ data));
++ }
++
++ Hardware->gpuProfiler = GpuProfiler;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++gceSTATUS
++gckHARDWARE_SetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 FscaleValue
++ )
++{
++ gceSTATUS status;
++ gctUINT32 clock;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Hardware=0x%x FscaleValue=%d", Hardware, FscaleValue);
++
++ gcmkVERIFY_ARGUMENT(FscaleValue > 0 && FscaleValue <= 64);
++
++ gcmkONERROR(
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ Hardware->powerOnFscaleVal = FscaleValue;
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++ gctUINT32 data;
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ &data));
++
++ /* Disable all clock gating. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1))))))) << (0 ? 7:7)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11)))));
++
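++        /* Build the new clock control value: field [8:2] carries
++        ** FscaleValue and bit 9 is raised as the load strobe; the follow-up
++        ** write clears bit 9 again to latch the value. */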
++ clock = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2))) | (((gctUINT32) ((gctUINT32) (FscaleValue) & ((gctUINT32) ((((1 ? 8:2) - (0 ? 8:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:2) - (0 ? 8:2) + 1))))))) << (0 ? 8:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Done loading the frequency scaler. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++
++ /* Restore all clock gating. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ Hardware->powerBaseAddress
++ + 0x00104,
++ data));
++ }
++
++ gcmkVERIFY(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_GetFscaleValue(
++ IN gckHARDWARE Hardware,
++    OUT gctUINT * FscaleValue,
++    OUT gctUINT * MinFscaleValue,
++    OUT gctUINT * MaxFscaleValue
++ )
++{
++ *FscaleValue = Hardware->powerOnFscaleVal;
++ *MinFscaleValue = Hardware->minFscaleValue;
++ *MaxFscaleValue = 64;
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckHARDWARE_SetMinFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT MinFscaleValue
++ )
++{
++ if (MinFscaleValue >= 1 && MinFscaleValue <= 64)
++ {
++ Hardware->minFscaleValue = MinFscaleValue;
++ }
++
++ return gcvSTATUS_OK;
++}
++#endif
++
++#if gcdPOWEROFF_TIMEOUT
++gceSTATUS
++gckHARDWARE_SetPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Timeout
++)
++{
++ gcmkHEADER_ARG("Hardware=0x%x Timeout=%d", Hardware, Timeout);
++
++ Hardware->powerOffTimeout = Timeout;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckHARDWARE_QueryPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++)
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ *Timeout = Hardware->powerOffTimeout;
++
++ gcmkFOOTER_ARG("*Timeout=%d", *Timeout);
++ return gcvSTATUS_OK;
++}
++#endif
++
++gceSTATUS
++gckHARDWARE_QueryIdle(
++ IN gckHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ )
++{
++ gceSTATUS status;
++ gctUINT32 idle, address;
++ gctBOOL isIdle;
++#if gcdMULTI_GPU > 1
++ gctUINT32 idle3D1 = 0;
++ gctUINT32 address3D1;
++ gctBOOL isIdle3D1 = gcvFALSE;
++#endif
++
++#if gcdINTERRUPT_STATISTIC
++ gctINT32 pendingInterrupt;
++#endif
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(IsIdle != gcvNULL);
++
++ /* We are idle when the power is not ON. */
++ if (Hardware->chipPowerState != gcvPOWER_ON)
++ {
++ isIdle = gcvTRUE;
++#if gcdMULTI_GPU > 1
++ isIdle3D1 = gcvTRUE;
++#endif
++ }
++
++ else
++ {
++ /* Read idle register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00004, &idle));
++
++#if gcdMULTI_GPU > 1
++ if (Hardware->core == gcvCORE_MAJOR)
++ {
++ gcmkONERROR(
++ gckOS_ReadRegisterByCoreId(Hardware->os,
++ Hardware->core,
++ gcvCORE_3D_1_ID,
++ 0x00004,
++ &idle3D1));
++ }
++#endif
++
++ /* Pipe must be idle. */
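++        /* The expanded expressions below test the individual module busy
++        ** bits (1..7) of idle register 0x00004; each must read 1 for the
++        ** pipe to count as idle. */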
++ if (((((((gctUINT32) (idle)) >> (0 ? 1:1)) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 3:3)) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 4:4)) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 6:6)) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 7:7)) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 2:2)) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) ) != 1)
++ )
++ {
++ /* Something is busy. */
++ isIdle = gcvFALSE;
++ }
++
++ else
++ {
++#if gcdSECURITY
++ isIdle = gcvTRUE;
++ address = 0;
++#else
++ /* Read the current FE address. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00664,
++ &address));
++
++ /* Test if address is inside the last WAIT/LINK sequence. */
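++            /* The accepted window presumably spans the byte length of the
++            ** final WAIT/LINK command pair (wider when gcdMULTI_GPU pads
++            ** the sequence). */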
++ if ((address >= Hardware->lastWaitLink)
++#if gcdMULTI_GPU
++ && (address <= Hardware->lastWaitLink + 40)
++#else
++ && (address <= Hardware->lastWaitLink + 16)
++#endif
++ )
++ {
++ /* FE is in last WAIT/LINK and the pipe is idle. */
++ isIdle = gcvTRUE;
++ }
++ else
++ {
++ /* FE is not in WAIT/LINK yet. */
++ isIdle = gcvFALSE;
++ }
++#endif
++ }
++
++#if gcdMULTI_GPU > 1
++ if (Hardware->core == gcvCORE_MAJOR)
++ {
++ /* Pipe must be idle. */
++ if (((((((gctUINT32) (idle3D1)) >> (0 ? 1:1)) & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle3D1)) >> (0 ? 3:3)) & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle3D1)) >> (0 ? 4:4)) & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle3D1)) >> (0 ? 5:5)) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle3D1)) >> (0 ? 6:6)) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle3D1)) >> (0 ? 7:7)) & ((gctUINT32) ((((1 ? 7:7) - (0 ? 7:7) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:7) - (0 ? 7:7) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle3D1)) >> (0 ? 2:2)) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) ) != 1)
++ )
++ {
++ /* Something is busy. */
++ isIdle3D1 = gcvFALSE;
++ }
++
++ else
++ {
++ /* Read the current FE address. */
++ gcmkONERROR(gckOS_ReadRegisterByCoreId(Hardware->os,
++ Hardware->core,
++ gcvCORE_3D_1_ID,
++ 0x00664,
++ &address3D1));
++
++ /* Test if address is inside the last WAIT/LINK sequence. */
++ if ((address3D1 >= Hardware->lastWaitLink)
++ && (address3D1 <= Hardware->lastWaitLink + 40)
++ )
++ {
++ /* FE is in last WAIT/LINK and the pipe is idle. */
++ isIdle3D1 = gcvTRUE;
++ }
++ else
++ {
++ /* FE is not in WAIT/LINK yet. */
++ isIdle3D1 = gcvFALSE;
++ }
++ }
++ }
++#endif
++
++ }
++
++#if gcdINTERRUPT_STATISTIC
++ gcmkONERROR(gckOS_AtomGet(
++ Hardware->os,
++ Hardware->kernel->eventObj->interruptCount,
++ &pendingInterrupt
++ ));
++
++ if (pendingInterrupt)
++ {
++ isIdle = gcvFALSE;
++ }
++#endif
++
++#if gcdMULTI_GPU > 1
++ if (Hardware->core == gcvCORE_MAJOR)
++ {
++ *IsIdle = (isIdle & isIdle3D1);
++ }
++ else
++#endif
++ {
++ *IsIdle = isIdle;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** Handy macros that will help in reading those debug registers.
++*/
++
++#define gcmkREAD_DEBUG_REGISTER(control, block, index, data) \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ index))); \
++ gcmkONERROR(\
++ gckOS_ReadRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_SIGNALS_##block##_Address, \
++ &profiler->data))
++
++#define gcmkREAD_DEBUG_REGISTER_N(control, block, index, data) \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ index))); \
++ gcmkONERROR(\
++ gckOS_ReadRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_SIGNALS_##block##_Address, \
++ &data))
++
++#define gcmkRESET_DEBUG_REGISTER(control, block) \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ 15))); \
++ gcmkONERROR(\
++ gckOS_WriteRegisterEx(Hardware->os, \
++ Hardware->core, \
++ GC_DEBUG_CONTROL##control##_Address, \
++ gcmSETFIELD(0, \
++ GC_DEBUG_CONTROL##control, \
++ block, \
++ 0)))
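++
++/* Each macro first writes a block/index selector into a DEBUG_CONTROL
++** register and then reads the selected counter back from the matching
++** DEBUG_SIGNALS register; writing index 15 followed by 0 resets a counter. */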
++
++/*******************************************************************************
++**
++** gckHARDWARE_ProfileEngine2D
++**
++**  Read the profile registers available in the 2D engine and set them in the
++**  profile structure. The function also resets the pixelsRendered counter on
++**  every call.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** OPTIONAL gcs2D_PROFILE_PTR Profile
++** Pointer to a gcs2D_Profile structure.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_ProfileEngine2D(
++ IN gckHARDWARE Hardware,
++ OPTIONAL gcs2D_PROFILE_PTR Profile
++ )
++{
++ gceSTATUS status;
++ gcs2D_PROFILE_PTR profiler = Profile;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Profile != gcvNULL)
++ {
++ /* Read the cycle count. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &Profile->cycleCount));
++
++ /* Read pixels rendered by 2D engine. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &profiler->pixelsRendered));
++
++ /* Reset counter. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gckHARDWARE_QueryProfileRegisters(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Reset,
++ OUT gcsPROFILER_COUNTERS * Counters
++ )
++{
++ gceSTATUS status;
++ gcsPROFILER_COUNTERS * profiler = Counters;
++ gctUINT i, clock;
++ gctUINT32 colorKilled, colorDrawn, depthKilled, depthDrawn;
++ gctUINT32 totalRead, totalWrite;
++
++ gcmkHEADER_ARG("Hardware=0x%x Counters=0x%x", Hardware, Counters);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Read the counters. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &profiler->gpuCyclesCounter));
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ &profiler->gpuTotalCyclesCounter));
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0007C,
++ &profiler->gpuIdleCyclesCounter));
++
++
++ /* Read clock control register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &clock));
++
++ profiler->gpuTotalRead64BytesPerFrame = 0;
++ profiler->gpuTotalWrite64BytesPerFrame = 0;
++ profiler->pe_pixel_count_killed_by_color_pipe = 0;
++ profiler->pe_pixel_count_killed_by_depth_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_color_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_depth_pipe = 0;
++
++    /* Walk through all available pixel pipes. */
++ for (i = 0; i < Hardware->identity.pixelPipes; ++i)
++ {
++ /* Select proper pipe. */
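++        /* The expanded SETFIELD below writes the pipe index into field
++        ** [23:20] of the clock control register. */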
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20)))));
++
++ /* BW */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00040,
++ &totalRead));
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00044,
++ &totalWrite));
++
++ profiler->gpuTotalRead64BytesPerFrame += totalRead;
++ profiler->gpuTotalWrite64BytesPerFrame += totalWrite;
++
++ /* PE */
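++        /* Field [23:16] of register 0x00470 selects which PE counter is
++        ** exposed at 0x00454: 0 = color killed, 1 = depth killed,
++        ** 2 = color drawn, 3 = depth drawn. The same select/read pattern
++        ** repeats for the SH, PA, SE, RA, TX, MC and HI blocks below. */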
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorDrawn));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthDrawn));
++
++ profiler->pe_pixel_count_killed_by_color_pipe += colorKilled;
++ profiler->pe_pixel_count_killed_by_depth_pipe += depthKilled;
++ profiler->pe_pixel_count_drawn_by_color_pipe += colorDrawn;
++ profiler->pe_pixel_count_drawn_by_depth_pipe += depthDrawn;
++ }
++
++ /* Reset clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Reset counters. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 1));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00438, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00078, 0));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++
++ /* SH */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->ps_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_pixel_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vs_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_vertice_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_branch_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (12) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_texld_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (13) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_branch_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (14) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_texld_inst_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));
++
++ /* PA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_vtx_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_prim_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_output_prim_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_depth_clipped_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_trivial_rejected_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_culled_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));
++
++ /* SE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_triangle_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_lines_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));
++
++ /* RA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_pixel_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_quad_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_quad_count_after_early_z));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_primitive_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_pipe_cache_miss_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_prefetch_cache_miss_counter));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++
++ /* TX */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_bilinear_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_trilinear_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_discarded_texture_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_texture_requests));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_in_8B_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_hit_texel_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_texel_count));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));
++
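++    /* Each module below follows the same protocol: write a counter index
++       into the module's select field of the debug control register, read
++       the latched value from the module's profile register, and finish by
++       selecting index 15 and then 0 (apparently the counter reset
++       sequence). */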
++ /* MC */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_pipeline));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_IP));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_write_req_8B_from_pipeline));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));
++
++ /* HI */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_read_request_stalled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_request_stalled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_data_stalled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++
++#if VIVANTE_PROFILER_CONTEXT
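++/* Accumulate a freshly sampled counter into the per-context history; the
++   history is what gckHARDWARE_QueryContextProfile reports and then clears. */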
++#define gcmkUPDATE_PROFILE_DATA(data) \
++ profilerHistroy->data += profiler->data
++
++gceSTATUS
++gckHARDWARE_QueryContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Reset,
++ IN gckCONTEXT Context,
++ OUT gcsPROFILER_COUNTERS * Counters
++ )
++{
++ gceSTATUS status;
++ gckCOMMAND command = Hardware->kernel->command;
++ gcsPROFILER_COUNTERS * profiler = Counters;
++
++ gcmkHEADER_ARG("Hardware=0x%x Counters=0x%x", Hardware, Counters);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++    /* Acquire the context sequence mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ command->os, command->mutexContextSeq, gcvINFINITE
++ ));
++
++    /* Copy out the accumulated counters. */
++ gcmkVERIFY_OK(gckOS_MemCopy(
++ profiler, &Context->histroyProfiler, gcmSIZEOF(gcsPROFILER_COUNTERS)
++ ));
++
++ /* Reset counters. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ &Context->histroyProfiler, gcmSIZEOF(gcsPROFILER_COUNTERS)
++ ));
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ command->os, command->mutexContextSeq
++ ));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
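++/* Delta of a free-running 32-bit hardware counter; a single wrap between
++   samples is handled by adding 2^32 before subtracting. */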
++static gctUINT32
++CalcDelta(
++ IN gctUINT32 new,
++ IN gctUINT32 old
++ )
++{
++ if (new >= old)
++ {
++ return new - old;
++ }
++ else
++ {
++ return (gctUINT32)((gctUINT64)new + 0x100000000ll - old);
++ }
++}
++
++gceSTATUS
++gckHARDWARE_UpdateContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status;
++ gcsPROFILER_COUNTERS * profiler = &Context->latestProfiler;
++ gcsPROFILER_COUNTERS * profilerHistroy = &Context->histroyProfiler;
++ gctUINT i, clock;
++ gctUINT32 colorKilled = 0, colorDrawn = 0, depthKilled = 0, depthDrawn = 0;
++ gctUINT32 totalRead, totalWrite;
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 temp;
++ gctBOOL needResetShader = gcvFALSE;
++
++ gcmkHEADER_ARG("Hardware=0x%x Context=0x%x", Hardware, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_OBJECT(Context, gcvOBJ_CONTEXT);
++
++ chipModel = Hardware->identity.chipModel;
++ chipRevision = Hardware->identity.chipRevision;
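++    /* On GC2000, and on GC2100 revision 0x5118, the shader counters are
++       apparently not cleared by the reset sequence, so deltas against the
++       previously sampled values are computed in software below. */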
++ if (chipModel == gcv2000 || (chipModel == gcv2100 && chipRevision == 0x5118))
++ {
++ needResetShader = gcvTRUE;
++ }
++
++ /* Read the counters. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &profiler->gpuCyclesCounter));
++ gcmkUPDATE_PROFILE_DATA(gpuCyclesCounter);
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ &profiler->gpuTotalCyclesCounter));
++ gcmkUPDATE_PROFILE_DATA(gpuTotalCyclesCounter);
++
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0007C,
++ &profiler->gpuIdleCyclesCounter));
++ gcmkUPDATE_PROFILE_DATA(gpuIdleCyclesCounter);
++
++ /* Read clock control register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &clock));
++
++ profiler->gpuTotalRead64BytesPerFrame = 0;
++ profiler->gpuTotalWrite64BytesPerFrame = 0;
++ profiler->pe_pixel_count_killed_by_color_pipe = 0;
++ profiler->pe_pixel_count_killed_by_depth_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_color_pipe = 0;
++ profiler->pe_pixel_count_drawn_by_depth_pipe = 0;
++
++    /* Walk through all available pixel pipes. */
++ for (i = 0; i < Hardware->identity.pixelPipes; ++i)
++ {
++ /* Select proper pipe. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20)))));
++
++ /* BW */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00040,
++ &totalRead));
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00044,
++ &totalWrite));
++
++ profiler->gpuTotalRead64BytesPerFrame += totalRead;
++ profiler->gpuTotalWrite64BytesPerFrame += totalWrite;
++ gcmkUPDATE_PROFILE_DATA(gpuTotalRead64BytesPerFrame);
++ gcmkUPDATE_PROFILE_DATA(gpuTotalWrite64BytesPerFrame);
++
++ /* PE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthKilled));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &colorDrawn));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))));gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00454, &depthDrawn));
++
++ profiler->pe_pixel_count_killed_by_color_pipe += colorKilled;
++ profiler->pe_pixel_count_killed_by_depth_pipe += depthKilled;
++ profiler->pe_pixel_count_drawn_by_color_pipe += colorDrawn;
++ profiler->pe_pixel_count_drawn_by_depth_pipe += depthDrawn;
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_killed_by_color_pipe);
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_killed_by_depth_pipe);
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_drawn_by_color_pipe);
++ gcmkUPDATE_PROFILE_DATA(pe_pixel_count_drawn_by_depth_pipe);
++ }
++
++ /* Reset clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++
++ /* Reset counters. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 1));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x0003C, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00438, 0));
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00078, 0));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++
++ /* SH */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->ps_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->ps_inst_counter;
++ profiler->ps_inst_counter = CalcDelta(temp, Context->prevPSInstCount);
++ Context->prevPSInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(ps_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_pixel_counter));
++ if (needResetShader)
++ {
++ temp = profiler->rendered_pixel_counter;
++ profiler->rendered_pixel_counter = CalcDelta(temp, Context->prevPSPixelCount);
++ Context->prevPSPixelCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(rendered_pixel_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vs_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->vs_inst_counter;
++ profiler->vs_inst_counter = CalcDelta(temp, Context->prevVSInstCount);
++ Context->prevVSInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(vs_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->rendered_vertice_counter));
++ if (needResetShader)
++ {
++ temp = profiler->rendered_vertice_counter;
++ profiler->rendered_vertice_counter = CalcDelta(temp, Context->prevVSVertexCount);
++ Context->prevVSVertexCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(rendered_vertice_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (11) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_branch_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->vtx_branch_inst_counter;
++ profiler->vtx_branch_inst_counter = CalcDelta(temp, Context->prevVSBranchInstCount);
++ Context->prevVSBranchInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(vtx_branch_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (12) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->vtx_texld_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->vtx_texld_inst_counter;
++ profiler->vtx_texld_inst_counter = CalcDelta(temp, Context->prevVSTexInstCount);
++ Context->prevVSTexInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(vtx_texld_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (13) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_branch_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->pxl_branch_inst_counter;
++ profiler->pxl_branch_inst_counter = CalcDelta(temp, Context->prevPSBranchInstCount);
++ Context->prevPSBranchInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(pxl_branch_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (14) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0045C, &profiler->pxl_texld_inst_counter));
++ if (needResetShader)
++ {
++ temp = profiler->pxl_texld_inst_counter;
++ profiler->pxl_texld_inst_counter = CalcDelta(temp, Context->prevPSTexInstCount);
++ Context->prevPSTexInstCount = temp;
++ }
++ gcmkUPDATE_PROFILE_DATA(pxl_texld_inst_counter);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00470, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));
++
++ /* PA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_vtx_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_input_vtx_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (4) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_input_prim_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_input_prim_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_output_prim_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_output_prim_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_depth_clipped_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_depth_clipped_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_trivial_rejected_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_trivial_rejected_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00460, &profiler->pa_culled_counter));
++ gcmkUPDATE_PROFILE_DATA(pa_culled_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));
++
++ /* SE */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_triangle_count));
++ gcmkUPDATE_PROFILE_DATA(se_culled_triangle_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00464, &profiler->se_culled_lines_count));
++ gcmkUPDATE_PROFILE_DATA(se_culled_lines_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));
++
++ /* RA */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_pixel_count));
++ gcmkUPDATE_PROFILE_DATA(ra_valid_pixel_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_quad_count));
++ gcmkUPDATE_PROFILE_DATA(ra_total_quad_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_valid_quad_count_after_early_z));
++ gcmkUPDATE_PROFILE_DATA(ra_valid_quad_count_after_early_z);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_total_primitive_count));
++ gcmkUPDATE_PROFILE_DATA(ra_total_primitive_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_pipe_cache_miss_counter));
++ gcmkUPDATE_PROFILE_DATA(ra_pipe_cache_miss_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (10) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00448, &profiler->ra_prefetch_cache_miss_counter));
++ gcmkUPDATE_PROFILE_DATA(ra_prefetch_cache_miss_counter);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 23:16) - (0 ? 23:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:16) - (0 ? 23:16) + 1))))))) << (0 ? 23:16)))
++));
++
++ /* TX */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_bilinear_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_bilinear_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_trilinear_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_trilinear_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_discarded_texture_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_discarded_texture_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_total_texture_requests));
++ gcmkUPDATE_PROFILE_DATA(tx_total_texture_requests);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (5) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_count));
++ gcmkUPDATE_PROFILE_DATA(tx_mem_read_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (6) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_mem_read_in_8B_count));
++ gcmkUPDATE_PROFILE_DATA(tx_mem_read_in_8B_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (7) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_count));
++ gcmkUPDATE_PROFILE_DATA(tx_cache_miss_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (8) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_hit_texel_count));
++ gcmkUPDATE_PROFILE_DATA(tx_cache_hit_texel_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (9) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0044C, &profiler->tx_cache_miss_texel_count));
++ gcmkUPDATE_PROFILE_DATA(tx_cache_miss_texel_count);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00474, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))) << (0 ? 31:24)))
++));
++
++ /* MC */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_pipeline));
++ gcmkUPDATE_PROFILE_DATA(mc_total_read_req_8B_from_pipeline);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_read_req_8B_from_IP));
++ gcmkUPDATE_PROFILE_DATA(mc_total_read_req_8B_from_IP);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (3) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x00468, &profiler->mc_total_write_req_8B_from_pipeline));
++ gcmkUPDATE_PROFILE_DATA(mc_total_write_req_8B_from_pipeline);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 7:0) - (0 ? 7:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 7:0) - (0 ? 7:0) + 1))))))) << (0 ? 7:0)))
++));
++
++ /* HI */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_read_request_stalled));
++ gcmkUPDATE_PROFILE_DATA(hi_axi_cycles_read_request_stalled);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_request_stalled));
++ gcmkUPDATE_PROFILE_DATA(hi_axi_cycles_write_request_stalled);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (2) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os, Hardware->core, 0x0046C, &profiler->hi_axi_cycles_write_data_stalled));
++ gcmkUPDATE_PROFILE_DATA(hi_axi_cycles_write_data_stalled);
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (15) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) ));
++gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, 0x00478, ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))
++));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++
++#if VIVANTE_PROFILER_NEW
++gceSTATUS
++gckHARDWARE_InitProfiler(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gctUINT32 control;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &control));
++    /* Enable the debug registers by clearing bit 11 of the clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1))))))) << (0 ? 11:11)))));
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
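++/* Soft reset sequence: disable clock gating and the pulse-eater, isolate
++   the core, pulse the soft reset bit, release the isolation, then retry
++   until the idle bit and both reset status bits read back as set. */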
++static gceSTATUS
++_ResetGPU(
++ IN gckHARDWARE Hardware,
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gctUINT32 control, idle;
++ gceSTATUS status;
++
++ for (;;)
++ {
++ /* Disable clock gating. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ Hardware->powerBaseAddress +
++ 0x00104,
++ 0x00000000));
++
++ control = ((((gctUINT32) (0x01590880)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1))))))) << (0 ? 17:17)));
++
++ /* Disable pulse-eater. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0010C,
++ control));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0010C,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x0010C,
++ control));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)))));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ 0x00000900));
++
++        /* Wait for the clock to stabilize. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Isolate the GPU. */
++ control = ((((gctUINT32) (0x00000900)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ control));
++
++ /* Set soft reset. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Wait for reset. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++        /* Clear the soft reset bit. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Reset GPU isolation. */
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ Core,
++ 0x00000,
++ control));
++
++ /* Read idle register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ Core,
++ 0x00004,
++ &idle));
++
++ if ((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) == 0)
++ {
++ continue;
++ }
++
++#if gcdMULTI_GPU > 1
++ if (Core == gcvCORE_MAJOR)
++ {
++ /* Read idle register. */
++ gcmkONERROR(gckOS_ReadRegisterByCoreId(Os,
++ Core,
++ gcvCORE_3D_1_ID,
++ 0x00004,
++ &idle));
++
++ if ((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) == 0)
++ {
++ continue;
++ }
++ }
++#endif
++        /* Read back the clock control register to check the reset status bits. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ Core,
++ 0x00000,
++ &control));
++
++ if (((((((gctUINT32) (control)) >> (0 ? 16:16)) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) ) == 0)
++ || ((((((gctUINT32) (control)) >> (0 ? 17:17)) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1)))))) ) == 0)
++ )
++ {
++ continue;
++ }
++
++#if gcdMULTI_GPU > 1
++ if (Core == gcvCORE_MAJOR)
++ {
++            /* Read back the clock control register to check the reset status bits. */
++ gcmkONERROR(gckOS_ReadRegisterByCoreId(Os,
++ Core,
++ gcvCORE_3D_1_ID,
++ 0x00000,
++ &control));
++
++ if (((((((gctUINT32) (control)) >> (0 ? 16:16)) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) ) == 0)
++ || ((((((gctUINT32) (control)) >> (0 ? 17:17)) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1)))))) ) == 0)
++ )
++ {
++ continue;
++ }
++ }
++#endif
++ /* GPU is idle. */
++ break;
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the error. */
++ return status;
++}
++
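++/*******************************************************************************
++**
++**  gckHARDWARE_Reset
++**
++**  Reset the hardware. A full reset through the OS is attempted first;
++**  when that fails, the soft reset sequence is used (which requires the
++**  isolation bit, i.e. chip revision 0x4600 or newer). The hardware is
++**  then reinitialized and execution resumes at the kernel restore address.
++**
++**  INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to the gckHARDWARE object.
++**
++**  OUTPUT:
++**
++**      Nothing.
++*/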
++gceSTATUS
++gckHARDWARE_Reset(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_OBJECT(Hardware->kernel, gcvOBJ_KERNEL);
++
++ /* Hardware reset. */
++ status = gckOS_ResetGPU(Hardware->os, Hardware->core);
++
++ if (gcmIS_ERROR(status))
++ {
++ if (Hardware->identity.chipRevision < 0x4600)
++ {
++ /* Not supported - we need the isolation bit. */
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++ }
++
++ /* Soft reset. */
++ gcmkONERROR(_ResetGPU(Hardware, Hardware->os, Hardware->core));
++ }
++
++ /* Initialize hardware. */
++ gcmkONERROR(gckHARDWARE_InitializeHardware(Hardware));
++
++    /* Jump to the address at which the GPU should resume if it is not stuck. */
++ gcmkONERROR(gckHARDWARE_Execute(Hardware, Hardware->kernel->restoreAddress, 16));
++
++ gcmkPRINT("[galcore]: recovery done");
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++    gcmkPRINT("[galcore]: hardware reset failed, giving up");
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
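++/*******************************************************************************
++**
++**  gckHARDWARE_GetBaseAddress
++**
++**  Query the physical base address to be applied to GPU addresses. Zero is
++**  returned when the core has the new memory controller (MC20); otherwise
++**  the base address reported by the OS layer is used.
++**
++**  INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to the gckHARDWARE object.
++**
++**  OUTPUT:
++**
++**      gctUINT32_PTR BaseAddress
++**          Physical base address, or zero when none is required.
++*/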
++gceSTATUS
++gckHARDWARE_GetBaseAddress(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32_PTR BaseAddress
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(BaseAddress != gcvNULL);
++
++ /* Test if we have a new Memory Controller. */
++ if (((((gctUINT32) (Hardware->identity.chipMinorFeatures)) >> (0 ? 22:22) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1))))))))
++ {
++ /* No base address required. */
++ *BaseAddress = 0;
++ }
++ else
++ {
++ /* Get the base address from the OS. */
++ gcmkONERROR(gckOS_GetBaseAddress(Hardware->os, BaseAddress));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*BaseAddress=0x%08x", *BaseAddress);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
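++/*******************************************************************************
++**
++**  gckHARDWARE_NeedBaseAddress
++**
++**  Determine whether a load-state command targets one of the state
++**  addresses that must be programmed with a physical address and therefore
++**  needs the base address added.
++**
++**  INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to the gckHARDWARE object.
++**
++**      gctUINT32 State
++**          First word of the load-state command to inspect.
++**
++**  OUTPUT:
++**
++**      gctBOOL_PTR NeedBase
++**          gcvTRUE when the state needs the base address, gcvFALSE otherwise.
++*/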
++gceSTATUS
++gckHARDWARE_NeedBaseAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 State,
++ OUT gctBOOL_PTR NeedBase
++ )
++{
++ gctBOOL need = gcvFALSE;
++
++ gcmkHEADER_ARG("Hardware=0x%x State=0x%08x", Hardware, State);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(NeedBase != gcvNULL);
++
++ /* Make sure this is a load state. */
++ if (((((gctUINT32) (State)) >> (0 ? 31:27) & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))))
++ {
++#if gcdENABLE_3D
++ /* Get the state address. */
++ switch ((((((gctUINT32) (State)) >> (0 ? 15:0)) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1)))))) ))
++ {
++ case 0x0596:
++ case 0x0597:
++ case 0x0599:
++ case 0x059A:
++ case 0x05A9:
++        /* These states need a real physical address. */
++ need = gcvTRUE;
++ break;
++ }
++#else
++ /* 2D addresses don't need a base address. */
++#endif
++ }
++
++ /* Return the flag. */
++ *NeedBase = need;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*NeedBase=%d", *NeedBase);
++ return gcvSTATUS_OK;
++}
++
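++/*******************************************************************************
++**
++**  gckHARDWARE_SetIsrManager
++**
++**  Install the ISR start/stop callbacks and their context on the hardware
++**  object; all three arguments must be non-NULL.
++**
++**  INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to the gckHARDWARE object.
++**
++**      gctISRMANAGERFUNC StartIsr
++**      gctISRMANAGERFUNC StopIsr
++**          Callbacks that start and stop interrupt handling.
++**
++**      gctPOINTER Context
++**          Opaque context passed to the callbacks.
++*/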
++gceSTATUS
++gckHARDWARE_SetIsrManager(
++ IN gckHARDWARE Hardware,
++ IN gctISRMANAGERFUNC StartIsr,
++ IN gctISRMANAGERFUNC StopIsr,
++ IN gctPOINTER Context
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ gcmkHEADER_ARG("Hardware=0x%x, StartIsr=0x%x, StopIsr=0x%x, Context=0x%x",
++ Hardware, StartIsr, StopIsr, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (StartIsr == gcvNULL ||
++ StopIsr == gcvNULL ||
++ Context == gcvNULL)
++ {
++ status = gcvSTATUS_INVALID_ARGUMENT;
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ Hardware->startIsr = StartIsr;
++ Hardware->stopIsr = StopIsr;
++ Hardware->isrContext = Context;
++
++ /* Success. */
++ gcmkFOOTER();
++
++ return status;
++}
++
++/*******************************************************************************
++**
++**  gckHARDWARE_Compose
++**
++**  Start a composition.
++**
++**  INPUT:
++**
++**      gckHARDWARE Hardware
++**          Pointer to the gckHARDWARE object.
++**
++**      gctUINT32 ProcessID
++**          Process owning the command buffer (used for cache maintenance).
++**
++**      gctPHYS_ADDR Physical
++**      gctPOINTER Logical
++**          Physical and logical addresses of the command buffer.
++**
++**      gctSIZE_T Offset
++**      gctSIZE_T Size
++**          Offset and size of the composition commands; (Size + 8) must be
++**          a multiple of 64 bytes.
++**
++**      gctUINT8 EventID
++**          Event to signal when the composition completes.
++**
++**  OUTPUT:
++**
++**      Nothing.
++*/
++gceSTATUS
++gckHARDWARE_Compose(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Size,
++ IN gctUINT8 EventID
++ )
++{
++#if gcdENABLE_3D
++ gceSTATUS status;
++ gctUINT32_PTR triggerState;
++
++ gcmkHEADER_ARG("Hardware=0x%x Physical=0x%x Logical=0x%x"
++ " Offset=%d Size=%d EventID=%d",
++ Hardware, Physical, Logical, Offset, Size, EventID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(((Size + 8) & 63) == 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++    /* Append the trigger state after the composition commands; it encodes
++       EventID so an event is signaled on completion. */
++ triggerState = (gctUINT32_PTR) ((gctUINT8_PTR) Logical + Offset + Size);
++ triggerState[0] = 0x0C03;
++ triggerState[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:4) - (0 ? 5:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:4) - (0 ? 5:4) + 1))))))) << (0 ? 5:4))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 5:4) - (0 ? 5:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:4) - (0 ? 5:4) + 1))))))) << (0 ? 5:4)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1))))))) << (0 ? 8:8)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:16) - (0 ? 20:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:16) - (0 ? 20:16) + 1))))))) << (0 ? 20:16))) | (((gctUINT32) ((gctUINT32) (EventID) & ((gctUINT32) ((((1 ? 20:16) - (0 ? 20:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:16) - (0 ? 20:16) + 1))))))) << (0 ? 20:16)))
++ ;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the wait/link. */
++ gcmkONERROR(gckOS_CacheClean(
++ Hardware->os, ProcessID, gcvNULL,
++ (gctUINT32)Physical, Logical, Offset + Size
++ ));
++#endif
++
++ /* Start composition. */
++ gcmkONERROR(gckOS_WriteRegisterEx(
++ Hardware->os, Hardware->core, 0x00554,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)))
++ ));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ /* Return the status. */
++ return gcvSTATUS_NOT_SUPPORTED;
++#endif
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_IsFeatureAvailable
++**
++** Verifies whether the specified feature is available in hardware.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++**          Pointer to a gckHARDWARE object.
++**
++** gceFEATURE Feature
++** Feature to be verified.
++*/
++gceSTATUS
++gckHARDWARE_IsFeatureAvailable(
++ IN gckHARDWARE Hardware,
++ IN gceFEATURE Feature
++ )
++{
++ gctBOOL available;
++
++ gcmkHEADER_ARG("Hardware=0x%x Feature=%d", Hardware, Feature);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++    /* Only features needed by common kernel logic are added here. */
++ switch (Feature)
++ {
++ case gcvFEATURE_END_EVENT:
++ /*available = gcmVERIFYFIELDVALUE(Hardware->identity.chipMinorFeatures2,
++ GC_MINOR_FEATURES2, END_EVENT, AVAILABLE
++ );*/
++ available = gcvFALSE;
++ break;
++
++ case gcvFEATURE_MC20:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures)) >> (0 ? 22:22) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1)))))));
++ break;
++
++ case gcvFEATURE_EARLY_Z:
++ available = ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 16:16) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) == (0x0 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))));
++ break;
++
++ case gcvFEATURE_HZ:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures)) >> (0 ? 27:27) & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 27:27) - (0 ? 27:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:27) - (0 ? 27:27) + 1)))))));
++ break;
++
++ case gcvFEATURE_NEW_HZ:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures3)) >> (0 ? 26:26) & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 26:26) - (0 ? 26:26) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:26) - (0 ? 26:26) + 1)))))));
++ break;
++
++ case gcvFEATURE_FAST_MSAA:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures3)) >> (0 ? 8:8) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))));
++ break;
++
++ case gcvFEATURE_SMALL_MSAA:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures4)) >> (0 ? 18:18) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))));
++ break;
++
++ case gcvFEATURE_DYNAMIC_FREQUENCY_SCALING:
++ /* This feature doesn't apply to 2D cores. */
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures2)) >> (0 ? 14:14) & ((gctUINT32) ((((1 ? 14:14) - (0 ? 14:14) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 14:14) - (0 ? 14:14) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 14:14) - (0 ? 14:14) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 14:14) - (0 ? 14:14) + 1)))))))
++ && ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 2:2) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))));
++
++ if (Hardware->identity.chipModel == gcv1000 &&
++ (Hardware->identity.chipRevision == 0x5039 ||
++ Hardware->identity.chipRevision == 0x5040))
++ {
++ available = gcvFALSE;
++ }
++ break;
++
++ case gcvFEATURE_ACE:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures3)) >> (0 ? 18:18) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))));
++ break;
++
++ case gcvFEATURE_HALTI2:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures4)) >> (0 ? 16:16) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))));
++ break;
++
++ case gcvFEATURE_PIPE_2D:
++ available = ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 9:9) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))));
++ break;
++
++ case gcvFEATURE_PIPE_3D:
++#if gcdENABLE_3D
++ available = ((((gctUINT32) (Hardware->identity.chipFeatures)) >> (0 ? 2:2) & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1)))))));
++#else
++ available = gcvFALSE;
++#endif
++ break;
++
++ case gcvFEATURE_FC_FLUSH_STALL:
++ available = ((((gctUINT32) (Hardware->identity.chipMinorFeatures1)) >> (0 ? 31:31) & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 31:31) - (0 ? 31:31) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:31) - (0 ? 31:31) + 1)))))));
++ break;
++
++ default:
++ gcmkFATAL("Invalid feature has been requested.");
++ available = gcvFALSE;
++ }
++
++ /* Return result. */
++ gcmkFOOTER_ARG("%d", available ? gcvSTATUS_TRUE : gcvSTATUS_FALSE);
++ return available ? gcvSTATUS_TRUE : gcvSTATUS_FALSE;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_DumpMMUException
++**
++** Dump the MMU debug info on an MMU exception.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_DumpMMUException(
++ IN gckHARDWARE Hardware
++ )
++{
++ gctUINT32 mmu = 0;
++ gctUINT32 mmuStatus = 0;
++ gctUINT32 address = 0;
++ gctUINT32 i = 0;
++ gctUINT32 mtlb = 0;
++ gctUINT32 stlb = 0;
++ gctUINT32 offset = 0;
++#if gcdPROCESS_ADDRESS_SPACE
++ gcsDATABASE_PTR database;
++#endif
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ gcmkPRINT("GPU[%d](ChipModel=0x%x ChipRevision=0x%x):\n",
++ Hardware->core,
++ Hardware->identity.chipModel,
++ Hardware->identity.chipRevision);
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("*** MMU ERROR DUMP ***\n");
++ gcmkPRINT("**************************\n");
++
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00188,
++ &mmuStatus));
++
++ gcmkPRINT(" MMU status = 0x%08X\n", mmuStatus);
++
++ for (i = 0; i < 4; i += 1)
++ {
++ mmu = mmuStatus & 0xF;
++ mmuStatus >>= 4;
++
++ if (mmu == 0)
++ {
++ continue;
++ }
++
++ switch (mmu)
++ {
++ case 1:
++ gcmkPRINT(" MMU%d: slave not present\n", i);
++ break;
++
++ case 2:
++ gcmkPRINT(" MMU%d: page not present\n", i);
++ break;
++
++ case 3:
++ gcmkPRINT(" MMU%d: write violation\n", i);
++ break;
++
++ default:
++ gcmkPRINT(" MMU%d: unknown state\n", i);
++ }
++
++ gcmkVERIFY_OK(
++ gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00190 + i * 4,
++ &address));
++
++ mtlb = (address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT;
++ stlb = (address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT;
++ offset = address & gcdMMU_OFFSET_4K_MASK;
++
++ gcmkPRINT(" MMU%d: exception address = 0x%08X\n", i, address);
++
++ gcmkPRINT(" MTLB entry = %d\n", mtlb);
++
++ gcmkPRINT(" STLB entry = %d\n", stlb);
++
++ gcmkPRINT(" Offset = 0x%08X (%d)\n", offset, offset);
++
++ gckMMU_DumpPageTableEntry(Hardware->kernel->mmu, address);
++
++#if gcdPROCESS_ADDRESS_SPACE
++ for (i = 0; i < gcmCOUNTOF(Hardware->kernel->db->db); ++i)
++ {
++ for (database = Hardware->kernel->db->db[i];
++ database != gcvNULL;
++ database = database->next)
++ {
++ gcmkPRINT(" database [%d] :", database->processID);
++ gckMMU_DumpPageTableEntry(database->mmu, address);
++ }
++ }
++#endif
++ }
++
++ gckHARDWARE_DumpGPUState(Hardware);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHARDWARE_DumpGPUState
++**
++** Dump the GPU debug registers.
++**
++** INPUT:
++**
++** gckHARDWARE Hardware
++** Pointer to a gckHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHARDWARE_DumpGPUState(
++ IN gckHARDWARE Hardware
++ )
++{
++ static gctCONST_STRING _cmdState[] =
++ {
++ "PAR_IDLE_ST", "PAR_DEC_ST", "PAR_ADR0_ST", "PAR_LOAD0_ST",
++ "PAR_ADR1_ST", "PAR_LOAD1_ST", "PAR_3DADR_ST", "PAR_3DCMD_ST",
++ "PAR_3DCNTL_ST", "PAR_3DIDXCNTL_ST", "PAR_INITREQDMA_ST",
++ "PAR_DRAWIDX_ST", "PAR_DRAW_ST", "PAR_2DRECT0_ST", "PAR_2DRECT1_ST",
++ "PAR_2DDATA0_ST", "PAR_2DDATA1_ST", "PAR_WAITFIFO_ST", "PAR_WAIT_ST",
++ "PAR_LINK_ST", "PAR_END_ST", "PAR_STALL_ST"
++ };
++
++ static gctCONST_STRING _cmdDmaState[] =
++ {
++ "CMD_IDLE_ST", "CMD_START_ST", "CMD_REQ_ST", "CMD_END_ST"
++ };
++
++ static gctCONST_STRING _cmdFetState[] =
++ {
++ "FET_IDLE_ST", "FET_RAMVALID_ST", "FET_VALID_ST"
++ };
++
++ static gctCONST_STRING _reqDmaState[] =
++ {
++ "REQ_IDLE_ST", "REQ_WAITIDX_ST", "REQ_CAL_ST"
++ };
++
++ static gctCONST_STRING _calState[] =
++ {
++ "CAL_IDLE_ST", "CAL_LDADR_ST", "CAL_IDXCALC_ST"
++ };
++
++ static gctCONST_STRING _veReqState[] =
++ {
++ "VER_IDLE_ST", "VER_CKCACHE_ST", "VER_MISS_ST"
++ };
++
++ static gcsiDEBUG_REGISTERS _dbgRegs[] =
++ {
++ { "RA", 0x474, 16, 0x448, 16, 0x12344321 },
++ { "TX", 0x474, 24, 0x44C, 16, 0x12211221 },
++ { "FE", 0x470, 0, 0x450, 16, 0xBABEF00D },
++ { "PE", 0x470, 16, 0x454, 16, 0xBABEF00D },
++ { "DE", 0x470, 8, 0x458, 16, 0xBABEF00D },
++ { "SH", 0x470, 24, 0x45C, 16, 0xDEADBEEF },
++ { "PA", 0x474, 0, 0x460, 16, 0x0000AAAA },
++ { "SE", 0x474, 8, 0x464, 16, 0x5E5E5E5E },
++ { "MC", 0x478, 0, 0x468, 16, 0x12345678 },
++ { "HI", 0x478, 8, 0x46C, 16, 0xAAAAAAAA }
++ };
++
++ static gctUINT32 _otherRegs[] =
++ {
++ 0x040, 0x044, 0x04C, 0x050, 0x054, 0x058, 0x05C, 0x060,
++ 0x43c, 0x440, 0x444, 0x414,
++ };
++
++ gceSTATUS status;
++ gckKERNEL kernel = gcvNULL;
++ gctUINT32 idle = 0, axi = 0;
++ gctUINT32 dmaAddress1 = 0, dmaAddress2 = 0;
++ gctUINT32 dmaState1 = 0, dmaState2 = 0;
++ gctUINT32 dmaLow = 0, dmaHigh = 0;
++ gctUINT32 cmdState = 0, cmdDmaState = 0, cmdFetState = 0;
++ gctUINT32 dmaReqState = 0, calState = 0, veReqState = 0;
++ gctUINT i;
++ gctUINT pipe = 0, pixelPipes = 0;
++ gctUINT32 control = 0, oldControl = 0;
++ gckOS os = Hardware->os;
++ gceCORE core = Hardware->core;
++
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ kernel = Hardware->kernel;
++
++ gcmkPRINT_N(12, "GPU[%d](ChipModel=0x%x ChipRevision=0x%x):\n",
++ core,
++ Hardware->identity.chipModel,
++ Hardware->identity.chipRevision);
++
++ pixelPipes = Hardware->identity.pixelPipes
++ ? Hardware->identity.pixelPipes
++ : 1;
++
++ /* Reset register values. */
++ idle = axi =
++ dmaState1 = dmaState2 =
++ dmaAddress1 = dmaAddress2 =
++ dmaLow = dmaHigh = 0;
++
++ /* Verify whether DMA is running. */
++ gcmkONERROR(_VerifyDMA(
++ os, core, &dmaAddress1, &dmaAddress2, &dmaState1, &dmaState2
++ ));
++
++ cmdState = dmaState2 & 0x1F;
++ cmdDmaState = (dmaState2 >> 8) & 0x03;
++ cmdFetState = (dmaState2 >> 10) & 0x03;
++ dmaReqState = (dmaState2 >> 12) & 0x03;
++ calState = (dmaState2 >> 14) & 0x03;
++ veReqState = (dmaState2 >> 16) & 0x03;
++
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x004, &idle));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x00C, &axi));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x668, &dmaLow));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x66C, &dmaHigh));
++
++ gcmkPRINT_N(0, "**************************\n");
++ gcmkPRINT_N(0, "*** GPU STATE DUMP ***\n");
++ gcmkPRINT_N(0, "**************************\n");
++
++ gcmkPRINT_N(4, " axi = 0x%08X\n", axi);
++
++ gcmkPRINT_N(4, " idle = 0x%08X\n", idle);
++ if ((idle & 0x00000001) == 0) gcmkPRINT_N(0, " FE not idle\n");
++ if ((idle & 0x00000002) == 0) gcmkPRINT_N(0, " DE not idle\n");
++ if ((idle & 0x00000004) == 0) gcmkPRINT_N(0, " PE not idle\n");
++ if ((idle & 0x00000008) == 0) gcmkPRINT_N(0, " SH not idle\n");
++ if ((idle & 0x00000010) == 0) gcmkPRINT_N(0, " PA not idle\n");
++ if ((idle & 0x00000020) == 0) gcmkPRINT_N(0, " SE not idle\n");
++ if ((idle & 0x00000040) == 0) gcmkPRINT_N(0, " RA not idle\n");
++ if ((idle & 0x00000080) == 0) gcmkPRINT_N(0, " TX not idle\n");
++ if ((idle & 0x00000100) == 0) gcmkPRINT_N(0, " VG not idle\n");
++ if ((idle & 0x00000200) == 0) gcmkPRINT_N(0, " IM not idle\n");
++ if ((idle & 0x00000400) == 0) gcmkPRINT_N(0, " FP not idle\n");
++ if ((idle & 0x00000800) == 0) gcmkPRINT_N(0, " TS not idle\n");
++ if ((idle & 0x80000000) != 0) gcmkPRINT_N(0, " AXI low power mode\n");
++
++ if (
++ (dmaAddress1 == dmaAddress2)
++ && (dmaState1 == dmaState2)
++ )
++ {
++ gcmkPRINT_N(0, " DMA appears to be stuck at this address:\n");
++ gcmkPRINT_N(4, " 0x%08X\n", dmaAddress1);
++ }
++ else
++ {
++ if (dmaAddress1 == dmaAddress2)
++ {
++ gcmkPRINT_N(0, " DMA address is constant, but state is changing:\n");
++ gcmkPRINT_N(4, " 0x%08X\n", dmaState1);
++ gcmkPRINT_N(4, " 0x%08X\n", dmaState2);
++ }
++ else
++ {
++ gcmkPRINT_N(0, " DMA is running; known addresses are:\n");
++ gcmkPRINT_N(4, " 0x%08X\n", dmaAddress1);
++ gcmkPRINT_N(4, " 0x%08X\n", dmaAddress2);
++ }
++ }
++
++ gcmkPRINT_N(4, " dmaLow = 0x%08X\n", dmaLow);
++ gcmkPRINT_N(4, " dmaHigh = 0x%08X\n", dmaHigh);
++ gcmkPRINT_N(4, " dmaState = 0x%08X\n", dmaState2);
++ gcmkPRINT_N(8, " command state = %d (%s)\n", cmdState, _cmdState [cmdState]);
++ gcmkPRINT_N(8, " command DMA state = %d (%s)\n", cmdDmaState, _cmdDmaState[cmdDmaState]);
++ gcmkPRINT_N(8, " command fetch state = %d (%s)\n", cmdFetState, _cmdFetState[cmdFetState]);
++ gcmkPRINT_N(8, " DMA request state = %d (%s)\n", dmaReqState, _reqDmaState[dmaReqState]);
++ gcmkPRINT_N(8, " cal state = %d (%s)\n", calState, _calState [calState]);
++ gcmkPRINT_N(8, " VE request state = %d (%s)\n", veReqState, _veReqState [veReqState]);
++
++ /* Record control. */
++ gckOS_ReadRegisterEx(os, core, 0x0, &oldControl);
++
++ for (pipe = 0; pipe < pixelPipes; pipe++)
++ {
++ gcmkPRINT_N(4, " Debug registers of pipe[%d]:\n", pipe);
++
++ /* Switch pipe. */
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x0, &control));
++ control &= ~(0xF << 20);
++ control |= (pipe << 20);
++ gcmkONERROR(gckOS_WriteRegisterEx(os, core, 0x0, control));
++
++ for (i = 0; i < gcmCOUNTOF(_dbgRegs); i += 1)
++ {
++ gcmkONERROR(_DumpDebugRegisters(os, core, &_dbgRegs[i]));
++ }
++
++ gcmkPRINT_N(0, " Other Registers:\n");
++ for (i = 0; i < gcmCOUNTOF(_otherRegs); i += 1)
++ {
++ gctUINT32 read;
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, _otherRegs[i], &read));
++ gcmkPRINT_N(12, " [0x%04X] 0x%08X\n", _otherRegs[i], read);
++ }
++ }
++
++ if (kernel->hardware->identity.chipFeatures & (1 << 4))
++ {
++ gctUINT32 read0, read1, write;
++
++ read0 = read1 = write = 0;
++
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x43C, &read0));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x440, &read1));
++ gcmkONERROR(gckOS_ReadRegisterEx(os, core, 0x444, &write));
++
++ gcmkPRINT_N(4, " read0 = 0x%08X\n", read0);
++ gcmkPRINT_N(4, " read1 = 0x%08X\n", read1);
++ gcmkPRINT_N(4, " write = 0x%08X\n", write);
++ }
++
++ /* Restore control. */
++ gcmkONERROR(gckOS_WriteRegisterEx(os, core, 0x0, oldControl));
++
++ /* dump stack. */
++ gckOS_DumpCallStack(os);
++
++OnError:
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++gckHARDWARE_ReadPerformanceRegister(
++ IN gckHARDWARE Hardware,
++ IN gctUINT PerformanceAddress,
++ IN gctUINT IndexAddress,
++ IN gctUINT IndexShift,
++ IN gctUINT Index,
++ OUT gctUINT32_PTR Value
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x PerformanceAddress=0x%x IndexAddress=0x%x "
++ "IndexShift=%u Index=%u",
++ Hardware, PerformanceAddress, IndexAddress, IndexShift,
++ Index);
++
++ /* Write the index. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ IndexAddress,
++ Index << IndexShift));
++
++ /* Read the register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ PerformanceAddress,
++ Value));
++
++ /* Test for reset. */
++ if (Index == 15)
++ {
++ /* Index another register to get out of reset. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os, Hardware->core, IndexAddress, 0));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=0x%x", *Value);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
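++/*
++** Usage note: gckHARDWARE_GetFrameInfo() below reads each counter by
++** selecting its index in the matching debug-control register, e.g.
++**
++** gckHARDWARE_ReadPerformanceRegister(Hardware, 0x0045C, 0x00470, 24, 4, &v);
++**
++** reads SH counter 4 (shader cycles). Index 15 resets the counter block,
++** which is why the gcdFRAME_DB_RESET paths read with Index == 15.
++*/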
++
++gceSTATUS
++gckHARDWARE_GetFrameInfo(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_FRAME_INFO * FrameInfo
++ )
++{
++ gceSTATUS status;
++ gctUINT i, clock;
++ gcsHAL_FRAME_INFO info;
++#if gcdFRAME_DB_RESET
++ gctUINT reset;
++#endif
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Get profile tick. */
++ gcmkONERROR(gckOS_GetProfileTick(&info.ticks));
++
++ /* Read SH counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 4,
++ &info.shaderCycles));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 9,
++ &info.vsInstructionCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 12,
++ &info.vsTextureCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 7,
++ &info.psInstructionCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 14,
++ &info.psTextureCount));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0045C,
++ 0x00470,
++ 24,
++ 15,
++ &reset));
++#endif
++
++ /* Read PA counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 3,
++ &info.vertexCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 4,
++ &info.primitiveCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 7,
++ &info.rejectedPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 8,
++ &info.culledPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 6,
++ &info.clippedPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 5,
++ &info.outPrimitives));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00460,
++ 0x00474,
++ 0,
++ 15,
++ &reset));
++#endif
++
++ /* Read RA counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 3,
++ &info.inPrimitives));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 11,
++ &info.culledQuadCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 1,
++ &info.totalQuadCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 2,
++ &info.quadCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 0,
++ &info.totalPixelCount));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00448,
++ 0x00474,
++ 16,
++ 15,
++ &reset));
++#endif
++
++ /* Read TX counters and reset them. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 0,
++ &info.bilinearRequests));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 1,
++ &info.trilinearRequests));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 8,
++ &info.txHitCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 9,
++ &info.txMissCount));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 6,
++ &info.txBytes8));
++#if gcdFRAME_DB_RESET
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x0044C,
++ 0x00474,
++ 24,
++ 15,
++ &reset));
++#endif
++
++ /* Read clock control register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ &clock));
++
++ /* Walk through all available pixel pipes. */
++ for (i = 0; i < Hardware->identity.pixelPipes; ++i)
++ {
++ /* Select proper pipe. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ ((((gctUINT32) (clock)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20))) | (((gctUINT32) ((gctUINT32) (i) & ((gctUINT32) ((((1 ? 23:20) - (0 ? 23:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:20) - (0 ? 23:20) + 1))))))) << (0 ? 23:20)))));
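++ /* The generated expression above writes the pipe index i into bits
++ ** 23:20 of the clock control register, selecting which pixel pipe's
++ ** counters the reads below sample. */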
++
++ /* Read cycle registers. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ &info.cycles[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0007C,
++ &info.idleCycles[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00438,
++ &info.mcCycles[i]));
++
++ /* Read bandwidth registers. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0005C,
++ &info.readRequests[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00040,
++ &info.readBytes8[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00050,
++ &info.writeRequests[i]));
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00044,
++ &info.writeBytes8[i]));
++
++ /* Read PE counters. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 0,
++ &info.colorKilled[i]));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 2,
++ &info.colorDrawn[i]));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 1,
++ &info.depthKilled[i]));
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 3,
++ &info.depthDrawn[i]));
++ }
++
++ /* Zero out remaining reserved counters. */
++ for (; i < 8; ++i)
++ {
++ info.readBytes8[i] = 0;
++ info.writeBytes8[i] = 0;
++ info.cycles[i] = 0;
++ info.idleCycles[i] = 0;
++ info.mcCycles[i] = 0;
++ info.readRequests[i] = 0;
++ info.writeRequests[i] = 0;
++ info.colorKilled[i] = 0;
++ info.colorDrawn[i] = 0;
++ info.depthKilled[i] = 0;
++ info.depthDrawn[i] = 0;
++ }
++
++ /* Reset clock control register. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00000,
++ clock));
++
++ /* Reset cycle and bandwidth counters. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ 1));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0003C,
++ 0));
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00078,
++ 0));
++
++#if gcdFRAME_DB_RESET
++ /* Reset PE counters. */
++ gcmkONERROR(gckHARDWARE_ReadPerformanceRegister(
++ Hardware,
++ 0x00454,
++ 0x00470,
++ 16,
++ 15,
++ &reset));
++#endif
++
++ /* Copy to user. */
++ gcmkONERROR(gckOS_CopyToUserData(Hardware->os,
++ &info,
++ FrameInfo,
++ gcmSIZEOF(info)));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdDVFS
++#define READ_FROM_EATER1 0
++
++gceSTATUS
++gckHARDWARE_QueryLoad(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 * Load
++ )
++{
++ gctUINT32 debug1;
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Load != gcvNULL);
++
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE);
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00110,
++ Load));
++#if READ_FROM_EATER1
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00134,
++ Load));
++#endif
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00114,
++ &debug1));
++
++ /* Patch result of 0x110 with result of 0x114. */
++ if ((debug1 & 0xFF) == 1)
++ {
++ *Load &= ~0xFF;
++ *Load |= 1;
++ }
++
++ if (((debug1 & 0xFF00) >> 8) == 1)
++ {
++ *Load &= ~(0xFF << 8);
++ *Load |= 1 << 8;
++ }
++
++ if (((debug1 & 0xFF0000) >> 16) == 1)
++ {
++ *Load &= ~(0xFF << 16);
++ *Load |= 1 << 16;
++ }
++
++ if (((debug1 & 0xFF000000) >> 24) == 1)
++ {
++ *Load &= ~(0xFF << 24);
++ *Load |= 1 << 24;
++ }
++ }
++ else
++ {
++ status = gcvSTATUS_INVALID_REQUEST;
++ }
++
++OnError:
++
++ gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex);
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_SetDVFSPeroid(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 Frequency
++ )
++{
++ gceSTATUS status;
++ gctUINT32 period;
++ gctUINT32 eater;
++
++#if READ_FROM_EATER1
++ gctUINT32 period1;
++ gctUINT32 eater1;
++#endif
++
++ gcmkHEADER_ARG("Hardware=0x%X Frequency=%d", Hardware, Frequency);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ period = 0;
++
++ while((64 << period) < (gcdDVFS_ANAYLSE_WINDOW * Frequency * 1000) )
++ {
++ period++;
++ }
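++ /* The loop above picks the smallest exponent for which a hardware
++ ** window of (64 << period) cycles covers the analysis window
++ ** (gcdDVFS_ANAYLSE_WINDOW) at the given frequency. */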
++
++#if READ_FROM_EATER1
++ /*
++ * Period = F * 1000 * 1000 / (60 * 16 * 1024);
++ */
++ period1 = Frequency * 6250 / 6114;
++#endif
++
++ gckOS_AcquireMutex(Hardware->os, Hardware->powerMutex, gcvINFINITE);
++
++ if (Hardware->chipPowerState == gcvPOWER_ON)
++ {
++ /* Get current configure. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ &eater));
++
++ /* Change period. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ ((((gctUINT32) (eater)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8))) | (((gctUINT32) ((gctUINT32) (period) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1))))))) << (0 ? 15:8)))));
++
++#if READ_FROM_EATER1
++ /* Config eater1. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00130,
++ &eater1));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x00130,
++ ((((gctUINT32) (eater1)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:16) - (0 ? 31:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:16) - (0 ? 31:16) + 1))))))) << (0 ? 31:16))) | (((gctUINT32) ((gctUINT32) (period1) & ((gctUINT32) ((((1 ? 31:16) - (0 ? 31:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:16) - (0 ? 31:16) + 1))))))) << (0 ? 31:16)))));
++#endif
++ }
++ else
++ {
++ status = gcvSTATUS_INVALID_REQUEST;
++ }
++
++OnError:
++ gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex);
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckHARDWARE_InitDVFS(
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gctUINT32 data;
++
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ gcmkONERROR(gckOS_ReadRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ &data));
++
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1))))))) << (0 ? 18:18))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1))))))) << (0 ? 18:18)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 23:23) - (0 ? 23:23) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 23:23) - (0 ? 23:23) + 1))))))) << (0 ? 23:23)));
++ data = ((((gctUINT32) (data)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1))))))) << (0 ? 22:22))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 22:22) - (0 ? 22:22) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 22:22) - (0 ? 22:22) + 1))))))) << (0 ? 22:22)));
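++ /* The generated lines above program individual configuration bits
++ ** (16, 18, 19, 20, 22 and 23) of the DVFS control register 0x0010C
++ ** before it is written back below. */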
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "DVFS Configure=0x%X",
++ data);
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Hardware->os,
++ Hardware->core,
++ 0x0010C,
++ data));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckHARDWARE_PrepareFunctions
++**
++** Generate the command buffer snippets used by gckHARDWARE, through which
++** gckHARDWARE can drive the GPU with FE commands directly, without going
++** through gckCOMMAND, thereby avoiding race conditions and deadlocks.
++**
++** Notice:
++** 1. Each snippet can only be executed when the GPU is idle.
++** 2. Execution is triggered by AHB (0x658).
++** 3. Each snippet is followed by END, so software can sync with the GPU by
++** checking for GPU idle.
++** 4. It is transparent to the gckCOMMAND command buffer.
++**
++** Existing snippets:
++** 1. MMU configure
++** For the new MMU, after the GPU is reset, the FE executes this command
++** sequence to enable the MMU.
++*/
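++/*
++** Layout of the MMU snippet built below (as constructed by this function):
++**
++** functions[gcvHARDWARE_FUNCTION_MMU].address --> [MMU state commands][END]
++**
++** The snippet lives in a single page of non-paged memory and its size is
++** recorded as mmuBytes + endBytes.
++*/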
++gceSTATUS
++gckHARDWARE_PrepareFunctions(
++ gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gckOS os;
++ gctUINT32 offset = 0;
++ gctUINT32 mmuBytes;
++ gctUINT32 endBytes;
++ gctUINT8_PTR logical;
++
++ gcmkHEADER_ARG("%x", Hardware);
++
++ os = Hardware->os;
++
++ gcmkVERIFY_OK(gckOS_GetPageSize(os, &Hardware->functionBytes));
++
++ /* Allocate a command buffer. */
++ gcmkONERROR(gckOS_AllocateNonPagedMemory(
++ os,
++ gcvFALSE,
++ &Hardware->functionBytes,
++ &Hardware->functionPhysical,
++ &Hardware->functionLogical
++ ));
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ os,
++ Hardware->functionLogical,
++ &Hardware->functionAddress
++ ));
++
++ if (Hardware->mmuVersion > 0)
++ {
++ /* MMU configure command sequence. */
++ logical = (gctUINT8_PTR)Hardware->functionLogical + offset;
++
++ Hardware->functions[gcvHARDWARE_FUNCTION_MMU].address
++ = Hardware->functionAddress + offset;
++
++ gcmkONERROR(gckHARDWARE_SetMMUStates(
++ Hardware,
++ Hardware->kernel->mmu->mtlbLogical,
++ gcvMMU_MODE_4K,
++ (gctUINT8_PTR)Hardware->kernel->mmu->mtlbLogical + gcdMMU_MTLB_SIZE,
++ logical,
++ &mmuBytes
++ ));
++
++ offset += mmuBytes;
++
++ logical = (gctUINT8_PTR)Hardware->functionLogical + offset;
++
++ gcmkONERROR(gckHARDWARE_End(
++ Hardware,
++ gcvNULL,
++ &endBytes
++ ));
++
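++ /* The End() call above, with a gcvNULL logical address, only queried
++ ** the number of bytes into endBytes; the call below emits the actual
++ ** END command at 'logical'. */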
++ gcmkONERROR(gckHARDWARE_End(
++ Hardware,
++ logical,
++ &endBytes
++ ));
++
++ offset += endBytes;
++
++ Hardware->functions[gcvHARDWARE_FUNCTION_MMU].bytes = mmuBytes + endBytes;
++ }
++
++ gcmkASSERT(offset < Hardware->functionBytes);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/arch/gc_hal_kernel_hardware.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/arch/gc_hal_kernel_hardware.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/arch/gc_hal_kernel_hardware.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/arch/gc_hal_kernel_hardware.h 2015-07-27 23:13:06.186908111 +0200
+@@ -0,0 +1,160 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_hardware_h_
++#define __gc_hal_kernel_hardware_h_
++
++#if gcdENABLE_VG
++#include "gc_hal_kernel_hardware_vg.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++typedef enum {
++ gcvHARDWARE_FUNCTION_MMU,
++ gcvHARDWARE_FUNCTION_FLUSH,
++
++ gcvHARDWARE_FUNCTION_NUM,
++}
++gceHARDWARE_FUNCTION;
++
++
++typedef struct _gcsHARWARE_FUNCTION
++{
++ /* Entry of the function. */
++ gctUINT32 address;
++
++ /* Bytes of the function. */
++ gctUINT32 bytes;
++}
++gcsHARDWARE_FUNCTION;
++
++/* gckHARDWARE object. */
++struct _gckHARDWARE
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckKERNEL object. */
++ gckKERNEL kernel;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Core */
++ gceCORE core;
++
++ /* Chip characteristics. */
++ gcsHAL_QUERY_CHIP_IDENTITY identity;
++ gctBOOL allowFastClear;
++ gctBOOL allowCompression;
++ gctUINT32 powerBaseAddress;
++ gctBOOL extraEventStates;
++
++ /* Big endian */
++ gctBOOL bigEndian;
++
++ /* Chip status */
++ gctPOINTER powerMutex;
++ gctUINT32 powerProcess;
++ gctUINT32 powerThread;
++ gceCHIPPOWERSTATE chipPowerState;
++ gctUINT32 lastWaitLink;
++ gctUINT32 lastEnd;
++ gctBOOL clockState;
++ gctBOOL powerState;
++ gctPOINTER globalSemaphore;
++
++ gctISRMANAGERFUNC startIsr;
++ gctISRMANAGERFUNC stopIsr;
++ gctPOINTER isrContext;
++
++ gctUINT32 mmuVersion;
++
++ /* Whether to use the new MMU. Meaningless for the old MMU,
++ ** since the old MMU is always enabled.
++ */
++ gctBOOL enableMMU;
++
++ /* Type */
++ gceHARDWARE_TYPE type;
++
++#if gcdPOWEROFF_TIMEOUT
++ gctUINT32 powerOffTime;
++ gctUINT32 powerOffTimeout;
++ gctPOINTER powerOffTimer;
++#endif
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ gctUINT32 powerOnFscaleVal;
++#endif
++ gctPOINTER pageTableDirty;
++
++#if gcdLINK_QUEUE_SIZE
++ struct _gckLINKQUEUE linkQueue;
++#endif
++
++ gctBOOL powerManagement;
++ gctBOOL powerManagementLock;
++ gctBOOL gpuProfiler;
++
++ gctBOOL endAfterFlushMmuCache;
++
++ gctUINT32 minFscaleValue;
++
++ gctPOINTER pendingEvent;
++
++ /* Command snippets used internally by gckHARDWARE; see gckHARDWARE_PrepareFunctions. */
++ gctPHYS_ADDR functionPhysical;
++ gctPOINTER functionLogical;
++ gctUINT32 functionAddress;
++ gctSIZE_T functionBytes;
++
++ gcsHARDWARE_FUNCTION functions[gcvHARDWARE_FUNCTION_NUM];
++};
++
++gceSTATUS
++gckHARDWARE_GetBaseAddress(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32_PTR BaseAddress
++ );
++
++gceSTATUS
++gckHARDWARE_NeedBaseAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 State,
++ OUT gctBOOL_PTR NeedBase
++ );
++
++gceSTATUS
++gckHARDWARE_GetFrameInfo(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_FRAME_INFO * FrameInfo
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_hardware_h_ */
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/arch/gc_hal_kernel_recorder.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/arch/gc_hal_kernel_recorder.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/arch/gc_hal_kernel_recorder.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/arch/gc_hal_kernel_recorder.c 2015-07-27 23:13:06.190893891 +0200
+@@ -0,0 +1,679 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#include "gc_hal_kernel_context.h"
++
++/*
++ * -----------------------
++ * HARDWARE STATE RECORDER
++ * -----------------------
++ *
++ * The state mirror buffer is used to 'mirror' hardware states, since hardware
++ * states can't be dumped. It is a context buffer which stores the 'global'
++ * context.
++ *
++ * For each commit, the state recorder
++ * 1) Records the context buffer (if there is one) and the command buffers in
++ * this commit.
++ * 2) Parses those buffers to estimate which states changed.
++ * 3) Stores the result in a mirror buffer.
++ *
++ * == Commit 0 ====================================================================
++ *
++ * Context Buffer 0
++ *
++ * Command Buffer 0
++ *
++ * Mirror Buffer 0 <- Context Buffer 0 + Command Buffer 0
++ *
++ * == Commit 1 ====================================================================
++ *
++ * Command Buffer 1
++ *
++ * Mirror Buffer 1 <- Command Buffer 1 + Mirror Buffer 0
++ *
++ * == Commit 2 ====================================================================
++ *
++ * Context Buffer 2 (optional)
++ *
++ * Command Buffer 2
++ *
++ * Mirror Buffer 2 <- Command Buffer 2 + Context Buffer 2 + Mirror Buffer 1
++ *
++ * == Commit N ====================================================================
++ *
++ * For Commit N, these buffers are needed to reproduce the hardware's behavior
++ * in this commit:
++ *
++ * Mirror Buffer [N - 1] : State mirror accumulated by past commits,
++ * used to restore the hardware state.
++ * Context Buffer [N] : Context buffer of this commit, if present.
++ * Command Buffer [N] : Command buffer executed by hardware in this commit.
++ *
++ * If the order in which states are programmed matters, the hardware's behavior
++ * can't be reproduced exactly, but the state values stored in the mirror
++ * buffer remain trustworthy.
++ */
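++/*
++ * Replay order, as emitted by gckRECORDER_Dump() below: for each recorded
++ * commit, the mirror of the previous record is dumped first to restore the
++ * accumulated state, then the context buffer (if any), then the command
++ * buffer that was executed in that commit.
++ */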
++
++/* Queue size. */
++#define gcdNUM_RECORDS 6
++
++typedef struct _gcsPARSER_HANDLER * gckPARSER_HANDLER;
++
++typedef void
++(*HandlerFunction)(
++ IN gckPARSER_HANDLER Handler,
++ IN gctUINT32 Addr,
++ IN gctUINT32 Data
++ );
++
++typedef struct _gcsPARSER_HANDLER
++{
++ gctUINT32 type;
++ gctUINT32 cmd;
++ gctPOINTER private;
++ HandlerFunction function;
++}
++gcsPARSER_HANDLER;
++
++typedef struct _gcsPARSER * gckPARSER;
++typedef struct _gcsPARSER
++{
++ gctUINT8_PTR currentCmdBufferAddr;
++
++ /* Current command. */
++ gctUINT32 lo;
++ gctUINT32 hi;
++
++ gctUINT8 cmdOpcode;
++ gctUINT16 cmdAddr;
++ gctUINT32 cmdSize;
++ gctUINT32 cmdRectCount;
++ gctUINT8 skip;
++ gctUINT32 skipCount;
++
++ gctBOOL allow;
++
++ /* Callback used by parser to handle a command. */
++ gckPARSER_HANDLER commandHandler;
++}
++gcsPARSER;
++
++typedef struct _gcsMIRROR
++{
++ gctUINT32_PTR logical[gcdNUM_RECORDS];
++ gctUINT32 bytes;
++ gcsSTATE_MAP_PTR map;
++ gctUINT32 stateCount;
++}
++gcsMIRROR;
++
++typedef struct _gcsDELTA
++{
++ gctUINT64 commitStamp;
++ gctUINT32_PTR command;
++ gctUINT32 commandBytes;
++ gctUINT32_PTR context;
++ gctUINT32 contextBytes;
++}
++gcsDELTA;
++
++typedef struct _gcsRECORDER
++{
++ gckOS os;
++ gcsMIRROR mirror;
++ gcsDELTA deltas[gcdNUM_RECORDS];
++
++ /* Index of current record. */
++ gctUINT index;
++
++ /* Number of records. */
++ gctUINT num;
++
++ /* Plugin used by gckPARSER. */
++ gcsPARSER_HANDLER recorderHandler;
++ gckPARSER parser;
++}
++gcsRECORDER;
++
++
++/******************************************************************************\
++***************************** Command Buffer Parser ****************************
++\******************************************************************************/
++
++/*
++** The command buffer parser checks the command buffer from the FE's point of
++** view to make sure there are no format errors.
++**
++** The parser provides a callback mechanism, so plug-ins can be added to
++** implement other functions.
++*/
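++/*
++** A plug-in fills in a gcsPARSER_HANDLER and is invoked once per state of
++** every matching command. The recorder below, for example, registers
++** cmd = 0x01 (LOAD_STATE), private = the recorder itself, and
++** function = _RecodeState, which is called as (Handler, Addr, Data).
++*/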
++
++static void
++_HandleLoadState(
++ IN OUT gckPARSER Parser
++ )
++{
++ gctUINT i;
++ gctUINT32_PTR data = (gctUINT32_PTR)Parser->currentCmdBufferAddr;
++ gctUINT32 cmdAddr = Parser->cmdAddr;
++
++ if (Parser->commandHandler == gcvNULL
++ || Parser->commandHandler->cmd != 0x01
++ )
++ {
++ /* No handler for this command. */
++ return;
++ }
++
++ for (i = 0; i < Parser->cmdSize; i++)
++ {
++ Parser->commandHandler->function(Parser->commandHandler, cmdAddr, *data);
++
++ /* Advance to next state. */
++ cmdAddr++;
++ data++;
++ }
++}
++
++static void
++_GetCommand(
++ IN OUT gckPARSER Parser
++ )
++{
++ gctUINT32 * buffer = (gctUINT32 *)Parser->currentCmdBufferAddr;
++
++ gctUINT16 cmdRectCount;
++ gctUINT16 cmdDataCount;
++
++ Parser->hi = buffer[0];
++ Parser->lo = buffer[1];
++
++ Parser->cmdOpcode = (((((gctUINT32) (Parser->hi)) >> (0 ? 31:27)) & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1)))))) );
++ Parser->cmdRectCount = 1;
++
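++ /* Dispatch on the FE opcode in bits 31:27. Only 0x01 (LOAD_STATE) is
++ ** interpreted further, via _HandleLoadState(); the remaining opcodes
++ ** are merely sized so the parser can skip over them. */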
++ switch (Parser->cmdOpcode)
++ {
++ case 0x01:
++ /* Extract count. */
++ Parser->cmdSize = (((((gctUINT32) (Parser->hi)) >> (0 ? 25:16)) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1)))))) );
++ if (Parser->cmdSize == 0)
++ {
++ /* 0 means 1024. */
++ Parser->cmdSize = 1024;
++ }
++ Parser->skip = (Parser->cmdSize & 0x1) ? 0 : 1;
++
++ /* Extract address. */
++ Parser->cmdAddr = (((((gctUINT32) (Parser->hi)) >> (0 ? 15:0)) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1)))))) );
++
++ Parser->currentCmdBufferAddr = Parser->currentCmdBufferAddr + 4;
++ Parser->skipCount = Parser->cmdSize + Parser->skip;
++ break;
++
++ case 0x05:
++ Parser->cmdSize = 4;
++ Parser->skipCount = gcmALIGN(Parser->cmdSize, 2);
++ break;
++
++ case 0x06:
++ Parser->cmdSize = 5;
++ Parser->skipCount = gcmALIGN(Parser->cmdSize, 2);
++ break;
++
++ case 0x0C:
++ Parser->cmdSize = 3;
++ Parser->skipCount = gcmALIGN(Parser->cmdSize, 2);
++ break;
++
++ case 0x09:
++ Parser->cmdSize = 2;
++ Parser->cmdAddr = 0x0F16;
++ Parser->skipCount = gcmALIGN(Parser->cmdSize, 2);
++ break;
++
++ case 0x04:
++ Parser->cmdSize = 1;
++ Parser->cmdAddr = 0x0F06;
++
++ cmdRectCount = (((((gctUINT32) (Parser->hi)) >> (0 ? 15:8)) & ((gctUINT32) ((((1 ? 15:8) - (0 ? 15:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:8) - (0 ? 15:8) + 1)))))) );
++ cmdDataCount = (((((gctUINT32) (Parser->hi)) >> (0 ? 26:16)) & ((gctUINT32) ((((1 ? 26:16) - (0 ? 26:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 26:16) - (0 ? 26:16) + 1)))))) );
++
++ Parser->skipCount = gcmALIGN(Parser->cmdSize, 2)
++ + cmdRectCount * 2
++ + gcmALIGN(cmdDataCount, 2);
++
++ Parser->cmdRectCount = cmdRectCount;
++ break;
++
++ case 0x03:
++ Parser->currentCmdBufferAddr = Parser->currentCmdBufferAddr + 8;
++ Parser->skipCount = 0;
++ break;
++
++ case 0x02:
++ Parser->currentCmdBufferAddr = Parser->currentCmdBufferAddr + 8;
++ Parser->skipCount = 0;
++ break;
++
++ default:
++ /* Unknown command is a risk. */
++ Parser->allow = gcvFALSE;
++ break;
++ }
++}
++
++static void
++_ParseCommand(
++ IN OUT gckPARSER Parser
++ )
++{
++ switch(Parser->cmdOpcode)
++ {
++ case 0x01:
++ _HandleLoadState(Parser);
++ break;
++ case 0x05:
++ case 0x06:
++ case 0x0C:
++ break;
++ case 0x04:
++ break;
++ default:
++ break;
++ }
++
++ /* Advance to next command. */
++ Parser->currentCmdBufferAddr = Parser->currentCmdBufferAddr
++ + (Parser->skipCount << 2);
++}
++
++gceSTATUS
++gckPARSER_Parse(
++ IN gckPARSER Parser,
++ IN gctUINT8_PTR Buffer,
++ IN gctUINT32 Bytes
++ )
++{
++ gckPARSER parser = Parser;
++ gctUINT8_PTR end = (gctUINT8_PTR)Buffer + Bytes;
++
++ /* Initialize parser. */
++ parser->currentCmdBufferAddr = (gctUINT8_PTR)Buffer;
++ parser->skip = 0;
++ parser->allow = gcvTRUE;
++
++ /* Go through command buffer until reaching the end
++ ** or meeting an error. */
++ do
++ {
++ _GetCommand(parser);
++
++ _ParseCommand(parser);
++ }
++ while ((parser->currentCmdBufferAddr < end) && (parser->allow == gcvTRUE));
++
++ if (parser->allow == gcvFALSE)
++ {
++ /* Error detected. */
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckPARSER_RegisterCommandHandler
++**
++** Register a command handler which will be called when parser get a command.
++**
++*/
++gceSTATUS
++gckPARSER_RegisterCommandHandler(
++ IN gckPARSER Parser,
++ IN gckPARSER_HANDLER Handler
++ )
++{
++ Parser->commandHandler = Handler;
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckPARSER_Construct(
++ IN gckOS Os,
++ IN gckPARSER_HANDLER Handler,
++ OUT gckPARSER * Parser
++ )
++{
++ gceSTATUS status;
++ gckPARSER pointer;
++
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsPARSER), (gctPOINTER *)&pointer));
++
++ /* Put it here temporarily; this should become a more general plug-in mechanism. */
++ pointer->commandHandler = Handler;
++
++ *Parser = pointer;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++void
++gckPARSER_Destroy(
++ IN gckOS Os,
++ IN gckPARSER Parser
++ )
++{
++ gcmkOS_SAFE_FREE(Os, Parser);
++}
++
++/******************************************************************************\
++**************************** Hardware States Recorder **************************
++\******************************************************************************/
++
++static void
++_RecodeState(
++ IN gckPARSER_HANDLER Handler,
++ IN gctUINT32 Addr,
++ IN gctUINT32 Data
++ )
++{
++ gcmkVERIFY_OK(gckRECORDER_UpdateMirror(Handler->private, Addr, Data));
++}
++
++static gctUINT
++_Previous(
++ IN gctUINT Index
++ )
++{
++ if (Index == 0)
++ {
++ return gcdNUM_RECORDS - 1;
++ }
++
++ return Index - 1;
++}
++
++static gctUINT
++_Next(
++ IN gctUINT Index
++ )
++{
++ return (Index + 1) % gcdNUM_RECORDS;
++}
++
++gceSTATUS
++gckRECORDER_Construct(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ OUT gckRECORDER * Recorder
++ )
++{
++ gceSTATUS status;
++ gckCONTEXT context = gcvNULL;
++ gckRECORDER recorder = gcvNULL;
++ gctUINT32 mapSize;
++ gctUINT i;
++ gctBOOL virtualCommandBuffer = Hardware->kernel->virtualCommandBuffer;
++
++ /* TODO: We only need the context buffer and the state map; it should be
++ ** possible to obtain them without constructing a new context.
++ ** For now the context is leaked, since we can't free it once the command
++ ** buffer is gone.
++ */
++
++ /* MMU is not ready now. */
++ Hardware->kernel->virtualCommandBuffer = gcvFALSE;
++
++ gcmkONERROR(gckCONTEXT_Construct(Os, Hardware, 0, &context));
++
++ /* Restore. */
++ Hardware->kernel->virtualCommandBuffer = virtualCommandBuffer;
++
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsRECORDER), (gctPOINTER *)&recorder));
++
++ gckOS_ZeroMemory(recorder, gcmSIZEOF(gcsRECORDER));
++
++ /* Copy state map. */
++ recorder->mirror.stateCount = context->stateCount;
++
++ mapSize = context->stateCount * gcmSIZEOF(gcsSTATE_MAP);
++
++ gcmkONERROR(gckOS_Allocate(Os, mapSize, (gctPOINTER *)&recorder->mirror.map));
++
++ gckOS_MemCopy(recorder->mirror.map, context->map, mapSize);
++
++ /* Copy context buffer. */
++ recorder->mirror.bytes = context->totalSize;
++
++ for (i = 0; i < gcdNUM_RECORDS; i++)
++ {
++ gcmkONERROR(gckOS_Allocate(Os, context->totalSize, (gctPOINTER *)&recorder->mirror.logical[i]));
++ gckOS_MemCopy(recorder->mirror.logical[i], context->buffer->logical, context->totalSize);
++ }
++
++ for (i = 0; i < gcdNUM_RECORDS; i++)
++ {
++ /* TODO: Optimize size. */
++ gcmkONERROR(gckOS_Allocate(Os, gcdCMD_BUFFER_SIZE, (gctPOINTER *)&recorder->deltas[i].command));
++ gcmkONERROR(gckOS_Allocate(Os, context->totalSize, (gctPOINTER *)&recorder->deltas[i].context));
++ }
++
++ recorder->index = 0;
++ recorder->num = 0;
++
++ /* Initialize Parser plugin. */
++ recorder->recorderHandler.cmd = 0x01;
++ recorder->recorderHandler.private = recorder;
++ recorder->recorderHandler.function = _RecodeState;
++
++ gcmkONERROR(gckPARSER_Construct(Os, &recorder->recorderHandler, &recorder->parser));
++
++ recorder->os = Os;
++
++ *Recorder = recorder;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ if (recorder)
++ {
++ gckRECORDER_Destory(Os, recorder);
++ }
++
++ return status;
++}
++
++gceSTATUS
++gckRECORDER_Destory(
++ IN gckOS Os,
++ IN gckRECORDER Recorder
++ )
++{
++ gctUINT i;
++
++ if (Recorder->mirror.map)
++ {
++ gcmkOS_SAFE_FREE(Os, Recorder->mirror.map);
++ }
++
++ for (i = 0; i < gcdNUM_RECORDS; i++)
++ {
++ if (Recorder->mirror.logical[i])
++ {
++ gcmkOS_SAFE_FREE(Os, Recorder->mirror.logical[i]);
++ }
++ }
++
++ for (i = 0; i < gcdNUM_RECORDS; i++)
++ {
++ if (Recorder->deltas[i].command)
++ {
++ gcmkOS_SAFE_FREE(Os, Recorder->deltas[i].command);
++ }
++
++ if (Recorder->deltas[i].context)
++ {
++ gcmkOS_SAFE_FREE(Os, Recorder->deltas[i].context);
++ }
++ }
++
++ if (Recorder->parser)
++ {
++ gckPARSER_Destroy(Os, Recorder->parser);
++ }
++
++ gcmkOS_SAFE_FREE(Os, Recorder);
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckRECORDER_UpdateMirror(
++ IN gckRECORDER Recorder,
++ IN gctUINT32 State,
++ IN gctUINT32 Data
++ )
++{
++ gctUINT32 index;
++ gcsSTATE_MAP_PTR map = Recorder->mirror.map;
++ gctUINT32_PTR buffer = Recorder->mirror.logical[Recorder->index];
++
++ if (State >= Recorder->mirror.stateCount)
++ {
++ /* Ignore them just like HW does. */
++ return gcvSTATUS_OK;
++ }
++
++ index = map[State].index;
++
++ if (index)
++ {
++ buffer[index] = Data;
++ }
++
++ return gcvSTATUS_OK;
++}
++
++void
++gckRECORDER_AdvanceIndex(
++ IN gckRECORDER Recorder,
++ IN gctUINT64 CommitStamp
++ )
++{
++ /* Get next record. */
++ gctUINT next = (Recorder->index + 1) % gcdNUM_RECORDS;
++
++ /* Record stamp of this commit. */
++ Recorder->deltas[Recorder->index].commitStamp = CommitStamp;
++
++ /* Mirror of next record is mirror of this record and delta in next record. */
++ gckOS_MemCopy(Recorder->mirror.logical[next],
++ Recorder->mirror.logical[Recorder->index], Recorder->mirror.bytes);
++
++ /* Advance to next record. */
++ Recorder->index = next;
++
++ Recorder->num = gcmMIN(Recorder->num + 1, gcdNUM_RECORDS - 1);
++
++
++ /* Reset delta. */
++ Recorder->deltas[Recorder->index].commandBytes = 0;
++ Recorder->deltas[Recorder->index].contextBytes = 0;
++}
++
++void
++gckRECORDER_Record(
++ IN gckRECORDER Recorder,
++ IN gctUINT8_PTR CommandBuffer,
++ IN gctUINT32 CommandBytes,
++ IN gctUINT8_PTR ContextBuffer,
++ IN gctUINT32 ContextBytes
++ )
++{
++ gcsDELTA * delta = &Recorder->deltas[Recorder->index];
++
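++ /* A byte count of 0xFFFFFFFF is a sentinel meaning "no buffer of this
++ ** kind in this commit". */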
++ if (CommandBytes != 0xFFFFFFFF)
++ {
++ gckPARSER_Parse(Recorder->parser, CommandBuffer, CommandBytes);
++ gckOS_MemCopy(delta->command, CommandBuffer, CommandBytes);
++ delta->commandBytes = CommandBytes;
++ }
++
++ if (ContextBytes != 0xFFFFFFFF)
++ {
++ gckPARSER_Parse(Recorder->parser, ContextBuffer, ContextBytes);
++ gckOS_MemCopy(delta->context, ContextBuffer, ContextBytes);
++ delta->contextBytes = ContextBytes;
++ }
++}
++
++void
++gckRECORDER_Dump(
++ IN gckRECORDER Recorder
++ )
++{
++ gctUINT last = Recorder->index;
++ gctUINT previous;
++ gctUINT i;
++ gcsMIRROR *mirror = &Recorder->mirror;
++ gcsDELTA *delta;
++ gckOS os = Recorder->os;
++
++ for (i = 0; i < Recorder->num; i++)
++ {
++ last = _Previous(last);
++ }
++
++ for (i = 0; i < Recorder->num; i++)
++ {
++ delta = &Recorder->deltas[last];
++
++ /* Dump record */
++ gcmkPRINT("#[commit %llu]", delta->commitStamp);
++
++ if (delta->commitStamp)
++ {
++ previous = _Previous(last);
++
++ gcmkPRINT("#[mirror]");
++ gckOS_DumpBuffer(os, mirror->logical[previous], mirror->bytes, gceDUMP_BUFFER_CONTEXT, gcvTRUE);
++ gcmkPRINT("@[kernel.execute]");
++ }
++
++ if (delta->contextBytes)
++ {
++ gckOS_DumpBuffer(os, delta->context, delta->contextBytes, gceDUMP_BUFFER_CONTEXT, gcvTRUE);
++ gcmkPRINT("@[kernel.execute]");
++ }
++
++ gckOS_DumpBuffer(os, delta->command, delta->commandBytes, gceDUMP_BUFFER_USER, gcvTRUE);
++ gcmkPRINT("@[kernel.execute]");
++
++ last = _Next(last);
++ }
++}
++
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.c 2015-07-27 23:13:06.190893891 +0200
+@@ -0,0 +1,932 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++
++#if gcdENABLE_VG
++
++#include "gc_hal_kernel_hardware_command_vg.h"
++
++#define _GC_OBJ_ZONE gcvZONE_COMMAND
++
++/******************************************************************************\
++****************************** gckVGCOMMAND API code *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_InitializeInfo
++**
++** Initialize architecture-dependent command buffer information.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to the Command object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGCOMMAND_InitializeInfo(
++ IN gckVGCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ do
++ {
++ /* Reset interrupts. */
++ Command->info.feBufferInt = -1;
++ Command->info.tsOverflowInt = -1;
++
++ /* Set command buffer attributes. */
++ Command->info.addressAlignment = 64;
++ Command->info.commandAlignment = 8;
++
++ /* Determine command alignment address mask. */
++ Command->info.addressMask = ((((gctUINT32) (Command->info.addressAlignment - 1)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) ((gctUINT32) (0 ) & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ /* Query the number of bytes needed by the STATE command. */
++ gcmkERR_BREAK(gckVGCOMMAND_StateCommand(
++ Command, 0x0, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.stateCommandSize
++ ));
++
++ /* Query the number of bytes needed by the RESTART command. */
++ gcmkERR_BREAK(gckVGCOMMAND_RestartCommand(
++ Command, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.restartCommandSize
++ ));
++
++ /* Query the number of bytes needed by the FETCH command. */
++ gcmkERR_BREAK(gckVGCOMMAND_FetchCommand(
++ Command, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.fetchCommandSize
++ ));
++
++ /* Query the number of bytes needed by the CALL command. */
++ gcmkERR_BREAK(gckVGCOMMAND_CallCommand(
++ Command, gcvNULL, (gctUINT32)~0, 0,
++ &Command->info.callCommandSize
++ ));
++
++ /* Query the number of bytes needed by the RETURN command. */
++ gcmkERR_BREAK(gckVGCOMMAND_ReturnCommand(
++ Command, gcvNULL,
++ &Command->info.returnCommandSize
++ ));
++
++ /* Query the number of bytes needed by the EVENT command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EventCommand(
++ Command, gcvNULL, gcvBLOCK_PIXEL, -1,
++ &Command->info.eventCommandSize
++ ));
++
++ /* Query the number of bytes needed by the END command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EndCommand(
++ Command, gcvNULL, -1,
++ &Command->info.endCommandSize
++ ));
++
++ /* Determine the tail reserve size. */
++ Command->info.staticTailSize = gcmMAX(
++ Command->info.fetchCommandSize,
++ gcmMAX(
++ Command->info.returnCommandSize,
++ Command->info.endCommandSize
++ )
++ );
++
++ /* Determine the maximum tail size. */
++ Command->info.dynamicTailSize
++ = Command->info.staticTailSize
++ + Command->info.eventCommandSize * gcvBLOCK_COUNT;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
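++
++/* Editor's note: a minimal usage sketch, not part of the original
++   driver. Every gckVGCOMMAND_*Command helper in this file follows the
++   two-phase pattern exercised above: pass gcvNULL for 'Logical' to
++   query the command size, then pass a real buffer pointer to emit the
++   command. A hypothetical caller first queries the size:
++
++       gctUINT32 bytes;
++
++       gcmkERR_BREAK(gckVGCOMMAND_FetchCommand(
++           command, gcvNULL, (gctUINT32)~0, 0, &bytes));
++
++   and then, once 'bytes' of buffer space have been reserved, emits the
++   command for real:
++
++       gcmkERR_BREAK(gckVGCOMMAND_FetchCommand(
++           command, logical, fetchAddress, fetchCount, gcvNULL));
++
++   'command', 'logical', 'fetchAddress' and 'fetchCount' are assumed
++   caller-side names. */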
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_StateCommand
++**
++** Append a STATE command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++**      Pointer to a gckVGCOMMAND object.
++**
++** gctUINT32 Pipe
++**      Hardware destination pipe.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** STATE command at or gcvNULL to query the size of the command.
++**
++** gctUINT32 Address
++** Starting register address of the state buffer.
++** If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT32 Count
++** Number of states in state buffer.
++** If 'Logical' is gcvNULL, this argument is ignored.
++**
++**  gctUINT32 * Bytes
++** Pointer to the number of bytes available for the STATE command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++**  gctUINT32 * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the STATE command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_StateCommand(
++ IN gckVGCOMMAND Command,
++ IN gctUINT32 Pipe,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Address,
++ IN gctUINT32 Count,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Pipe=0x%x Logical=0x%x Address=0x%x Count=0x%x Bytes = 0x%x",
++ Command, Pipe, Logical, Address, Count, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append STATE. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12))) | (((gctUINT32) ((gctUINT32) (Pipe) & ((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the STATE command. */
++ *Bytes = 4 * (Count + 1);
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append LOAD_STATE. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Address) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the STATE command. */
++ *Bytes = 4 * (Count + 1);
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
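++
++/* Editor's note: an illustrative layout sketch, not in the original
++   source. A STATE command is one 32-bit header word followed by 'Count'
++   32-bit state values, which is why both branches report
++   *Bytes = 4 * (Count + 1). Loading two hypothetical state values at
++   register address 0x0A00 could look like:
++
++       gctUINT32 * slot = (gctUINT32 *) logical;
++
++       gcmkERR_BREAK(gckVGCOMMAND_StateCommand(
++           command, 0x0, slot, 0x0A00, 2, gcvNULL));
++
++       slot[1] = state0;
++       slot[2] = state1;
++
++   'logical', 'state0' and 'state1' are assumed caller-side names. */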
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_RestartCommand
++**
++** Form a RESTART command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++**      Pointer to a gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** RESTART command at or gcvNULL to query the size of the command.
++**
++** gctUINT32 FetchAddress
++** The address of another command buffer to be executed by this RESTART
++** command. If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT FetchCount
++** The number of 64-bit data quantities in another command buffer to
++** be executed by this RESTART command. If 'Logical' is gcvNULL, this
++** argument is ignored.
++**
++**  gctUINT32 * Bytes
++** Pointer to the number of bytes available for the RESTART command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++**  gctUINT32 * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the RESTART command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_RestartCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x FetchAddress=0x%x FetchCount=0x%x Bytes = 0x%x",
++ Command, Logical, FetchAddress, FetchCount, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++ gctUINT32 beginEndMark;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Determine Begin/End flag. */
++ beginEndMark = (FetchCount > 0)
++ ? ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24)))
++ : ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 24:24) - (0 ? 24:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 24:24) - (0 ? 24:24) + 1))))))) << (0 ? 24:24)));
++
++ /* Append RESTART. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x9 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0)))
++ | beginEndMark;
++
++ buffer[1]
++ = FetchAddress;
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the RESTART command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_FetchCommand
++**
++** Form a FETCH command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++**      Pointer to a gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** FETCH command at or gcvNULL to query the size of the command.
++**
++** gctUINT32 FetchAddress
++** The address of another command buffer to be executed by this FETCH
++** command. If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT FetchCount
++** The number of 64-bit data quantities in another command buffer to
++** be executed by this FETCH command. If 'Logical' is gcvNULL, this
++** argument is ignored.
++**
++**  gctUINT32 * Bytes
++** Pointer to the number of bytes available for the FETCH command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++**  gctUINT32 * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the FETCH command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_FetchCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x FetchAddress=0x%x FetchCount=0x%x Bytes = 0x%x",
++ Command, Logical, FetchAddress, FetchCount, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append FETCH. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x5 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0)));
++
++ buffer[1]
++ = gcmkFIXADDRESS(FetchAddress);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the FETCH command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append LINK. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x08 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)));
++
++ buffer[1]
++ = gcmkFIXADDRESS(FetchAddress);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the LINK command. */
++ *Bytes = 8;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
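++
++/* Editor's note: the buffer[0] expressions in this file are fully
++   expanded set-field macros. For a field spanning bits Hi:Lo, each term
++   reduces to the conventional idiom below (a sketch; the helper name is
++   illustrative, not from the original source):
++
++       static gctUINT32
++       _PackField(
++           gctUINT32 Word,
++           gctUINT Hi,
++           gctUINT Lo,
++           gctUINT32 Value
++           )
++       {
++           gctUINT32 mask = (Hi - Lo + 1 == 32)
++                          ? ~0U
++                          : ~(~0U << (Hi - Lo + 1));
++
++           return (Word & ~(mask << Lo)) | ((Value & mask) << Lo);
++       }
++
++   The FE 2.0 FETCH header above is then equivalent to
++   _PackField(_PackField(0, 31, 28, 0x5), 20, 0, FetchCount), with
++   buffer[1] holding the fetch address. */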
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_CallCommand
++**
++** Append a CALL command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++**      Pointer to a gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** CALL command at or gcvNULL to query the size of the command.
++**
++** gctUINT32 FetchAddress
++** The address of another command buffer to be executed by this CALL
++** command. If 'Logical' is gcvNULL, this argument is ignored.
++**
++** gctUINT FetchCount
++** The number of 64-bit data quantities in another command buffer to
++** be executed by this CALL command. If 'Logical' is gcvNULL, this
++** argument is ignored.
++**
++**  gctUINT32 * Bytes
++** Pointer to the number of bytes available for the CALL command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++**  gctUINT32 * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the CALL command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_CallCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x FetchAddress=0x%x FetchCount=0x%x Bytes = 0x%x",
++ Command, Logical, FetchAddress, FetchCount, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append CALL. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x6 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0))) | (((gctUINT32) ((gctUINT32) (FetchCount) & ((gctUINT32) ((((1 ? 20:0) - (0 ? 20:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:0) - (0 ? 20:0) + 1))))))) << (0 ? 20:0)));
++
++ buffer[1]
++ = gcmkFIXADDRESS(FetchAddress);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the CALL command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_ReturnCommand
++**
++** Append a RETURN command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++**      Pointer to a gckVGCOMMAND object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** RETURN command at or gcvNULL to query the size of the command.
++**
++**  gctUINT32 * Bytes
++** Pointer to the number of bytes available for the RETURN command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++**  gctUINT32 * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the RETURN command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_ReturnCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x Bytes = 0x%x",
++ Command, Logical, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append RETURN. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x7 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the RETURN command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
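++
++/* Editor's note: a hypothetical FE 2.0 subroutine sequence built from
++   the helpers above; buffer management is elided and the lower-case
++   names are assumed caller-side values. CALL transfers execution to
++   'subAddress' for 'subCount' 64-bit words, and the RETURN that
++   terminates the sub-buffer resumes the main stream after the CALL:
++
++       gcmkERR_BREAK(gckVGCOMMAND_CallCommand(
++           command, mainPtr, subAddress, subCount, gcvNULL));
++
++       gcmkERR_BREAK(gckVGCOMMAND_ReturnCommand(
++           command, subTailPtr, gcvNULL));
++
++   Both helpers reject pre-2.0 front ends with
++   gcvSTATUS_NOT_SUPPORTED. */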
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_EventCommand
++**
++** Form an EVENT command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to the Command object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** EVENT command at or gcvNULL to query the size of the command.
++**
++**  gceBLOCK Block
++**      Block that will generate the interrupt.
++**
++**  gctINT32 InterruptId
++**      The ID of the interrupt to generate.
++**      If 'Logical' is gcvNULL, this argument is ignored.
++**
++**  gctUINT32 * Bytes
++** Pointer to the number of bytes available for the EVENT command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++**  gctUINT32 * Bytes
++**      Pointer to a variable that will receive the number of bytes required
++**      for the EVENT command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_EventCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gceBLOCK Block,
++ IN gctINT32 InterruptId,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x Block=0x%x InterruptId=0x%x Bytes = 0x%x",
++ Command, Logical, Block, InterruptId, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ typedef struct _gcsEVENTSTATES
++ {
++ /* Chips before VG21 use these values. */
++ gctUINT eventFromFE;
++ gctUINT eventFromPE;
++
++ /* VG21 chips and later use SOURCE field. */
++ gctUINT eventSource;
++ }
++ gcsEVENTSTATES;
++
++ static gcsEVENTSTATES states[] =
++ {
++ /* gcvBLOCK_COMMAND */
++ {
++ (gctUINT)~0,
++ (gctUINT)~0,
++ (gctUINT)~0
++ },
++
++ /* gcvBLOCK_TESSELLATOR */
++ {
++ 0x0,
++ 0x1,
++ 0x10
++ },
++
++ /* gcvBLOCK_TESSELLATOR2 */
++ {
++ 0x0,
++ 0x1,
++ 0x12
++ },
++
++ /* gcvBLOCK_TESSELLATOR3 */
++ {
++ 0x0,
++ 0x1,
++ 0x14
++ },
++
++ /* gcvBLOCK_RASTER */
++ {
++ 0x0,
++ 0x1,
++ 0x07,
++ },
++
++ /* gcvBLOCK_VG */
++ {
++ 0x0,
++ 0x1,
++ 0x0F
++ },
++
++ /* gcvBLOCK_VG2 */
++ {
++ 0x0,
++ 0x1,
++ 0x11
++ },
++
++ /* gcvBLOCK_VG3 */
++ {
++ 0x0,
++ 0x1,
++ 0x13
++ },
++
++ /* gcvBLOCK_PIXEL */
++ {
++ 0x0,
++ 0x1,
++ 0x07
++ },
++ };
++
++ /* Verify block ID. */
++ gcmkVERIFY_ARGUMENT(gcmIS_VALID_INDEX(Block, states));
++
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++ gcmkVERIFY_ARGUMENT(InterruptId <= ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))));
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append EVENT. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x3 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 11:0) - (0 ? 11:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:0) - (0 ? 11:0) + 1))))))) << (0 ? 11:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 27:16) - (0 ? 27:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 27:16) - (0 ? 27:16) + 1))))))) << (0 ? 27:16)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 13:12) - (0 ? 13:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:12) - (0 ? 13:12) + 1))))))) << (0 ? 13:12)));
++
++ /* Determine chip version. */
++ if (Command->vg21)
++ {
++ /* Get the event source for the block. */
++ gctUINT eventSource = states[Block].eventSource;
++
++ /* Supported? */
++ if (eventSource == ~0)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8))) | (((gctUINT32) ((gctUINT32) (eventSource) & ((gctUINT32) ((((1 ? 12:8) - (0 ? 12:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:8) - (0 ? 12:8) + 1))))))) << (0 ? 12:8)));
++ }
++ else
++ {
++ /* Get the event source for the block. */
++ gctUINT eventFromFE = states[Block].eventFromFE;
++ gctUINT eventFromPE = states[Block].eventFromPE;
++
++ /* Supported? */
++ if (eventFromFE == ~0)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) ((gctUINT32) (eventFromFE) & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) ((gctUINT32) (eventFromPE) & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Make sure the events are directly supported for the block. */
++ if (states[Block].eventSource == ~0)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_NOT_SUPPORTED;
++ }
++
++            /* Return number of bytes required by the EVENT command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++ gcmkVERIFY_ARGUMENT(InterruptId <= ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))));
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append EVENT. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ /* Determine event source. */
++ if (Block == gcvBLOCK_COMMAND)
++ {
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 5:5) - (0 ? 5:5) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 5:5) - (0 ? 5:5) + 1))))))) << (0 ? 5:5)));
++ }
++ else
++ {
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++ }
++ }
++
++ if (Bytes != gcvNULL)
++ {
++            /* Return number of bytes required by the EVENT command. */
++ *Bytes = 8;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGCOMMAND_EndCommand
++**
++** Form an END command at the specified location in the command buffer.
++**
++** INPUT:
++**
++** gckVGCOMMAND Command
++** Pointer to the Command object.
++**
++** gctPOINTER Logical
++** Pointer to the current location inside the command buffer to append
++** END command at or gcvNULL to query the size of the command.
++**
++** gctINT32 InterruptId
++** The ID of the interrupt to generate.
++** If 'Logical' is gcvNULL, this argument will be ignored.
++**
++**  gctUINT32 * Bytes
++** Pointer to the number of bytes available for the END command.
++** If 'Logical' is gcvNULL, the value from this argument is ignored.
++**
++** OUTPUT:
++**
++**  gctUINT32 * Bytes
++** Pointer to a variable that will receive the number of bytes required
++** for the END command. If 'Bytes' is gcvNULL, nothing is returned.
++*/
++gceSTATUS
++gckVGCOMMAND_EndCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctINT32 InterruptId,
++ IN OUT gctUINT32 * Bytes
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Logical=0x%x InterruptId=0x%x Bytes = 0x%x",
++ Command, Logical, InterruptId, Bytes);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->fe20)
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR buffer;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++
++ /* Cast the buffer pointer. */
++ buffer = (gctUINT32_PTR) Logical;
++
++ /* Append END. */
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 31:28) - (0 ? 31:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:28) - (0 ? 31:28) + 1))))))) << (0 ? 31:28)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the END command. */
++ *Bytes = 8;
++ }
++ }
++ else
++ {
++ if (Logical != gcvNULL)
++ {
++ gctUINT32_PTR memory;
++
++ /* Verify the event ID. */
++ gcmkVERIFY_ARGUMENT(InterruptId >= 0);
++
++ /* Cast the buffer pointer. */
++ memory = (gctUINT32_PTR) Logical;
++
++ /* Append EVENT. */
++ memory[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E01) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ memory[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0))) | (((gctUINT32) ((gctUINT32) (InterruptId) & ((gctUINT32) ((((1 ? 4:0) - (0 ? 4:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:0) - (0 ? 4:0) + 1))))))) << (0 ? 4:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 6:6) - (0 ? 6:6) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 6:6) - (0 ? 6:6) + 1))))))) << (0 ? 6:6)));
++
++ /* Append END. */
++ memory[2]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x02 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)));
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return number of bytes required by the EVENT and END commands. */
++ *Bytes = 16;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
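++
++/* Editor's note: a sizing observation, not stated in the original
++   source. On FE 2.0 the END opcode carries the interrupt ID itself, so
++   a single 64-bit slot suffices (*Bytes = 8); on older front ends an
++   interrupt-raising END has to be emitted as an EVENT/END pair
++   (*Bytes = 16). gckVGCOMMAND_InitializeInfo accounts for this by
++   sizing the static tail reserve as
++
++       staticTailSize = gcmMAX(fetchCommandSize,
++                               gcmMAX(returnCommandSize,
++                                      endCommandSize));
++
++   so the reserved tail fits whichever terminator the front end
++   requires. */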
++
++#endif /* gcdENABLE_VG */
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/archvg/gc_hal_kernel_hardware_command_vg.h 2015-07-27 23:13:06.190893891 +0200
+@@ -0,0 +1,319 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_hardware_command_vg_h_
++#define __gc_hal_kernel_hardware_command_vg_h_
++
++/******************************************************************************\
++******************* Task and Interrupt Management Structures. ******************
++\******************************************************************************/
++
++/* Task storage header. */
++typedef struct _gcsTASK_STORAGE * gcsTASK_STORAGE_PTR;
++typedef struct _gcsTASK_STORAGE
++{
++ /* Next allocated storage buffer. */
++ gcsTASK_STORAGE_PTR next;
++}
++gcsTASK_STORAGE;
++
++/* Task container header. */
++typedef struct _gcsTASK_CONTAINER * gcsTASK_CONTAINER_PTR;
++typedef struct _gcsTASK_CONTAINER
++{
++ /* The number of tasks left to be processed in the container. */
++ gctINT referenceCount;
++
++ /* Size of the buffer. */
++ gctUINT size;
++
++ /* Link to the previous and the next allocated containers. */
++ gcsTASK_CONTAINER_PTR allocPrev;
++ gcsTASK_CONTAINER_PTR allocNext;
++
++ /* Link to the previous and the next containers in the free list. */
++ gcsTASK_CONTAINER_PTR freePrev;
++ gcsTASK_CONTAINER_PTR freeNext;
++}
++gcsTASK_CONTAINER;
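++
++/* Editor's note: each container is threaded onto two doubly-linked
++   lists at once: allocPrev/allocNext orders every container carved out
++   of the storage buffers, while freePrev/freeNext links only the
++   currently unused ones (anchored at taskFreeHead/taskFreeTail in
++   struct _gckVGCOMMAND below). This is a conventional pool-allocator
++   layout; the description is inferred from the field comments, not
++   from allocator code shown in this hunk. */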
++
++/* Kernel space task master table entry. */
++typedef struct _gcsBLOCK_TASK_ENTRY * gcsBLOCK_TASK_ENTRY_PTR;
++typedef struct _gcsBLOCK_TASK_ENTRY
++{
++ /* Pointer to the current task container for the block. */
++ gcsTASK_CONTAINER_PTR container;
++
++ /* Pointer to the current task data within the container. */
++ gcsTASK_HEADER_PTR task;
++
++ /* Pointer to the last link task within the container. */
++ gcsTASK_LINK_PTR link;
++
++ /* Number of interrupts allocated for this block. */
++ gctUINT interruptCount;
++
++ /* The index of the current interrupt. */
++ gctUINT interruptIndex;
++
++ /* Interrupt semaphore. */
++ gctSEMAPHORE interruptSemaphore;
++
++ /* Interrupt value array. */
++ gctINT32 interruptArray[32];
++}
++gcsBLOCK_TASK_ENTRY;
++
++
++/******************************************************************************\
++********************* Command Queue Management Structures. *********************
++\******************************************************************************/
++
++/* Command queue kernel element pointer. */
++typedef struct _gcsKERNEL_CMDQUEUE * gcsKERNEL_CMDQUEUE_PTR;
++
++/* Command queue object handler function type. */
++typedef gceSTATUS (* gctOBJECT_HANDLER) (
++ gckVGKERNEL Kernel,
++ gcsKERNEL_CMDQUEUE_PTR Entry
++ );
++
++/* Command queue kernel element. */
++typedef struct _gcsKERNEL_CMDQUEUE
++{
++ /* The number of buffers in the queue. */
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* Pointer to the object handler function. */
++ gctOBJECT_HANDLER handler;
++}
++gcsKERNEL_CMDQUEUE;
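++
++/* Editor's note: a minimal handler matching gctOBJECT_HANDLER above (a
++   sketch against the declared types; the function name and body are
++   illustrative, not from the original source):
++
++       static gceSTATUS
++       _OnBufferProcessed(
++           gckVGKERNEL Kernel,
++           gcsKERNEL_CMDQUEUE_PTR Entry
++           )
++       {
++           return gcvSTATUS_OK;
++       }
++
++   A real handler would typically recycle or free Entry->commandBuffer
++   before returning. */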
++
++/* Command queue header. */
++typedef struct _gcsKERNEL_QUEUE_HEADER * gcsKERNEL_QUEUE_HEADER_PTR;
++typedef struct _gcsKERNEL_QUEUE_HEADER
++{
++ /* The size of the buffer in bytes. */
++ gctUINT size;
++
++ /* The number of pending entries to be processed. */
++ volatile gctUINT pending;
++
++ /* The current command queue entry. */
++ gcsKERNEL_CMDQUEUE_PTR currentEntry;
++
++ /* Next buffer. */
++ gcsKERNEL_QUEUE_HEADER_PTR next;
++}
++gcsKERNEL_QUEUE_HEADER;
++
++
++/******************************************************************************\
++******************************* gckVGCOMMAND Object *******************************
++\******************************************************************************/
++
++/* gckVGCOMMAND object. */
++struct _gckVGCOMMAND
++{
++ /***************************************************************************
++ ** Object data and pointers.
++ */
++
++ gcsOBJECT object;
++ gckVGKERNEL kernel;
++ gckOS os;
++ gckVGHARDWARE hardware;
++
++ /* Features. */
++ gctBOOL fe20;
++ gctBOOL vg20;
++ gctBOOL vg21;
++
++
++ /***************************************************************************
++ ** Enable command queue dumping.
++ */
++
++ gctBOOL enableDumping;
++
++
++ /***************************************************************************
++ ** Bus Error interrupt.
++ */
++
++ gctINT32 busErrorInt;
++
++
++ /***************************************************************************
++ ** Command buffer information.
++ */
++
++ gcsCOMMAND_BUFFER_INFO info;
++
++
++ /***************************************************************************
++ ** Synchronization objects.
++ */
++
++ gctPOINTER queueMutex;
++ gctPOINTER taskMutex;
++ gctPOINTER commitMutex;
++
++
++ /***************************************************************************
++ ** Task management.
++ */
++
++ /* The head of the storage buffer linked list. */
++ gcsTASK_STORAGE_PTR taskStorage;
++
++ /* Allocation size. */
++ gctUINT taskStorageGranularity;
++ gctUINT taskStorageUsable;
++
++ /* The free container list. */
++ gcsTASK_CONTAINER_PTR taskFreeHead;
++ gcsTASK_CONTAINER_PTR taskFreeTail;
++
++    /* Task table. */
++ gcsBLOCK_TASK_ENTRY taskTable[gcvBLOCK_COUNT];
++
++
++ /***************************************************************************
++ ** Command queue.
++ */
++
++ /* Pointer to the allocated queue memory. */
++ gcsKERNEL_QUEUE_HEADER_PTR queue;
++
++ /* Pointer to the current available queue from which new queue entries
++ will be allocated. */
++ gcsKERNEL_QUEUE_HEADER_PTR queueHead;
++
++ /* If different from queueHead, points to the command queue which is
++ currently being executed by the hardware. */
++ gcsKERNEL_QUEUE_HEADER_PTR queueTail;
++
++ /* Points to the queue to merge the tail with when the tail is processed. */
++ gcsKERNEL_QUEUE_HEADER_PTR mergeQueue;
++
++ /* Queue overflow counter. */
++ gctUINT queueOverflow;
++
++
++ /***************************************************************************
++ ** Context.
++ */
++
++ /* Context counter used for unique ID. */
++ gctUINT64 contextCounter;
++
++ /* Current context ID. */
++ gctUINT64 currentContext;
++
++ /* Command queue power semaphore. */
++ gctPOINTER powerSemaphore;
++ gctINT32 powerStallInt;
++ gcsCMDBUFFER_PTR powerStallBuffer;
++ gctSIGNAL powerStallSignal;
++
++};
++
++/******************************************************************************\
++************************ gckVGCOMMAND Object Internal API. ***********************
++\******************************************************************************/
++
++/* Initialize architecture-dependent command buffer information. */
++gceSTATUS
++gckVGCOMMAND_InitializeInfo(
++ IN gckVGCOMMAND Command
++ );
++
++/* Form a STATE command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_StateCommand(
++ IN gckVGCOMMAND Command,
++ IN gctUINT32 Pipe,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Address,
++ IN gctUINT32 Count,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Form a RESTART command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_RestartCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Form a FETCH command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_FetchCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Form a CALL command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_CallCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctUINT32 FetchAddress,
++ IN gctUINT FetchCount,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Form a RETURN command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_ReturnCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Form an EVENT command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_EventCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gceBLOCK Block,
++ IN gctINT32 InterruptId,
++ IN OUT gctUINT32 * Bytes
++ );
++
++/* Form an END command at the specified location in the command buffer. */
++gceSTATUS
++gckVGCOMMAND_EndCommand(
++ IN gckVGCOMMAND Command,
++ IN gctPOINTER Logical,
++ IN gctINT32 InterruptId,
++ IN OUT gctUINT32 * Bytes
++ );
++
++#endif /* __gc_hal_kernel_hardware_command_vg_h_ */
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/archvg/gc_hal_kernel_hardware_vg.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/archvg/gc_hal_kernel_hardware_vg.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/archvg/gc_hal_kernel_hardware_vg.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/archvg/gc_hal_kernel_hardware_vg.c 2015-07-27 23:13:06.190893891 +0200
+@@ -0,0 +1,2119 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal.h"
++#include "gc_hal_kernel.h"
++#include "gc_hal_kernel_hardware_command_vg.h"
++
++#if gcdENABLE_VG
++
++#define _GC_OBJ_ZONE gcvZONE_HARDWARE
++
++typedef enum
++{
++ gcvPOWER_FLAG_INITIALIZE = 1 << 0,
++ gcvPOWER_FLAG_STALL = 1 << 1,
++ gcvPOWER_FLAG_STOP = 1 << 2,
++ gcvPOWER_FLAG_START = 1 << 3,
++ gcvPOWER_FLAG_RELEASE = 1 << 4,
++ gcvPOWER_FLAG_DELAY = 1 << 5,
++ gcvPOWER_FLAG_SAVE = 1 << 6,
++ gcvPOWER_FLAG_ACQUIRE = 1 << 7,
++ gcvPOWER_FLAG_POWER_OFF = 1 << 8,
++ gcvPOWER_FLAG_CLOCK_OFF = 1 << 9,
++ gcvPOWER_FLAG_CLOCK_ON = 1 << 10,
++ gcvPOWER_FLAG_NOP = 1 << 11,
++}
++gcePOWER_FLAGS;
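++
++/* Editor's note: the 1 << n encoding suggests these flags are OR-ed
++   into a single action word describing a power transition, e.g. a
++   hypothetical ON -> OFF transition might combine
++
++       gcvPOWER_FLAG_STALL | gcvPOWER_FLAG_STOP |
++       gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF
++
++   This is an inference from the flag names alone, not from state
++   machine code shown in this hunk. */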
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++static gceSTATUS
++_ResetGPU(
++ IN gckOS Os
++ )
++{
++ gctUINT32 control, idle;
++ gceSTATUS status;
++
++ /* Read register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ &control));
++
++ for (;;)
++ {
++ /* Disable clock gating. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00104,
++ 0x00000000));
++
++ /* Wait for clock being stable. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Isolate the GPU. */
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ control));
++
++ /* Set soft reset. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Wait for reset. */
++ gcmkONERROR(gckOS_Delay(Os, 1));
++
++ /* Reset soft reset bit. */
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 12:12) - (0 ? 12:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 12:12) - (0 ? 12:12) + 1))))))) << (0 ? 12:12)))));
++
++ /* Reset GPU isolation. */
++ control = ((((gctUINT32) (control)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 19:19) - (0 ? 19:19) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 19:19) - (0 ? 19:19) + 1))))))) << (0 ? 19:19)));
++
++ gcmkONERROR(gckOS_WriteRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ control));
++
++ /* Read idle register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00004,
++ &idle));
++
++ if ((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) == 0)
++ {
++ continue;
++ }
++
++ /* Read reset register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(Os,
++ gcvCORE_VG,
++ 0x00000,
++ &control));
++
++ if (((((((gctUINT32) (control)) >> (0 ? 16:16)) & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1)))))) ) == 0)
++ || ((((((gctUINT32) (control)) >> (0 ? 17:17)) & ((gctUINT32) ((((1 ? 17:17) - (0 ? 17:17) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 17:17) - (0 ? 17:17) + 1)))))) ) == 0)
++ )
++ {
++ continue;
++ }
++
++ /* GPU is idle. */
++ break;
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the error. */
++ return status;
++}
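++
++/* Editor's note: decoding the expanded macros above, the reset loop
++   drives three registers: the clock-gating control at 0x00104, the
++   main control word at 0x00000 (bit 12 = soft reset, bit 19 = GPU
++   isolation) and the idle status at 0x00004 (bit 0 = core idle). The
++   sequence is: ungate clocks, isolate the core, pulse soft reset,
++   de-isolate, then poll until the idle bit and two status bits of the
++   control word (bits 16 and 17) all read back as set. Register names
++   are not given in this hunk; the bit positions are read directly from
++   the expressions above. */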
++
++
++static gceSTATUS
++_IdentifyHardware(
++ IN gckOS Os,
++ OUT gceCHIPMODEL * ChipModel,
++ OUT gctUINT32 * ChipRevision,
++ OUT gctUINT32 * ChipFeatures,
++ OUT gctUINT32 * ChipMinorFeatures,
++ OUT gctUINT32 * ChipMinorFeatures2
++ )
++{
++ gceSTATUS status;
++ gctUINT32 chipIdentity;
++
++ do
++ {
++ /* Read chip identity register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Os, gcvCORE_VG, 0x00018, &chipIdentity));
++
++        /* Special case for older graphics cores. */
++ if (((((gctUINT32) (chipIdentity)) >> (0 ? 31:24) & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1)))))) == (0x01 & ((gctUINT32) ((((1 ? 31:24) - (0 ? 31:24) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:24) - (0 ? 31:24) + 1))))))))
++ {
++ *ChipModel = gcv500;
++ *ChipRevision = (((((gctUINT32) (chipIdentity)) >> (0 ? 15:12)) & ((gctUINT32) ((((1 ? 15:12) - (0 ? 15:12) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:12) - (0 ? 15:12) + 1)))))) );
++ }
++
++ else
++ {
++ /* Read chip identity register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Os, gcvCORE_VG,
++ 0x00020,
++ (gctUINT32 *) ChipModel));
++
++ /* Read CHIP_REV register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Os, gcvCORE_VG,
++ 0x00024,
++ ChipRevision));
++ }
++
++ /* Read chip feature register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(
++ Os, gcvCORE_VG, 0x0001C, ChipFeatures
++ ));
++
++ /* Read chip minor feature register. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(
++ Os, gcvCORE_VG, 0x00034, ChipMinorFeatures
++ ));
++
++ /* Read chip minor feature register #2. */
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(
++ Os, gcvCORE_VG, 0x00074, ChipMinorFeatures2
++ ));
++
++ gcmkTRACE(
++ gcvLEVEL_VERBOSE,
++ "ChipModel=0x%08X\n"
++ "ChipRevision=0x%08X\n"
++ "ChipFeatures=0x%08X\n"
++ "ChipMinorFeatures=0x%08X\n"
++ "ChipMinorFeatures2=0x%08X\n",
++ *ChipModel,
++ *ChipRevision,
++ *ChipFeatures,
++ *ChipMinorFeatures,
++ *ChipMinorFeatures2
++ );
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return the status. */
++ return status;
++}
++
++#if gcdPOWEROFF_TIMEOUT
++void
++_VGPowerTimerFunction(
++ gctPOINTER Data
++ )
++{
++ gckVGHARDWARE hardware = (gckVGHARDWARE)Data;
++ gcmkVERIFY_OK(
++ gckVGHARDWARE_SetPowerManagementState(hardware, gcvPOWER_OFF_TIMEOUT));
++}
++#endif
++
++/******************************************************************************\
++****************************** gckVGHARDWARE API code *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_Construct
++**
++** Construct a new gckVGHARDWARE object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an initialized gckOS object.
++**
++** OUTPUT:
++**
++** gckVGHARDWARE * Hardware
++** Pointer to a variable that will hold the pointer to the gckVGHARDWARE
++** object.
++*/
++gceSTATUS
++gckVGHARDWARE_Construct(
++ IN gckOS Os,
++ OUT gckVGHARDWARE * Hardware
++ )
++{
++ gckVGHARDWARE hardware = gcvNULL;
++ gceSTATUS status;
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 chipFeatures;
++ gctUINT32 chipMinorFeatures;
++ gctUINT32 chipMinorFeatures2;
++
++ gcmkHEADER_ARG("Os=0x%x Hardware=0x%x ", Os, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Hardware != gcvNULL);
++
++ do
++ {
++ gcmkERR_BREAK(gckOS_SetGPUPower(Os, gcvCORE_VG, gcvTRUE, gcvTRUE));
++
++ status = _ResetGPU(Os);
++
++ if (status != gcvSTATUS_OK)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "_ResetGPU failed: status=%d\n", status);
++ }
++
++ /* Identify the hardware. */
++ gcmkERR_BREAK(_IdentifyHardware(Os,
++ &chipModel, &chipRevision,
++ &chipFeatures, &chipMinorFeatures, &chipMinorFeatures2
++ ));
++
++ /* Allocate the gckVGHARDWARE object. */
++ gcmkERR_BREAK(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckVGHARDWARE), (gctPOINTER *) &hardware
++ ));
++
++ /* Initialize the gckVGHARDWARE object. */
++ hardware->object.type = gcvOBJ_HARDWARE;
++ hardware->os = Os;
++
++ /* Set chip identity. */
++ hardware->chipModel = chipModel;
++ hardware->chipRevision = chipRevision;
++ hardware->chipFeatures = chipFeatures;
++ hardware->chipMinorFeatures = chipMinorFeatures;
++ hardware->chipMinorFeatures2 = chipMinorFeatures2;
++
++ hardware->powerMutex = gcvNULL;
++ hardware->chipPowerState = gcvPOWER_ON;
++ hardware->chipPowerStateGlobal = gcvPOWER_ON;
++ hardware->clockState = gcvTRUE;
++ hardware->powerState = gcvTRUE;
++
++#if gcdPOWEROFF_TIMEOUT
++ hardware->powerOffTime = 0;
++ hardware->powerOffTimeout = gcdPOWEROFF_TIMEOUT;
++
++ gcmkVERIFY_OK(gckOS_CreateTimer(Os,
++ _VGPowerTimerFunction,
++ (gctPOINTER)hardware,
++ &hardware->powerOffTimer));
++#endif
++
++ /* Determine whether FE 2.0 is present. */
++ hardware->fe20 = ((((gctUINT32) (hardware->chipFeatures)) >> (0 ? 28:28) & ((gctUINT32) ((((1 ? 28:28) - (0 ? 28:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 28:28) - (0 ? 28:28) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 28:28) - (0 ? 28:28) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 28:28) - (0 ? 28:28) + 1)))))));
++
++ /* Determine whether VG 2.0 is present. */
++ hardware->vg20 = ((((gctUINT32) (hardware->chipMinorFeatures)) >> (0 ? 13:13) & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 13:13) - (0 ? 13:13) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 13:13) - (0 ? 13:13) + 1)))))));
++
++ /* Determine whether VG 2.1 is present. */
++ hardware->vg21 = ((((gctUINT32) (hardware->chipMinorFeatures)) >> (0 ? 18:18) & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))) == (0x1 & ((gctUINT32) ((((1 ? 18:18) - (0 ? 18:18) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 18:18) - (0 ? 18:18) + 1)))))));
++
++ /* Set default event mask. */
++ hardware->eventMask = 0xFFFFFFFF;
++
++ gcmkERR_BREAK(gckOS_AtomConstruct(Os, &hardware->pageTableDirty));
++
++ /* Set fast clear to auto. */
++ gcmkVERIFY_OK(gckVGHARDWARE_SetFastClear(hardware, -1));
++
++ gcmkERR_BREAK(gckOS_CreateMutex(Os, &hardware->powerMutex));
++
++ /* Enable power management by default. */
++ hardware->powerManagement = gcvTRUE;
++
++ /* Return pointer to the gckVGHARDWARE object. */
++ *Hardware = hardware;
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++#if gcdPOWEROFF_TIMEOUT
++    /* Note: 'hardware' may still be gcvNULL if an early step failed. */
++    if (hardware != gcvNULL && hardware->powerOffTimer != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Os, hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Os, hardware->powerOffTimer));
++ }
++#endif
++
++ gcmkVERIFY_OK(gckOS_SetGPUPower(Os, gcvCORE_VG, gcvFALSE, gcvFALSE));
++
++ if (hardware != gcvNULL && hardware->pageTableDirty != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, hardware->pageTableDirty));
++ }
++
++ if (hardware != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_Free(Os, hardware));
++ }
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_Destroy
++**
++** Destroy an gckVGHARDWARE object.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object that needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGHARDWARE_Destroy(
++ IN gckVGHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%x ", Hardware);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Mark the object as unknown. */
++ Hardware->object.type = gcvOBJ_UNKNOWN;
++
++ if (Hardware->powerMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(
++ Hardware->os, Hardware->powerMutex));
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ gcmkVERIFY_OK(gckOS_StopTimer(Hardware->os, Hardware->powerOffTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Hardware->os, Hardware->powerOffTimer));
++#endif
++
++ if (Hardware->pageTableDirty != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Hardware->os, Hardware->pageTableDirty));
++ }
++
++ /* Free the object. */
++ status = gckOS_Free(Hardware->os, Hardware);
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_QueryMemory
++**
++** Query the amount of memory available on the hardware.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * InternalSize
++** Pointer to a variable that will hold the size of the internal video
++**      memory in bytes. If 'InternalSize' is gcvNULL, no information about the
++** internal memory will be returned.
++**
++** gctUINT32 * InternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * InternalAlignment
++**      Pointer to a variable that will hold the required alignment of
++** the internal video memory. This pointer cannot be gcvNULL if
++** 'InternalSize' is also non-gcvNULL.
++**
++** gctSIZE_T * ExternalSize
++** Pointer to a variable that will hold the size of the external video
++**      memory in bytes. If 'ExternalSize' is gcvNULL, no information about the
++** external memory will be returned.
++**
++** gctUINT32 * ExternalBaseAddress
++** Pointer to a variable that will hold the hardware's base address for
++** the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * ExternalAlignment
++**      Pointer to a variable that will hold the required alignment of
++** the external video memory. This pointer cannot be gcvNULL if
++** 'ExternalSize' is also non-gcvNULL.
++**
++** gctUINT32 * HorizontalTileSize
++**          Number of horizontal pixels per tile. If 'HorizontalTileSize'
++**          is gcvNULL, no horizontal tile size will be returned.
++**
++**      gctUINT32 * VerticalTileSize
++**          Number of vertical pixels per tile. If 'VerticalTileSize' is
++**          gcvNULL, no vertical tile size will be returned.
++*/
++gceSTATUS
++gckVGHARDWARE_QueryMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ )
++{
++    gcmkHEADER_ARG("Hardware=0x%x InternalSize=0x%x InternalBaseAddress=0x%x InternalAlignment=0x%x "
++ "ExternalSize=0x%x ExternalBaseAddress=0x%x ExternalAlignment=0x%x HorizontalTileSize=0x%x VerticalTileSize=0x%x",
++ Hardware, InternalSize, InternalBaseAddress, InternalAlignment,
++ ExternalSize, ExternalBaseAddress, ExternalAlignment, HorizontalTileSize, VerticalTileSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (InternalSize != gcvNULL)
++ {
++ /* No internal memory. */
++ *InternalSize = 0;
++ }
++
++ if (ExternalSize != gcvNULL)
++ {
++ /* No external memory. */
++ *ExternalSize = 0;
++ }
++
++ if (HorizontalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *HorizontalTileSize = 4;
++ }
++
++ if (VerticalTileSize != gcvNULL)
++ {
++ /* 4x4 tiles. */
++ *VerticalTileSize = 4;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
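++
++/*
++** Editorial usage sketch (not part of the original driver; "hardware"
++** stands in for any constructed gckVGHARDWARE object). Callers may pass
++** gcvNULL for any output they do not need; here only the tile dimensions
++** are queried:
++**
++**     gctUINT32 tileWidth, tileHeight;
++**     gcmkVERIFY_OK(gckVGHARDWARE_QueryMemory(
++**         hardware,
++**         gcvNULL, gcvNULL, gcvNULL, gcvNULL, gcvNULL, gcvNULL,
++**         &tileWidth, &tileHeight));
++**
++** Both outputs come back as 4 on this hardware (4x4 tiles).
++*/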
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_QueryChipIdentity
++**
++** Query the identity of the hardware.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** gceCHIPMODEL * ChipModel
++** If 'ChipModel' is not gcvNULL, the variable it points to will
++** receive the model of the chip.
++**
++** gctUINT32 * ChipRevision
++** If 'ChipRevision' is not gcvNULL, the variable it points to will
++** receive the revision of the chip.
++**
++** gctUINT32 * ChipFeatures
++** If 'ChipFeatures' is not gcvNULL, the variable it points to will
++** receive the feature set of the chip.
++**
++** gctUINT32 * ChipMinorFeatures
++** If 'ChipMinorFeatures' is not gcvNULL, the variable it points to
++** will receive the minor feature set of the chip.
++**
++** gctUINT32 * ChipMinorFeatures2
++**          If 'ChipMinorFeatures2' is not gcvNULL, the variable it points
++**          to will receive the second minor feature set of the chip.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_QueryChipIdentity(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPMODEL * ChipModel,
++ OUT gctUINT32 * ChipRevision,
++ OUT gctUINT32* ChipFeatures,
++ OUT gctUINT32* ChipMinorFeatures,
++ OUT gctUINT32* ChipMinorFeatures2
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x ChipModel=0x%x ChipRevision=0x%x ChipFeatures = 0x%x ChipMinorFeatures = 0x%x ChipMinorFeatures2 = 0x%x",
++ Hardware, ChipModel, ChipRevision, ChipFeatures, ChipMinorFeatures, ChipMinorFeatures2);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Return chip model. */
++ if (ChipModel != gcvNULL)
++ {
++ *ChipModel = Hardware->chipModel;
++ }
++
++ /* Return revision number. */
++ if (ChipRevision != gcvNULL)
++ {
++ *ChipRevision = Hardware->chipRevision;
++ }
++
++ /* Return feature set. */
++ if (ChipFeatures != gcvNULL)
++ {
++ gctUINT32 features = Hardware->chipFeatures;
++
++ if ((((((gctUINT32) (features)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) ((gctUINT32) (Hardware->allowFastClear) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)));
++ }
++
++ /* Mark 2D pipe as available for GC500.0 since it did not have this *\
++ \* bit. */
++ if ((Hardware->chipModel == gcv500)
++ && (Hardware->chipRevision == 0)
++ )
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++ }
++
++ /* Mark 2D pipe as available for GC300 since it did not have this *\
++ \* bit. */
++ if (Hardware->chipModel == gcv300)
++ {
++ features = ((((gctUINT32) (features)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1))))))) << (0 ? 9:9)));
++ }
++
++ *ChipFeatures = features;
++ }
++
++ /* Return minor feature set. */
++ if (ChipMinorFeatures != gcvNULL)
++ {
++ *ChipMinorFeatures = Hardware->chipMinorFeatures;
++ }
++
++ /* Return minor feature set #2. */
++ if (ChipMinorFeatures2 != gcvNULL)
++ {
++ *ChipMinorFeatures2 = Hardware->chipMinorFeatures2;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_ConvertFormat
++**
++** Convert an API format to hardware parameters.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** gceSURF_FORMAT Format
++** API format to convert.
++**
++** OUTPUT:
++**
++** gctUINT32 * BitsPerPixel
++** Pointer to a variable that will hold the number of bits per pixel.
++**
++** gctUINT32 * BytesPerTile
++** Pointer to a variable that will hold the number of bytes per tile.
++*/
++gceSTATUS
++gckVGHARDWARE_ConvertFormat(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_FORMAT Format,
++ OUT gctUINT32 * BitsPerPixel,
++ OUT gctUINT32 * BytesPerTile
++ )
++{
++ gctUINT32 bitsPerPixel;
++ gctUINT32 bytesPerTile;
++
++ gcmkHEADER_ARG("Hardware=0x%x Format=0x%x BitsPerPixel=0x%x BytesPerTile = 0x%x",
++ Hardware, Format, BitsPerPixel, BytesPerTile);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Dispatch on format. */
++ switch (Format)
++ {
++ case gcvSURF_A1:
++ case gcvSURF_L1:
++ /* 1-bpp format. */
++ bitsPerPixel = 1;
++ bytesPerTile = (1 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_A4:
++ /* 4-bpp format. */
++ bitsPerPixel = 4;
++ bytesPerTile = (4 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_INDEX8:
++ case gcvSURF_A8:
++ case gcvSURF_L8:
++ /* 8-bpp format. */
++ bitsPerPixel = 8;
++ bytesPerTile = (8 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_YV12:
++ /* 12-bpp planar YUV formats. */
++ bitsPerPixel = 12;
++ bytesPerTile = (12 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_NV12:
++ /* 12-bpp planar YUV formats. */
++ bitsPerPixel = 12;
++ bytesPerTile = (12 * 4 * 4) / 8;
++ break;
++
++ /* 4444 variations. */
++ case gcvSURF_X4R4G4B4:
++ case gcvSURF_A4R4G4B4:
++ case gcvSURF_R4G4B4X4:
++ case gcvSURF_R4G4B4A4:
++ case gcvSURF_B4G4R4X4:
++ case gcvSURF_B4G4R4A4:
++ case gcvSURF_X4B4G4R4:
++ case gcvSURF_A4B4G4R4:
++
++ /* 1555 variations. */
++ case gcvSURF_X1R5G5B5:
++ case gcvSURF_A1R5G5B5:
++ case gcvSURF_R5G5B5X1:
++ case gcvSURF_R5G5B5A1:
++ case gcvSURF_X1B5G5R5:
++ case gcvSURF_A1B5G5R5:
++ case gcvSURF_B5G5R5X1:
++ case gcvSURF_B5G5R5A1:
++
++ /* 565 variations. */
++ case gcvSURF_R5G6B5:
++ case gcvSURF_B5G6R5:
++
++ case gcvSURF_A8L8:
++ case gcvSURF_YUY2:
++ case gcvSURF_UYVY:
++ case gcvSURF_D16:
++ /* 16-bpp format. */
++ bitsPerPixel = 16;
++ bytesPerTile = (16 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_X8R8G8B8:
++ case gcvSURF_A8R8G8B8:
++ case gcvSURF_X8B8G8R8:
++ case gcvSURF_A8B8G8R8:
++ case gcvSURF_R8G8B8X8:
++ case gcvSURF_R8G8B8A8:
++ case gcvSURF_B8G8R8X8:
++ case gcvSURF_B8G8R8A8:
++ case gcvSURF_D32:
++ /* 32-bpp format. */
++ bitsPerPixel = 32;
++ bytesPerTile = (32 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_D24S8:
++ /* 24-bpp format. */
++ bitsPerPixel = 32;
++ bytesPerTile = (32 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_DXT1:
++ case gcvSURF_ETC1:
++ bitsPerPixel = 4;
++ bytesPerTile = (4 * 4 * 4) / 8;
++ break;
++
++ case gcvSURF_DXT2:
++ case gcvSURF_DXT3:
++ case gcvSURF_DXT4:
++ case gcvSURF_DXT5:
++ bitsPerPixel = 8;
++ bytesPerTile = (8 * 4 * 4) / 8;
++ break;
++
++ default:
++ /* Invalid format. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Set the result. */
++ if (BitsPerPixel != gcvNULL)
++ {
++ * BitsPerPixel = bitsPerPixel;
++ }
++
++ if (BytesPerTile != gcvNULL)
++ {
++ * BytesPerTile = bytesPerTile;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
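++
++/*
++** Editorial worked example (not part of the original driver): every entry
++** above follows bytesPerTile = (bitsPerPixel * 4 * 4) / 8 for the 4x4
++** pixel tiles reported by gckVGHARDWARE_QueryMemory. For instance:
++**
++**     gctUINT32 bpp, tileBytes;
++**     gcmkVERIFY_OK(gckVGHARDWARE_ConvertFormat(
++**         hardware, gcvSURF_R5G6B5, &bpp, &tileBytes));
++**
++** yields bpp == 16 and tileBytes == (16 * 4 * 4) / 8 == 32. Note that
++** gcvSURF_D24S8 is deliberately handled as a 32-bit format above.
++*/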
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_SplitMemory
++**
++** Split a hardware specific memory address into a pool and offset.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** gctUINT32 Address
++** Address in hardware specific format.
++**
++** OUTPUT:
++**
++** gcePOOL * Pool
++** Pointer to a variable that will hold the pool type for the address.
++**
++** gctUINT32 * Offset
++** Pointer to a variable that will hold the offset for the address.
++*/
++gceSTATUS
++gckVGHARDWARE_SplitMemory(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Address=0x%x Pool=0x%x Offset = 0x%x",
++ Hardware, Address, Pool, Offset);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Pool != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Offset != gcvNULL);
++
++ /* Dispatch on memory type. */
++ switch ((((((gctUINT32) (Address)) >> (0 ? 1:0)) & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1)))))) ))
++ {
++ case 0x0:
++ /* System memory. */
++ *Pool = gcvPOOL_SYSTEM;
++ break;
++
++ case 0x2:
++ /* Virtual memory. */
++ *Pool = gcvPOOL_VIRTUAL;
++ break;
++
++ default:
++ /* Invalid memory type. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Return offset of address. */
++ *Offset = ((((gctUINT32) (Address)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) ((gctUINT32) (0) & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
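++
++/*
++** Editorial worked example (not part of the original driver): the two low
++** bits of a hardware address select the pool (0x0 = system, 0x2 = virtual)
++** and are cleared to form the offset, so Address = 0x00001002 splits into
++** *Pool == gcvPOOL_VIRTUAL and *Offset == 0x00001000.
++*/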
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_Execute
++**
++** Kickstart the hardware's command processor with an initialized command
++** buffer.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++** Pointer to the gckVGHARDWARE object.
++**
++** gctUINT32 Address
++** Address of the command buffer.
++**
++** gctSIZE_T Count
++** Number of command-sized data units to be executed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGHARDWARE_Execute(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ IN gctUINT32 Count
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Address=0x%x Count=0x%x",
++ Hardware, Address, Count);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ do
++ {
++ /* Enable all events. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00014,
++ Hardware->eventMask
++ ));
++
++ if (Hardware->fe20)
++ {
++ /* Write address register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00500,
++ gcmkFIXADDRESS(Address)
++ ));
++
++ /* Write control register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00504,
++ Count
++ ));
++ }
++ else
++ {
++ /* Write address register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00654,
++ gcmkFIXADDRESS(Address)
++ ));
++
++ /* Write control register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(
++ Hardware->os,
++ gcvCORE_VG,
++ 0x00658,
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 16:16) - (0 ? 16:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 16:16) - (0 ? 16:16) + 1))))))) << (0 ? 16:16))) |
++ ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (Count) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ ));
++ }
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
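++
++/*
++** Editorial note: hardware with the 2.0 front end (Hardware->fe20) takes
++** the address and count through the 0x00500/0x00504 register pair, while
++** older front ends use 0x00654/0x00658 and pack an enable bit (bit 16)
++** together with the 16-bit count into the control word.
++*/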
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_AlignToTile
++**
++** Align the specified width and height to tile boundaries.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++**          Pointer to a gckVGHARDWARE object.
++**
++** gceSURF_TYPE Type
++** Type of alignment.
++**
++** gctUINT32 * Width
++** Pointer to the width to be aligned. If 'Width' is gcvNULL, no width
++** will be aligned.
++**
++** gctUINT32 * Height
++** Pointer to the height to be aligned. If 'Height' is gcvNULL, no height
++** will be aligned.
++**
++** OUTPUT:
++**
++** gctUINT32 * Width
++** Pointer to a variable that will receive the aligned width.
++**
++** gctUINT32 * Height
++** Pointer to a variable that will receive the aligned height.
++*/
++gceSTATUS
++gckVGHARDWARE_AlignToTile(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32 * Width,
++ IN OUT gctUINT32 * Height
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Type=0x%x Width=0x%x Height=0x%x",
++ Hardware, Type, Width, Height);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (Width != gcvNULL)
++ {
++ /* Align the width. */
++ *Width = gcmALIGN(*Width, (Type == gcvSURF_TEXTURE) ? 4 : 16);
++ }
++
++ if (Height != gcvNULL)
++ {
++ /* Special case for VG images. */
++ if ((*Height == 0) && (Type == gcvSURF_IMAGE))
++ {
++ *Height = 4;
++ }
++ else
++ {
++ /* Align the height. */
++ *Height = gcmALIGN(*Height, 4);
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
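++
++/*
++** Editorial worked example (not part of the original driver): widths are
++** aligned to 4 pixels for textures and 16 otherwise, heights to 4. A
++** 20-pixel-wide texture therefore stays 20 wide (gcmALIGN(20, 4) == 20),
++** while any other surface type is padded to 32 (gcmALIGN(20, 16) == 32);
++** a zero-height gcvSURF_IMAGE is special-cased to a height of 4.
++*/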
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_ConvertLogical
++**
++** Convert a logical system address into a hardware specific address.
++**
++** INPUT:
++**
++** gckVGHARDWARE Hardware
++**          Pointer to a gckVGHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address to convert.
++**
++** gctBOOL InUserSpace
++**          gcvTRUE if the memory is in user space.
++**
++** gctUINT32* Address
++** Return hardware specific address.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVGHARDWARE_ConvertLogical(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctBOOL InUserSpace,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x InUserSpace=%d Address=0x%x",
++ Hardware, Logical, InUserSpace, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ do
++ {
++ /* Convert logical address into a physical address. */
++ if (InUserSpace)
++ {
++ gcmkERR_BREAK(gckOS_UserLogicalToPhysical(
++ Hardware->os, Logical, &address
++ ));
++ }
++ else
++ {
++ gcmkERR_BREAK(gckOS_GetPhysicalAddress(
++ Hardware->os, Logical, &address
++ ));
++ }
++
++ /* Return hardware specific address. */
++ *Address = ((((gctUINT32) (address)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_QuerySystemMemory
++**
++**  Query the maximum size and base address of the system memory.
++**
++** INPUT:
++**
++**      gckVGHARDWARE Hardware
++**          Pointer to a gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * SystemSize
++** Pointer to a variable that receives the maximum size of the system
++** memory.
++**
++** gctUINT32 * SystemBaseAddress
++**          Pointer to a variable that receives the base address for system
++** memory.
++*/
++gceSTATUS gckVGHARDWARE_QuerySystemMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x SystemSize=0x%x SystemBaseAddress=0x%x",
++ Hardware, SystemSize, SystemBaseAddress);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ if (SystemSize != gcvNULL)
++ {
++ /* Maximum system memory can be 2GB. */
++ *SystemSize = (gctSIZE_T)(1 << 31);
++ }
++
++ if (SystemBaseAddress != gcvNULL)
++ {
++ /* Set system memory base address. */
++ *SystemBaseAddress = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x0 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_SetMMU
++**
++** Set the page table base address.
++**
++** INPUT:
++**
++**      gckVGHARDWARE Hardware
++**          Pointer to a gckVGHARDWARE object.
++**
++** gctPOINTER Logical
++** Logical address of the page table.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGHARDWARE_SetMMU(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical
++ )
++{
++ gceSTATUS status;
++ gctUINT32 address = 0;
++
++ gcmkHEADER_ARG("Hardware=0x%x Logical=0x%x",
++ Hardware, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ do
++ {
++        /* Convert the logical address into a hardware address. */
++ gcmkERR_BREAK(gckVGHARDWARE_ConvertLogical(Hardware, Logical,
++ gcvFALSE, &address));
++
++ /* Write the AQMemoryFePageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00400,
++ gcmkFIXADDRESS(address)));
++
++ /* Write the AQMemoryTxPageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00404,
++ gcmkFIXADDRESS(address)));
++
++ /* Write the AQMemoryPePageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00408,
++ gcmkFIXADDRESS(address)));
++
++ /* Write the AQMemoryPezPageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x0040C,
++ gcmkFIXADDRESS(address)));
++
++ /* Write the AQMemoryRaPageTable register. */
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00410,
++ gcmkFIXADDRESS(address)));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
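++
++/*
++** Editorial note: the same page table base is written to all five engine
++** MMU registers (FE, TX, PE, PEZ and RA), so every hardware client
++** translates through one shared table. gckVGHARDWARE_FlushMMU below
++** appears to build the matching invalidate for those cached entries.
++*/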
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_FlushMMU
++**
++** Flush the page table.
++**
++** INPUT:
++**
++**      gckVGHARDWARE Hardware
++**          Pointer to a gckVGHARDWARE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGHARDWARE_FlushMMU(
++ IN gckVGHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gckVGCOMMAND command;
++
++ gcmkHEADER_ARG("Hardware=0x%x ", Hardware);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ do
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++ gctUINT32_PTR buffer;
++
++ /* Create a shortcut to the command buffer object. */
++ command = Hardware->kernel->command;
++
++ /* Allocate command buffer space. */
++ gcmkERR_BREAK(gckVGCOMMAND_Allocate(
++ command, 8, &commandBuffer, (gctPOINTER *) &buffer
++ ));
++
++ buffer[0]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27))) | (((gctUINT32) (0x01 & ((gctUINT32) ((((1 ? 31:27) - (0 ? 31:27) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 31:27) - (0 ? 31:27) + 1))))))) << (0 ? 31:27)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0))) | (((gctUINT32) ((gctUINT32) (0x0E04) & ((gctUINT32) ((((1 ? 15:0) - (0 ? 15:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 15:0) - (0 ? 15:0) + 1))))))) << (0 ? 15:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16))) | (((gctUINT32) ((gctUINT32) (1) & ((gctUINT32) ((((1 ? 25:16) - (0 ? 25:16) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 25:16) - (0 ? 25:16) + 1))))))) << (0 ? 25:16)));
++
++ buffer[1]
++ = ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1))))))) << (0 ? 0:0)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 1:1) - (0 ? 1:1) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:1) - (0 ? 1:1) + 1))))))) << (0 ? 1:1)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 2:2) - (0 ? 2:2) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 2:2) - (0 ? 2:2) + 1))))))) << (0 ? 2:2)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 3:3) - (0 ? 3:3) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 3:3) - (0 ? 3:3) + 1))))))) << (0 ? 3:3)))
++ | ((((gctUINT32) (0)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4))) | (((gctUINT32) (0x1 & ((gctUINT32) ((((1 ? 4:4) - (0 ? 4:4) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 4:4) - (0 ? 4:4) + 1))))))) << (0 ? 4:4)));
++ }
++ while(gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_BuildVirtualAddress
++**
++** Build a virtual address.
++**
++** INPUT:
++**
++**      gckVGHARDWARE Hardware
++**          Pointer to a gckVGHARDWARE object.
++**
++** gctUINT32 Index
++** Index into page table.
++**
++** gctUINT32 Offset
++** Offset into page.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++**          Pointer to a variable receiving the hardware address.
++*/
++gceSTATUS gckVGHARDWARE_BuildVirtualAddress(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ )
++{
++ gctUINT32 address;
++
++ gcmkHEADER_ARG("Hardware=0x%x Index=0x%x Offset=0x%x Address=0x%x",
++ Hardware, Index, Offset, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Build virtual address. */
++ address = (Index << 12) | Offset;
++
++ /* Set virtual type. */
++ address = ((((gctUINT32) (address)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0))) | (((gctUINT32) (0x2 & ((gctUINT32) ((((1 ? 1:0) - (0 ? 1:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 1:0) - (0 ? 1:0) + 1))))))) << (0 ? 1:0)));
++
++ /* Set the result. */
++ *Address = address;
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
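++
++/*
++** Editorial worked example (not part of the original driver): with 4KB
++** pages, Index = 5 and Offset = 0x120 give (5 << 12) | 0x120 == 0x5120;
++** the low two bits are then stamped with the virtual memory type (0x2),
++** so *Address == 0x5122. This is the inverse of the pool decoding done
++** by gckVGHARDWARE_SplitMemory above.
++*/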
++
++gceSTATUS
++gckVGHARDWARE_GetIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32 * Data
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%x Data=0x%x", Hardware, Data);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++ /* Read register and return. */
++ status = gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG, 0x00004, Data);
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckVGHARDWARE_SetFastClear(
++ IN gckVGHARDWARE Hardware,
++ IN gctINT Enable
++ )
++{
++ gctUINT32 debug;
++ gceSTATUS status;
++
++ if (!(((((gctUINT32) (Hardware->chipFeatures)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ))
++ {
++ return gcvSTATUS_OK;
++ }
++
++ do
++ {
++ if (Enable == -1)
++ {
++ Enable = (Hardware->chipModel > gcv500) ||
++ ((Hardware->chipModel == gcv500) && (Hardware->chipRevision >= 3));
++ }
++
++ gcmkERR_BREAK(gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00414,
++ &debug));
++
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20))) | (((gctUINT32) ((gctUINT32) (Enable == 0) & ((gctUINT32) ((((1 ? 20:20) - (0 ? 20:20) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 20:20) - (0 ? 20:20) + 1))))))) << (0 ? 20:20)));
++
++#ifdef AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION
++ debug = ((((gctUINT32) (debug)) & ~(((gctUINT32) (((gctUINT32) ((((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1) == 32) ? ~0 : (~(~0 << ((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1))))))) << (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION))) | (((gctUINT32) ((gctUINT32) (Enable == 0) & ((gctUINT32) ((((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1) == 32) ? ~0 : (~(~0 << ((1 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) - (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION) + 1))))))) << (0 ? AQ_MEMORY_DEBUG_DISABLE_Z_COMPRESSION)));
++#endif
++
++ gcmkERR_BREAK(gckOS_WriteRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00414,
++ debug));
++
++ Hardware->allowFastClear = Enable;
++
++        status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ return status;
++}
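++
++/*
++** Editorial note: passing Enable == -1 selects the default policy: fast
++** clear is allowed on any chip newer than the GC500, or on a GC500 at
++** revision 3 or later. The decision is cached in Hardware->allowFastClear,
++** which gckVGHARDWARE_QueryChipIdentity folds into the feature bits.
++*/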
++
++gceSTATUS
++gckVGHARDWARE_ReadInterrupt(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32_PTR IDs
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Hardware=0x%x IDs=0x%x", Hardware, IDs);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(IDs != gcvNULL);
++
++ /* Read AQIntrAcknowledge register. */
++ status = gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG,
++ 0x00010,
++ IDs);
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS _CommandStall(
++ gckVGHARDWARE Hardware)
++{
++ gceSTATUS status;
++ gckVGCOMMAND command;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ do
++ {
++ gctUINT32_PTR buffer;
++ command = Hardware->kernel->command;
++
++ /* Allocate command buffer space. */
++ gcmkERR_BREAK(gckVGCOMMAND_Allocate(
++ command, 8, &command->powerStallBuffer,
++ (gctPOINTER *) &buffer
++ ));
++
++ gcmkERR_BREAK(gckVGCOMMAND_EventCommand(
++ command, buffer, gcvBLOCK_PIXEL,
++ command->powerStallInt, gcvNULL));
++
++ gcmkERR_BREAK(gckVGCOMMAND_Execute(
++ command,
++ command->powerStallBuffer
++ ));
++
++ /* Wait the signal. */
++ gcmkERR_BREAK(gckOS_WaitSignal(
++ command->os,
++ command->powerStallSignal,
++ command->kernel->kernel->timeOut));
++
++
++ }
++ while(gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
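++
++/*
++** Editorial note: the stall above queues an event command that fires once
++** the pixel pipe drains (gcvBLOCK_PIXEL) and then blocks on
++** powerStallSignal, presumably signaled from the interrupt path, bounded
++** by the kernel's timeOut. The gcvPOWER_FLAG_STALL step of the power
++** state machine below relies on this to drain work before switching.
++*/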
++
++/*******************************************************************************
++**
++**  gckVGHARDWARE_SetPowerManagementState
++**
++** Set GPU to a specified power state.
++**
++** INPUT:
++**
++**      gckVGHARDWARE Hardware
++**          Pointer to a gckVGHARDWARE object.
++**
++** gceCHIPPOWERSTATE State
++** Power State.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_SetPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ )
++{
++ gceSTATUS status;
++ gckVGCOMMAND command = gcvNULL;
++ gckOS os;
++ gctUINT flag/*, clock*/;
++
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL stall = gcvTRUE;
++ gctBOOL commitMutex = gcvFALSE;
++ gctBOOL mutexAcquired = gcvFALSE;
++
++#if gcdPOWEROFF_TIMEOUT
++ gctBOOL timeout = gcvFALSE;
++ gctBOOL isAfter = gcvFALSE;
++ gctUINT32 currentTime;
++#endif
++
++ gctBOOL broadcast = gcvFALSE;
++ gctUINT32 process, thread;
++ gctBOOL global = gcvFALSE;
++
++#if gcdENABLE_PROFILING
++ gctUINT64 time, freq, mutexTime, onTime, stallTime, stopTime, delayTime,
++ initTime, offTime, startTime, totalTime;
++#endif
++
++ /* State transition flags. */
++ static const gctUINT flags[4][4] =
++ {
++ /* gcvPOWER_ON */
++ { /* ON */ 0,
++ /* OFF */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_NOP,
++ /* SUSPEND */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STALL |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_OFF */
++ { /* ON */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY,
++ /* OFF */ 0,
++ /* IDLE */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY,
++ /* SUSPEND */ gcvPOWER_FLAG_INITIALIZE |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_IDLE */
++ { /* ON */ gcvPOWER_FLAG_NOP,
++ /* OFF */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ 0,
++ /* SUSPEND */ gcvPOWER_FLAG_ACQUIRE |
++ gcvPOWER_FLAG_STOP |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ },
++
++ /* gcvPOWER_SUSPEND */
++ { /* ON */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* OFF */ gcvPOWER_FLAG_SAVE |
++ gcvPOWER_FLAG_POWER_OFF |
++ gcvPOWER_FLAG_CLOCK_OFF,
++ /* IDLE */ gcvPOWER_FLAG_START |
++ gcvPOWER_FLAG_DELAY |
++ gcvPOWER_FLAG_RELEASE |
++ gcvPOWER_FLAG_CLOCK_ON,
++ /* SUSPEND */ 0,
++ },
++ };
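++
++    /* Editorial note: the table is indexed as flags[current][requested].
++       An ON -> OFF transition, for example, applies ACQUIRE | STALL |
++       STOP | POWER_OFF | CLOCK_OFF, while identical states map to 0,
++       which the code below treats as "nothing to do" and returns early. */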
++
++ gcmkHEADER_ARG("Hardware=0x%x State=%d", Hardware, State);
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "Switching to power state %d",
++ State);
++#endif
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ /* Get the gckOS object pointer. */
++ os = Hardware->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Get the gckCOMMAND object pointer. */
++ gcmkVERIFY_OBJECT(Hardware->kernel, gcvOBJ_KERNEL);
++ command = Hardware->kernel->command;
++ gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
++
++ if (Hardware->powerManagement == gcvFALSE)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Start profiler. */
++ gcmkPROFILE_INIT(freq, time);
++
++ /* Convert the broadcast power state. */
++ switch (State)
++ {
++ case gcvPOWER_SUSPEND_ATPOWERON:
++ /* Convert to SUSPEND and don't wait for STALL. */
++ State = gcvPOWER_SUSPEND;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_OFF_ATPOWERON:
++ /* Convert to OFF and don't wait for STALL. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ break;
++
++ case gcvPOWER_IDLE_BROADCAST:
++ /* Convert to IDLE and note we are inside broadcast. */
++ State = gcvPOWER_IDLE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_SUSPEND_BROADCAST:
++ /* Convert to SUSPEND and note we are inside broadcast. */
++ State = gcvPOWER_SUSPEND;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_BROADCAST:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_OFF_RECOVERY:
++ /* Convert to OFF and note we are inside recovery. */
++ State = gcvPOWER_OFF;
++ stall = gcvFALSE;
++ broadcast = gcvTRUE;
++ break;
++
++ case gcvPOWER_ON_AUTO:
++ /* Convert to ON and note we are inside recovery. */
++ State = gcvPOWER_ON;
++ break;
++
++ case gcvPOWER_ON:
++ case gcvPOWER_IDLE:
++ case gcvPOWER_SUSPEND:
++ case gcvPOWER_OFF:
++ /* Mark as global power management. */
++ global = gcvTRUE;
++ break;
++
++#if gcdPOWEROFF_TIMEOUT
++ case gcvPOWER_OFF_TIMEOUT:
++ /* Convert to OFF and note we are inside broadcast. */
++ State = gcvPOWER_OFF;
++ broadcast = gcvTRUE;
++ /* Check time out */
++ timeout = gcvTRUE;
++ break;
++#endif
++
++ default:
++ break;
++ }
++
++ /* Get current process and thread IDs. */
++ gcmkONERROR(gckOS_GetProcessID(&process));
++ gcmkONERROR(gckOS_GetThreadID(&thread));
++
++ /* Acquire the power mutex. */
++ if (broadcast)
++ {
++ /* Try to acquire the power mutex. */
++ status = gckOS_AcquireMutex(os, Hardware->powerMutex, 0);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ /* Check if we already own this mutex. */
++ if ((Hardware->powerProcess == process)
++ && (Hardware->powerThread == thread)
++ )
++ {
++ /* Bail out on recursive power management. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ else if (State == gcvPOWER_IDLE)
++ {
++ /* gcvPOWER_IDLE_BROADCAST is from IST,
++ ** so waiting here will cause deadlock,
++ ** if lock holder call gckCOMMAND_Stall() */
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os,
++ Hardware->powerMutex,
++ gcvINFINITE));
++ }
++ }
++ }
++ else
++ {
++ /* Acquire the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(os, Hardware->powerMutex, gcvINFINITE));
++ }
++
++    /* Get time until mutex acquired. */
++ gcmkPROFILE_QUERY(time, mutexTime);
++
++ Hardware->powerProcess = process;
++ Hardware->powerThread = thread;
++ mutexAcquired = gcvTRUE;
++
++ /* Grab control flags and clock. */
++ flag = flags[Hardware->chipPowerState][State];
++ /*clock = clocks[State];*/
++
++#if gcdPOWEROFF_TIMEOUT
++ if (timeout)
++ {
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ gcmkONERROR(
++ gckOS_TicksAfter(Hardware->powerOffTime, currentTime, &isAfter));
++
++ /* powerOffTime is pushed forward, give up.*/
++ if (isAfter
++ /* Expect a transition start from IDLE. */
++ || (Hardware->chipPowerState == gcvPOWER_ON)
++ || (Hardware->chipPowerState == gcvPOWER_OFF)
++ )
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++#endif
++
++ if (flag == 0)
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* internal power control */
++ if (!global)
++ {
++ if (Hardware->chipPowerStateGlobal == gcvPOWER_OFF)
++ {
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* No need to do anything. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++ acquired = gcvTRUE;
++
++ /* avoid acquiring again. */
++ flag &= ~gcvPOWER_FLAG_ACQUIRE;
++ }
++ }
++
++ if (flag & (gcvPOWER_FLAG_INITIALIZE | gcvPOWER_FLAG_CLOCK_ON))
++ {
++ /* Turn on the power. */
++ gcmkONERROR(gckOS_SetGPUPower(os, gcvCORE_VG, gcvTRUE, gcvTRUE));
++
++ /* Mark clock and power as enabled. */
++ Hardware->clockState = gcvTRUE;
++ Hardware->powerState = gcvTRUE;
++ }
++
++ /* Get time until powered on. */
++ gcmkPROFILE_QUERY(time, onTime);
++
++ if ((flag & gcvPOWER_FLAG_STALL) && stall)
++ {
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ command->os,
++ command->commitMutex,
++ gcvINFINITE
++ ));
++
++ commitMutex = gcvTRUE;
++
++ gcmkONERROR(_CommandStall(Hardware));
++ }
++
++ /* Get time until stalled. */
++ gcmkPROFILE_QUERY(time, stallTime);
++
++ if (flag & gcvPOWER_FLAG_ACQUIRE)
++ {
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(os, command->powerSemaphore));
++
++ acquired = gcvTRUE;
++ }
++
++
++ /* Get time until stopped. */
++ gcmkPROFILE_QUERY(time, stopTime);
++
++
++ if (flag & gcvPOWER_FLAG_DELAY)
++ {
++ /* Wait for the specified amount of time to settle coming back from
++ ** power-off or suspend state. */
++ gcmkONERROR(gckOS_Delay(os, gcdPOWER_CONTROL_DELAY));
++ }
++
++ /* Get time until delayed. */
++ gcmkPROFILE_QUERY(time, delayTime);
++
++ if (flag & gcvPOWER_FLAG_INITIALIZE)
++ {
++
++ /* Initialize GPU here, replaced by InitializeHardware later */
++ gcmkONERROR(gckVGHARDWARE_SetMMU(Hardware, Hardware->kernel->mmu->pageTableLogical));
++ gcmkVERIFY_OK(gckVGHARDWARE_SetFastClear(Hardware, -1));
++
++ /* Force the command queue to reload the next context. */
++ command->currentContext = 0;
++ }
++
++ /* Get time until initialized. */
++ gcmkPROFILE_QUERY(time, initTime);
++
++ if (flag & (gcvPOWER_FLAG_POWER_OFF | gcvPOWER_FLAG_CLOCK_OFF))
++ {
++ /* Turn off the GPU power. */
++ gcmkONERROR(
++ gckOS_SetGPUPower(os,
++ gcvCORE_VG,
++ (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE,
++ (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE));
++
++ /* Save current hardware power and clock states. */
++ Hardware->clockState = (flag & gcvPOWER_FLAG_CLOCK_OFF) ? gcvFALSE
++ : gcvTRUE;
++ Hardware->powerState = (flag & gcvPOWER_FLAG_POWER_OFF) ? gcvFALSE
++ : gcvTRUE;
++ }
++
++ /* Get time until off. */
++ gcmkPROFILE_QUERY(time, offTime);
++
++
++ /* Get time until started. */
++ gcmkPROFILE_QUERY(time, startTime);
++
++ if (flag & gcvPOWER_FLAG_RELEASE)
++ {
++ /* Release the power management semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(os, command->powerSemaphore));
++ acquired = gcvFALSE;
++ }
++
++ /* Save the new power state. */
++ Hardware->chipPowerState = State;
++
++ if (global)
++ {
++ /* Save the new power state. */
++ Hardware->chipPowerStateGlobal = State;
++ }
++
++ if (commitMutex)
++ {
++ /* Acquire the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ command->os,
++ command->commitMutex
++ ));
++ }
++
++#if gcdPOWEROFF_TIMEOUT
++ /* Reset power off time */
++ gcmkONERROR(gckOS_GetTicks(&currentTime));
++
++ Hardware->powerOffTime = currentTime + Hardware->powerOffTimeout;
++
++ if (State == gcvPOWER_IDLE)
++ {
++ /* Start a timer to power off GPU when GPU enters IDLE or SUSPEND. */
++ gcmkVERIFY_OK(gckOS_StartTimer(os,
++ Hardware->powerOffTimer,
++ Hardware->powerOffTimeout));
++ }
++ else
++ {
++        gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE, "Cancel powerOffTimer");
++
++ /* Cancel running timer when GPU enters ON or OFF. */
++ gcmkVERIFY_OK(gckOS_StopTimer(os, Hardware->powerOffTimer));
++ }
++#endif
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(os, Hardware->powerMutex));
++
++ /* Get total time. */
++ gcmkPROFILE_QUERY(time, totalTime);
++#if gcdENABLE_PROFILING
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ "PROF(%llu): mutex:%llu on:%llu stall:%llu stop:%llu",
++ freq, mutexTime, onTime, stallTime, stopTime);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HARDWARE,
++ " delay:%llu init:%llu off:%llu start:%llu total:%llu",
++ delayTime, initTime, offTime, startTime, totalTime);
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ if (acquired)
++ {
++ /* Release semaphore. */
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Hardware->os,
++ command->powerSemaphore));
++ }
++
++ if (mutexAcquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Hardware->os, Hardware->powerMutex));
++ }
++
++ if (commitMutex)
++ {
++ /* Acquire the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ command->os,
++ command->commitMutex
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++**  gckVGHARDWARE_QueryPowerManagementState
++**
++** Get GPU power state.
++**
++** INPUT:
++**
++**      gckVGHARDWARE Hardware
++**          Pointer to a gckVGHARDWARE object.
++**
++** gceCHIPPOWERSTATE* State
++** Power State.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_QueryPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(State != gcvNULL);
++
++    /* Return the state. */
++ *State = Hardware->chipPowerState;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*State=%d", *State);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGHARDWARE_SetPowerManagement
++**
++** Configure GPU power management function.
++** Only used in driver initialization stage.
++**
++** INPUT:
++**
++**      gckVGHARDWARE Hardware
++**          Pointer to a gckVGHARDWARE object.
++**
++** gctBOOL PowerManagement
++**          Power Management State.
++**
++*/
++gceSTATUS
++gckVGHARDWARE_SetPowerManagement(
++ IN gckVGHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ Hardware->powerManagement = PowerManagement;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#if gcdPOWEROFF_TIMEOUT
++gceSTATUS
++gckVGHARDWARE_SetPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Timeout
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x Timeout=%d", Hardware, Timeout);
++
++ Hardware->powerOffTimeout = Timeout;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++gceSTATUS
++gckVGHARDWARE_QueryPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++ )
++{
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ *Timeout = Hardware->powerOffTimeout;
++
++ gcmkFOOTER_ARG("*Timeout=%d", *Timeout);
++ return gcvSTATUS_OK;
++}
++#endif
++
++gceSTATUS
++gckVGHARDWARE_QueryIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ )
++{
++ gceSTATUS status;
++ gctUINT32 idle;
++
++ gcmkHEADER_ARG("Hardware=0x%x", Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(IsIdle != gcvNULL);
++
++ /* We are idle when the power is not ON. */
++ if (Hardware->chipPowerState != gcvPOWER_ON)
++ {
++ *IsIdle = gcvTRUE;
++ }
++
++ else
++ {
++ /* Read idle register. */
++ gcmkONERROR(
++ gckOS_ReadRegisterEx(Hardware->os, gcvCORE_VG, 0x00004, &idle));
++
++ /* Pipe must be idle. */
++ if (((((((gctUINT32) (idle)) >> (0 ? 0:0)) & ((gctUINT32) ((((1 ? 0:0) - (0 ? 0:0) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 0:0) - (0 ? 0:0) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 8:8)) & ((gctUINT32) ((((1 ? 8:8) - (0 ? 8:8) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 8:8) - (0 ? 8:8) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 9:9)) & ((gctUINT32) ((((1 ? 9:9) - (0 ? 9:9) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 9:9) - (0 ? 9:9) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 10:10)) & ((gctUINT32) ((((1 ? 10:10) - (0 ? 10:10) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 10:10) - (0 ? 10:10) + 1)))))) ) != 1)
++ || ((((((gctUINT32) (idle)) >> (0 ? 11:11)) & ((gctUINT32) ((((1 ? 11:11) - (0 ? 11:11) + 1) == 32) ? ~0 : (~(~0 << ((1 ? 11:11) - (0 ? 11:11) + 1)))))) ) != 1)
++ )
++ {
++ /* Something is busy. */
++ *IsIdle = gcvFALSE;
++ }
++
++ else
++ {
++ *IsIdle = gcvTRUE;
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif /* gcdENABLE_VG */
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/archvg/gc_hal_kernel_hardware_vg.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/archvg/gc_hal_kernel_hardware_vg.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/archvg/gc_hal_kernel_hardware_vg.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/archvg/gc_hal_kernel_hardware_vg.h 2015-07-27 23:13:06.190893891 +0200
+@@ -0,0 +1,74 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_hardware_vg_h_
++#define __gc_hal_kernel_hardware_vg_h_
++
++/* gckVGHARDWARE object. */
++struct _gckVGHARDWARE
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckKERNEL object. */
++ gckVGKERNEL kernel;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Chip characteristics. */
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 chipFeatures;
++ gctUINT32 chipMinorFeatures;
++ gctUINT32 chipMinorFeatures2;
++ gctBOOL allowFastClear;
++
++ /* Features. */
++ gctBOOL fe20;
++ gctBOOL vg20;
++ gctBOOL vg21;
++
++ /* Event mask. */
++ gctUINT32 eventMask;
++
++ gctBOOL clockState;
++ gctBOOL powerState;
++ gctPOINTER powerMutex;
++ gctUINT32 powerProcess;
++ gctUINT32 powerThread;
++ gceCHIPPOWERSTATE chipPowerState;
++ gceCHIPPOWERSTATE chipPowerStateGlobal;
++ gctISRMANAGERFUNC startIsr;
++ gctISRMANAGERFUNC stopIsr;
++ gctPOINTER isrContext;
++ gctPOINTER pageTableDirty;
++#if gcdPOWEROFF_TIMEOUT
++ gctUINT32 powerOffTime;
++ gctUINT32 powerOffTimeout;
++ gctPOINTER powerOffTimer;
++#endif
++
++ gctBOOL powerManagement;
++};
++
++#endif /* __gc_hal_kernel_hardware_vg_h_ */
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel.c 2015-07-27 23:13:06.190893891 +0200
+@@ -0,0 +1,3976 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_KERNEL
++
++/*******************************************************************************
++***** Version Signature *******************************************************/
++
++#define _gcmTXT2STR(t) #t
++#define gcmTXT2STR(t) _gcmTXT2STR(t)
++const char * _VERSION = "\n\0$VERSION$"
++ gcmTXT2STR(gcvVERSION_MAJOR) "."
++ gcmTXT2STR(gcvVERSION_MINOR) "."
++ gcmTXT2STR(gcvVERSION_PATCH) ":"
++ gcmTXT2STR(gcvVERSION_BUILD) "$\n";
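++
++/*
++** Editorial note: the _gcmTXT2STR/gcmTXT2STR pair is the standard
++** two-level preprocessor idiom for stringizing a macro's value: the outer
++** macro expands gcvVERSION_MAJOR and friends first, then the inner one
++** applies the # operator. The "$VERSION$...$" framing presumably makes
++** the version string easy to locate inside a compiled binary.
++*/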
++
++/******************************************************************************\
++******************************* gckKERNEL API Code ******************************
++\******************************************************************************/
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++#define gcmDEFINE2TEXT(d) #d
++gctCONST_STRING _DispatchText[] =
++{
++ gcmDEFINE2TEXT(gcvHAL_QUERY_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_QUERY_CHIP_IDENTITY),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_NON_PAGED_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_FREE_NON_PAGED_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_FREE_CONTIGUOUS_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_FREE_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_MAP_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_UNMAP_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_MAP_USER_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_UNMAP_USER_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_LOCK_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_UNLOCK_VIDEO_MEMORY),
++ gcmDEFINE2TEXT(gcvHAL_EVENT_COMMIT),
++ gcmDEFINE2TEXT(gcvHAL_USER_SIGNAL),
++ gcmDEFINE2TEXT(gcvHAL_SIGNAL),
++ gcmDEFINE2TEXT(gcvHAL_WRITE_DATA),
++ gcmDEFINE2TEXT(gcvHAL_COMMIT),
++ gcmDEFINE2TEXT(gcvHAL_STALL),
++ gcmDEFINE2TEXT(gcvHAL_READ_REGISTER),
++ gcmDEFINE2TEXT(gcvHAL_WRITE_REGISTER),
++ gcmDEFINE2TEXT(gcvHAL_GET_PROFILE_SETTING),
++ gcmDEFINE2TEXT(gcvHAL_SET_PROFILE_SETTING),
++ gcmDEFINE2TEXT(gcvHAL_READ_ALL_PROFILE_REGISTERS),
++#if VIVANTE_PROFILER_PERDRAW
++ gcmDEFINE2TEXT(gcvHAL_READ_PROFILER_REGISTER_SETTING),
++#endif
++ gcmDEFINE2TEXT(gcvHAL_PROFILE_REGISTERS_2D),
++ gcmDEFINE2TEXT(gcvHAL_SET_POWER_MANAGEMENT_STATE),
++ gcmDEFINE2TEXT(gcvHAL_QUERY_POWER_MANAGEMENT_STATE),
++ gcmDEFINE2TEXT(gcvHAL_GET_BASE_ADDRESS),
++ gcmDEFINE2TEXT(gcvHAL_SET_IDLE),
++ gcmDEFINE2TEXT(gcvHAL_QUERY_KERNEL_SETTINGS),
++ gcmDEFINE2TEXT(gcvHAL_RESET),
++ gcmDEFINE2TEXT(gcvHAL_MAP_PHYSICAL),
++ gcmDEFINE2TEXT(gcvHAL_DEBUG),
++ gcmDEFINE2TEXT(gcvHAL_CACHE),
++ gcmDEFINE2TEXT(gcvHAL_TIMESTAMP),
++ gcmDEFINE2TEXT(gcvHAL_DATABASE),
++ gcmDEFINE2TEXT(gcvHAL_VERSION),
++ gcmDEFINE2TEXT(gcvHAL_CHIP_INFO),
++ gcmDEFINE2TEXT(gcvHAL_ATTACH),
++ gcmDEFINE2TEXT(gcvHAL_DETACH)
++};
++#endif
++
++#if gcdENABLE_RECOVERY
++void
++_ResetFinishFunction(
++ gctPOINTER Data
++ )
++{
++ gckKERNEL kernel = (gckKERNEL)Data;
++
++ gckOS_AtomSet(kernel->os, kernel->resetAtom, 0);
++}
++#endif
++
++/*******************************************************************************
++**
++** gckKERNEL_Construct
++**
++** Construct a new gckKERNEL object.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gceCORE Core
++** Specified core.
++**
++** IN gctPOINTER Context
++** Pointer to a driver defined context.
++**
++** IN gckDB SharedDB,
++** Pointer to a shared DB.
++**
++** OUTPUT:
++**
++** gckKERNEL * Kernel
++** Pointer to a variable that will hold the pointer to the gckKERNEL
++** object.
++*/
++
++gceSTATUS
++gckKERNEL_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Context,
++ IN gckDB SharedDB,
++ OUT gckKERNEL * Kernel
++ )
++{
++ gckKERNEL kernel = gcvNULL;
++ gceSTATUS status;
++ gctSIZE_T i;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x Context=0x%x", Os, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Kernel != gcvNULL);
++
++ /* Allocate the gckKERNEL object. */
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckKERNEL),
++ &pointer));
++
++ kernel = pointer;
++
++ /* Zero the object pointers. */
++ kernel->hardware = gcvNULL;
++ kernel->command = gcvNULL;
++ kernel->eventObj = gcvNULL;
++ kernel->mmu = gcvNULL;
++#if gcdDVFS
++ kernel->dvfs = gcvNULL;
++#endif
++
++ kernel->vidmemMutex = gcvNULL;
++
++ /* Initialize the gckKERNEL object. */
++ kernel->object.type = gcvOBJ_KERNEL;
++ kernel->os = Os;
++ kernel->core = Core;
++
++
++ if (SharedDB == gcvNULL)
++ {
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(struct _gckDB),
++ &pointer));
++
++ kernel->db = pointer;
++ kernel->dbCreated = gcvTRUE;
++ kernel->db->freeDatabase = gcvNULL;
++ kernel->db->freeRecord = gcvNULL;
++ kernel->db->dbMutex = gcvNULL;
++ kernel->db->lastDatabase = gcvNULL;
++ kernel->db->idleTime = 0;
++ kernel->db->lastIdle = 0;
++ kernel->db->lastSlowdown = 0;
++
++ for (i = 0; i < gcmCOUNTOF(kernel->db->db); ++i)
++ {
++ kernel->db->db[i] = gcvNULL;
++ }
++
++ /* Construct a database mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &kernel->db->dbMutex));
++
++ /* Construct a id-pointer database. */
++ gcmkONERROR(gckKERNEL_CreateIntegerDatabase(kernel, &kernel->db->pointerDatabase));
++
++ /* Construct a id-pointer database mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &kernel->db->pointerDatabaseMutex));
++ }
++ else
++ {
++ kernel->db = SharedDB;
++ kernel->dbCreated = gcvFALSE;
++ }
++
++ for (i = 0; i < gcmCOUNTOF(kernel->timers); ++i)
++ {
++ kernel->timers[i].startTime = 0;
++ kernel->timers[i].stopTime = 0;
++ }
++
++ kernel->timeOut = gcdGPU_TIMEOUT;
++
++ /* Save context. */
++ kernel->context = Context;
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ kernel->virtualBufferHead =
++ kernel->virtualBufferTail = gcvNULL;
++
++ gcmkONERROR(
++ gckOS_CreateMutex(Os, (gctPOINTER)&kernel->virtualBufferLock));
++#endif
++
++ /* Construct atom holding number of clients. */
++ kernel->atomClients = gcvNULL;
++ gcmkONERROR(gckOS_AtomConstruct(Os, &kernel->atomClients));
++
++#if gcdENABLE_VG
++ kernel->vg = gcvNULL;
++
++ if (Core == gcvCORE_VG)
++ {
++ /* Construct the gckMMU object. */
++ gcmkONERROR(
++ gckVGKERNEL_Construct(Os, Context, kernel, &kernel->vg));
++ }
++ else
++#endif
++ {
++ /* Construct the gckHARDWARE object. */
++ gcmkONERROR(
++ gckHARDWARE_Construct(Os, kernel->core, &kernel->hardware));
++
++ /* Set pointer to gckKERNEL object in gckHARDWARE object. */
++ kernel->hardware->kernel = kernel;
++
++ /* Initialize the hardware. */
++ gcmkONERROR(
++ gckHARDWARE_InitializeHardware(kernel->hardware));
++
++ /* Construct the gckCOMMAND object. */
++ gcmkONERROR(
++ gckCOMMAND_Construct(kernel, &kernel->command));
++
++ /* Construct the gckEVENT object. */
++ gcmkONERROR(
++ gckEVENT_Construct(kernel, &kernel->eventObj));
++
++ /* Construct the gckMMU object. */
++ gcmkONERROR(
++ gckMMU_Construct(kernel, gcdMMU_SIZE, &kernel->mmu));
++
++#if gcdENABLE_RECOVERY
++ gcmkONERROR(
++ gckOS_AtomConstruct(Os, &kernel->resetAtom));
++
++ gcmkVERIFY_OK(
++ gckOS_CreateTimer(Os,
++ (gctTIMERFUNCTION)_ResetFinishFunction,
++ (gctPOINTER)kernel,
++ &kernel->resetFlagClearTimer));
++ kernel->resetTimeStamp = 0;
++#endif
++
++#if gcdDVFS
++ if (gckHARDWARE_IsFeatureAvailable(kernel->hardware,
++ gcvFEATURE_DYNAMIC_FREQUENCY_SCALING))
++ {
++ gcmkONERROR(gckDVFS_Construct(kernel->hardware, &kernel->dvfs));
++ gcmkONERROR(gckDVFS_Start(kernel->dvfs));
++ }
++#endif
++ }
++
++ spin_lock_init(&kernel->irq_lock);
++
++#if VIVANTE_PROFILER
++ /* Initialize profile setting */
++ kernel->profileEnable = gcvFALSE;
++ kernel->profileCleanRegister = gcvTRUE;
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ gcmkONERROR(gckOS_CreateSyncTimeline(Os, &kernel->timeline));
++#endif
++
++ /* Construct a video memory mutex. */
++ gcmkONERROR(gckOS_GetVideoMemoryMutex(Os, &kernel->vidmemMutex));
++
++ /* Return pointer to the gckKERNEL object. */
++ *Kernel = kernel;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Kernel=0x%x", *Kernel);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (kernel != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (Core != gcvCORE_VG)
++#endif
++ {
++ if (kernel->eventObj != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckEVENT_Destroy(kernel->eventObj));
++ }
++
++ if (kernel->command != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckCOMMAND_Destroy(kernel->command));
++ }
++
++ if (kernel->hardware != gcvNULL)
++ {
++ /* Turn off the power. */
++ gcmkVERIFY_OK(gckOS_SetGPUPower(kernel->hardware->os,
++ kernel->hardware->core,
++ gcvFALSE,
++ gcvFALSE));
++ gcmkVERIFY_OK(gckHARDWARE_Destroy(kernel->hardware));
++ }
++ }
++
++ if (kernel->atomClients != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, kernel->atomClients));
++ }
++
++#if gcdENABLE_RECOVERY
++ if (kernel->resetAtom != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Os, kernel->resetAtom));
++ }
++
++ if (kernel->resetFlagClearTimer)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Os, kernel->resetFlagClearTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Os, kernel->resetFlagClearTimer));
++ }
++#endif
++
++ if (kernel->dbCreated && kernel->db != gcvNULL)
++ {
++ if (kernel->db->dbMutex != gcvNULL)
++ {
++ /* Destroy the database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, kernel->db->dbMutex));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, kernel->db));
++ }
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ if (kernel->virtualBufferLock != gcvNULL)
++ {
++ /* Destroy the virtual command buffer mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, kernel->virtualBufferLock));
++ }
++#endif
++
++#if gcdDVFS
++ if (kernel->dvfs)
++ {
++ gcmkVERIFY_OK(gckDVFS_Stop(kernel->dvfs));
++ gcmkVERIFY_OK(gckDVFS_Destroy(kernel->dvfs));
++ }
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ if (kernel->timeline)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySyncTimeline(Os, kernel->timeline));
++ }
++#endif
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, kernel));
++ }
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Destroy
++**
++** Destroy a gckKERNEL object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_Destroy(
++ IN gckKERNEL Kernel
++ )
++{
++ gctSIZE_T i;
++ gcsDATABASE_PTR database, databaseNext;
++ gcsDATABASE_RECORD_PTR record, recordNext;
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++#if QNX_SINGLE_THREADED_DEBUGGING
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->debugMutex));
++#endif
++
++ /* Destroy the database. */
++ if (Kernel->dbCreated)
++ {
++ for (i = 0; i < gcmCOUNTOF(Kernel->db->db); ++i)
++ {
++ if (Kernel->db->db[i] != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckKERNEL_DestroyProcessDB(Kernel, Kernel->db->db[i]->processID));
++ }
++ }
++
++ /* Free all databases. */
++ for (database = Kernel->db->freeDatabase;
++ database != gcvNULL;
++ database = databaseNext)
++ {
++ databaseNext = database->next;
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, database));
++ }
++
++ if (Kernel->db->lastDatabase != gcvNULL)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, Kernel->db->lastDatabase));
++ }
++
++ /* Free all database records. */
++ for (record = Kernel->db->freeRecord; record != gcvNULL; record = recordNext)
++ {
++ recordNext = record->next;
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, record));
++ }
++
++ /* Destroy the database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Destroy id-pointer database. */
++ gcmkVERIFY_OK(gckKERNEL_DestroyIntegerDatabase(Kernel, Kernel->db->pointerDatabase));
++
++ /* Destroy id-pointer database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->pointerDatabaseMutex));
++ }
++
++#if gcdENABLE_VG
++ if (Kernel->vg)
++ {
++ gcmkVERIFY_OK(gckVGKERNEL_Destroy(Kernel->vg));
++ }
++ else
++#endif
++ {
++ /* Destroy the gckMMU object. */
++ gcmkVERIFY_OK(gckMMU_Destroy(Kernel->mmu));
++
++ /* Destroy the gckCOMMAND object. */
++ gcmkVERIFY_OK(gckCOMMAND_Destroy(Kernel->command));
++
++ /* Destroy the gckEVENT object. */
++ gcmkVERIFY_OK(gckEVENT_Destroy(Kernel->eventObj));
++
++ /* Destroy the gckHARDWARE object. */
++ gcmkVERIFY_OK(gckHARDWARE_Destroy(Kernel->hardware));
++
++#if gcdENABLE_RECOVERY
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, Kernel->resetAtom));
++
++ if (Kernel->resetFlagClearTimer)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Kernel->os, Kernel->resetFlagClearTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Kernel->os, Kernel->resetFlagClearTimer));
++ }
++#endif
++ }
++
++ /* Destroy the client atom. */
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, Kernel->atomClients));
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->virtualBufferLock));
++#endif
++
++#if gcdDVFS
++ if (Kernel->dvfs)
++ {
++ gcmkVERIFY_OK(gckDVFS_Stop(Kernel->dvfs));
++ gcmkVERIFY_OK(gckDVFS_Destroy(Kernel->dvfs));
++ }
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ gcmkVERIFY_OK(gckOS_DestroySyncTimeline(Kernel->os, Kernel->timeline));
++#endif
++
++ /* Mark the gckKERNEL object as unknown. */
++ Kernel->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckKERNEL object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, Kernel));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
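++
++/* Illustrative lifecycle sketch (not part of the original driver). The
++** exact gckKERNEL_Construct prototype is not shown in this hunk, so the
++** call is abbreviated; everything else uses functions defined in this file.
++**
++** gckKERNEL kernel = gcvNULL;
++**
++** gcmkONERROR(gckKERNEL_Construct(os, ..., &kernel));
++** gcmkONERROR(gckKERNEL_AttachProcess(kernel, gcvTRUE));
++** ... issue commands via gckKERNEL_Dispatch() ...
++** gcmkVERIFY_OK(gckKERNEL_AttachProcess(kernel, gcvFALSE));
++** gcmkVERIFY_OK(gckKERNEL_Destroy(kernel));
++*/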
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/oom.h>
++#include <linux/sched.h>
++#include <linux/notifier.h>
++
++extern struct task_struct *lowmem_deathpending;
++static unsigned long lowmem_deathpending_timeout;
++
++static int force_contiguous_lowmem_shrink(IN gckKERNEL Kernel)
++{
++ struct task_struct *p;
++ struct task_struct *selected = NULL;
++ int tasksize;
++ int ret = -1;
++ int min_adj = 0;
++ int selected_tasksize = 0;
++ int selected_oom_adj;
++ /*
++ * If we already have a death outstanding, then
++ * bail out right away, indicating to vmscan
++ * that we have nothing further to offer on
++ * this pass.
++ */
++ if (lowmem_deathpending &&
++ time_before_eq(jiffies, lowmem_deathpending_timeout))
++ return 0;
++ selected_oom_adj = min_adj;
++
++ read_lock(&tasklist_lock);
++ for_each_process(p) {
++ struct mm_struct *mm;
++ struct signal_struct *sig;
++ gcuDATABASE_INFO info;
++ int oom_adj, pid;
++
++ task_lock(p);
++ mm = p->mm;
++ sig = p->signal;
++ pid = p->pid;
++ if (!mm || !sig) {
++ task_unlock(p);
++ continue;
++ }
++ oom_adj = sig->oom_adj;
++ task_unlock(p);
++ if (oom_adj < min_adj) {
++ continue;
++ }
++
++ read_unlock(&tasklist_lock);
++
++ tasksize = 0;
++ if (gckKERNEL_QueryProcessDB(Kernel, pid, gcvFALSE, gcvDB_VIDEO_MEMORY, &info) == gcvSTATUS_OK){
++ tasksize += info.counters.bytes / PAGE_SIZE;
++ }
++ if (gckKERNEL_QueryProcessDB(Kernel, pid, gcvFALSE, gcvDB_CONTIGUOUS, &info) == gcvSTATUS_OK){
++ tasksize += info.counters.bytes / PAGE_SIZE;
++ }
++
++ read_lock(&tasklist_lock);
++
++ if (tasksize <= 0)
++ continue;
++
++ gckOS_Print("<gpu> pid %d (%s), adj %d, size %d \n", p->pid, p->comm, oom_adj, tasksize);
++
++ if (selected) {
++ if (oom_adj < selected_oom_adj)
++ continue;
++ if (oom_adj == selected_oom_adj &&
++ tasksize <= selected_tasksize)
++ continue;
++ }
++ selected = p;
++ selected_tasksize = tasksize;
++ selected_oom_adj = oom_adj;
++ }
++ if (selected) {
++ gckOS_Print("<gpu> send sigkill to %d (%s), adj %d, size %d\n",
++ selected->pid, selected->comm,
++ selected_oom_adj, selected_tasksize);
++ lowmem_deathpending = selected;
++ lowmem_deathpending_timeout = jiffies + HZ;
++ force_sig(SIGKILL, selected);
++ ret = 0;
++ }
++ read_unlock(&tasklist_lock);
++ return ret;
++}
++
++#endif
++
++/*******************************************************************************
++**
++** _AllocateMemory
++**
++** Private function to walk all required memory pools to allocate the requested
++** amount of video memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that defines the command to
++** be dispatched.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that receives any data to be
++** returned.
++*/
++static gceSTATUS
++_AllocateMemory(
++ IN gckKERNEL Kernel,
++ IN OUT gcePOOL * Pool,
++ IN gctSIZE_T Bytes,
++ IN gctSIZE_T Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gcePOOL pool;
++ gceSTATUS status;
++ gckVIDMEM videoMemory;
++ gctINT loopCount;
++ gcuVIDMEM_NODE_PTR node = gcvNULL;
++ gctBOOL tileStatusInVirtual;
++ gctBOOL forceContiguous = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%x *Pool=%d Bytes=%lu Alignment=%lu Type=%d",
++ Kernel, *Pool, Bytes, Alignment, Type);
++
++ gcmkVERIFY_ARGUMENT(Pool != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes != 0);
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++_AllocateMemory_Retry:
++#endif
++ /* Get initial pool. */
++ switch (pool = *Pool)
++ {
++ case gcvPOOL_DEFAULT_FORCE_CONTIGUOUS:
++ forceContiguous = gcvTRUE;
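++ /* Intentional fall-through to the gcvPOOL_DEFAULT handling below. */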
++ case gcvPOOL_DEFAULT:
++ case gcvPOOL_LOCAL:
++ pool = gcvPOOL_LOCAL_INTERNAL;
++ loopCount = (gctINT) gcvPOOL_NUMBER_OF_POOLS;
++ break;
++
++ case gcvPOOL_UNIFIED:
++ pool = gcvPOOL_SYSTEM;
++ loopCount = (gctINT) gcvPOOL_NUMBER_OF_POOLS;
++ break;
++
++ case gcvPOOL_CONTIGUOUS:
++ loopCount = (gctINT) gcvPOOL_NUMBER_OF_POOLS;
++ break;
++
++ case gcvPOOL_DEFAULT_FORCE_CONTIGUOUS_CACHEABLE:
++ pool = gcvPOOL_CONTIGUOUS;
++ loopCount = 1;
++ forceContiguous = gcvTRUE;
++ break;
++
++ default:
++ loopCount = 1;
++ break;
++ }
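++
++ /* The loop below walks the pools in a fixed fallback order:
++ ** gcvPOOL_LOCAL_INTERNAL -> gcvPOOL_LOCAL_EXTERNAL -> gcvPOOL_SYSTEM ->
++ ** gcvPOOL_CONTIGUOUS -> gcvPOOL_VIRTUAL, until an allocation succeeds
++ ** or loopCount is exhausted. */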
++
++ while (loopCount-- > 0)
++ {
++ if (pool == gcvPOOL_VIRTUAL)
++ {
++ /* Create a gcuVIDMEM_NODE for virtual memory. */
++ gcmkONERROR(
++ gckVIDMEM_ConstructVirtual(Kernel, gcvFALSE, Bytes, &node));
++
++ /* Success. */
++ break;
++ }
++
++ else
++ if (pool == gcvPOOL_CONTIGUOUS)
++ {
++#if gcdCONTIGUOUS_SIZE_LIMIT
++ if (Bytes > gcdCONTIGUOUS_SIZE_LIMIT && forceContiguous == gcvFALSE)
++ {
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ }
++ else
++#endif
++ {
++ /* Create a gcuVIDMEM_NODE from contiguous memory. */
++ status = gckVIDMEM_ConstructVirtual(Kernel, gcvTRUE, Bytes, &node);
++ }
++
++ if (gcmIS_SUCCESS(status) || forceContiguous == gcvTRUE)
++ {
++ /* Memory allocated. */
++ if(node && forceContiguous == gcvTRUE)
++ {
++ gctUINT32 physAddr=0;
++ gctUINT32 baseAddress = 0;
++
++ gcmkONERROR(
++ gckOS_LockPages(Kernel->os,
++ node->Virtual.physical,
++ node->Virtual.bytes,
++ gcvFALSE,
++ &node->Virtual.logical,
++ &node->Virtual.pageCount));
++
++ /* Convert logical address into a physical address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os,
++ node->Virtual.logical,
++ &physAddr));
++
++ gcmkONERROR(
++ gckOS_UnlockPages(Kernel->os,
++ node->Virtual.physical,
++ node->Virtual.bytes,
++ node->Virtual.logical));
++
++ gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &baseAddress));
++
++ gcmkASSERT(physAddr >= baseAddress);
++
++ /* Subtract baseAddress to get a GPU address used for programming. */
++ physAddr -= baseAddress;
++
++ if((physAddr & 0x80000000) || ((physAddr + Bytes) & 0x80000000))
++ {
++ gckOS_Print("gpu virtual memory 0x%x cannot be allocated in force contiguous request!\n", physAddr);
++
++ gcmkONERROR(gckVIDMEM_Free(Kernel,node));
++
++ node = gcvNULL;
++ }
++ }
++
++ break;
++ }
++ }
++
++ else
++ {
++ /* Get pointer to gckVIDMEM object for pool. */
++#if gcdUSE_VIDMEM_PER_PID
++ gctUINT32 pid;
++ gckOS_GetProcessID(&pid);
++
++ status = gckKERNEL_GetVideoMemoryPoolPid(Kernel, pool, pid, &videoMemory);
++ if (status == gcvSTATUS_NOT_FOUND)
++ {
++ /* Create VidMem pool for this process. */
++ status = gckKERNEL_CreateVideoMemoryPoolPid(Kernel, pool, pid, &videoMemory);
++ }
++#else
++ status = gckKERNEL_GetVideoMemoryPool(Kernel, pool, &videoMemory);
++#endif
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Allocate memory. */
++ status = gckVIDMEM_AllocateLinear(Kernel,
++ videoMemory,
++ Bytes,
++ Alignment,
++ Type,
++ &node);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Memory allocated. */
++ node->VidMem.pool = pool;
++ break;
++ }
++ }
++ }
++
++ if (pool == gcvPOOL_LOCAL_INTERNAL)
++ {
++ /* Advance to external memory. */
++ pool = gcvPOOL_LOCAL_EXTERNAL;
++ }
++
++ else
++ if (pool == gcvPOOL_LOCAL_EXTERNAL)
++ {
++ /* Advance to contiguous system memory. */
++ pool = gcvPOOL_SYSTEM;
++ }
++
++ else
++ if (pool == gcvPOOL_SYSTEM)
++ {
++ /* Advance to contiguous memory. */
++ pool = gcvPOOL_CONTIGUOUS;
++ }
++
++ else
++ if (pool == gcvPOOL_CONTIGUOUS)
++ {
++ tileStatusInVirtual =
++ gckHARDWARE_IsFeatureAvailable(Kernel->hardware,
++ gcvFEATURE_MC20);
++
++ if (Type == gcvSURF_TILE_STATUS && tileStatusInVirtual != gcvTRUE)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Advance to virtual memory. */
++ pool = gcvPOOL_VIRTUAL;
++ }
++
++ else
++ {
++ /* Out of pools. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++ }
++
++ if (node == gcvNULL)
++ {
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++ if(forceContiguous == gcvTRUE)
++ {
++ if(force_contiguous_lowmem_shrink(Kernel) == 0)
++ {
++ /* Sleep 1 millisecond. */
++ gckOS_Delay(gcvNULL, 1);
++ goto _AllocateMemory_Retry;
++ }
++ }
++#endif
++ /* Nothing allocated. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Return node and pool used for allocation. */
++ *Node = node;
++ *Pool = pool;
++
++ /* Return status. */
++ gcmkFOOTER_ARG("*Pool=%d *Node=0x%x", *Pool, *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
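++
++/* Illustrative usage sketch (not in the original driver): allocating 64 KB
++** of linear video memory from inside this file, e.g. from the dispatcher
++** below. gcvSURF_BITMAP is assumed here purely as an example surface type.
++**
++** gcePOOL pool = gcvPOOL_DEFAULT;
++** gcuVIDMEM_NODE_PTR node = gcvNULL;
++**
++** gcmkONERROR(_AllocateMemory(Kernel, &pool, 64 << 10, 64,
++** gcvSURF_BITMAP, &node));
++**
++** On return, pool holds the pool that actually satisfied the request.
++*/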
++
++/*******************************************************************************
++**
++** gckKERNEL_Dispatch
++**
++** Dispatch a command received from the user HAL layer.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctBOOL FromUser
++** Whether the call is made from user space.
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that defines the command to
++** be dispatched.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that receives any data to be
++** returned.
++*/
++
++gceSTATUS
++gckKERNEL_Dispatch(
++ IN gckKERNEL Kernel,
++ IN gctBOOL FromUser,
++ IN OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctSIZE_T bytes;
++ gcuVIDMEM_NODE_PTR node;
++ gctBOOL locked = gcvFALSE;
++ gctPHYS_ADDR physical = gcvNULL;
++ gctPOINTER logical = gcvNULL;
++ gctPOINTER info = gcvNULL;
++ gckCONTEXT context = gcvNULL;
++ gctUINT32 address;
++ gctUINT32 processID;
++ gckKERNEL kernel = Kernel;
++#if gcdSECURE_USER
++ gcskSECURE_CACHE_PTR cache;
++#endif
++ gctBOOL asynchronous;
++ gctPOINTER paddr = gcvNULL;
++#if !USE_NEW_LINUX_SIGNAL
++ gctSIGNAL signal;
++#endif
++ gceSURF_TYPE type;
++
++ gcmkHEADER_ARG("Kernel=0x%x FromUser=%d Interface=0x%x",
++ Kernel, FromUser, Interface);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Interface != gcvNULL);
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
++ "Dispatching command %d (%s)",
++ Interface->command, _DispatchText[Interface->command]);
++#endif
++#if QNX_SINGLE_THREADED_DEBUGGING
++ gckOS_AcquireMutex(Kernel->os, Kernel->debugMutex, gcvINFINITE);
++#endif
++
++ /* Get the current process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++#if gcdSECURE_USER
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(Kernel, processID, &cache));
++#endif
++
++ /* Dispatch on command. */
++ switch (Interface->command)
++ {
++ case gcvHAL_GET_BASE_ADDRESS:
++ /* Get base address. */
++ gcmkONERROR(
++ gckOS_GetBaseAddress(Kernel->os,
++ &Interface->u.GetBaseAddress.baseAddress));
++ break;
++
++ case gcvHAL_QUERY_VIDEO_MEMORY:
++ /* Query video memory size. */
++ gcmkONERROR(gckKERNEL_QueryVideoMemory(Kernel, Interface));
++ break;
++
++ case gcvHAL_QUERY_CHIP_IDENTITY:
++ /* Query chip identity. */
++ gcmkONERROR(
++ gckHARDWARE_QueryChipIdentity(
++ Kernel->hardware,
++ &Interface->u.QueryChipIdentity));
++ break;
++
++ case gcvHAL_MAP_MEMORY:
++ physical = gcmINT2PTR(Interface->u.MapMemory.physical);
++
++ /* Map memory. */
++ gcmkONERROR(
++ gckKERNEL_MapMemory(Kernel,
++ physical,
++ (gctSIZE_T) Interface->u.MapMemory.bytes,
++ &logical));
++
++ Interface->u.MapMemory.logical = gcmPTR_TO_UINT64(logical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_MAP_MEMORY,
++ logical,
++ physical,
++ (gctSIZE_T) Interface->u.MapMemory.bytes));
++ break;
++
++ case gcvHAL_UNMAP_MEMORY:
++ physical = gcmINT2PTR(Interface->u.UnmapMemory.physical);
++
++ /* Unmap memory. */
++ gcmkONERROR(
++ gckKERNEL_UnmapMemory(Kernel,
++ physical,
++ (gctSIZE_T) Interface->u.UnmapMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.UnmapMemory.logical)));
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_MAP_MEMORY,
++ gcmUINT64_TO_PTR(Interface->u.UnmapMemory.logical)));
++ break;
++
++ case gcvHAL_ALLOCATE_NON_PAGED_MEMORY:
++ bytes = (gctSIZE_T) Interface->u.AllocateNonPagedMemory.bytes;
++
++ /* Allocate non-paged memory. */
++ gcmkONERROR(
++ gckOS_AllocateNonPagedMemory(
++ Kernel->os,
++ FromUser,
++ &bytes,
++ &physical,
++ &logical));
++
++ Interface->u.AllocateNonPagedMemory.bytes = bytes;
++ Interface->u.AllocateNonPagedMemory.logical = gcmPTR_TO_UINT64(logical);
++ Interface->u.AllocateNonPagedMemory.physical = gcmPTR_TO_NAME(physical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_NON_PAGED,
++ logical,
++ gcmINT2PTR(Interface->u.AllocateNonPagedMemory.physical),
++ bytes));
++
++ break;
++
++ case gcvHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER:
++#if gcdVIRTUAL_COMMAND_BUFFER
++ bytes = (gctSIZE_T) Interface->u.AllocateVirtualCommandBuffer.bytes;
++
++ gcmkONERROR(
++ gckKERNEL_AllocateVirtualCommandBuffer(
++ Kernel,
++ FromUser,
++ &bytes,
++ &physical,
++ &logical));
++
++ Interface->u.AllocateVirtualCommandBuffer.bytes = bytes;
++ Interface->u.AllocateVirtualCommandBuffer.logical = gcmPTR_TO_UINT64(logical);
++ Interface->u.AllocateVirtualCommandBuffer.physical = gcmPTR_TO_NAME(physical);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_COMMAND_BUFFER,
++ logical,
++ gcmINT2PTR(Interface->u.AllocateVirtualCommandBuffer.physical),
++ bytes));
++#else
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ physical = gcmNAME_TO_PTR(Interface->u.FreeNonPagedMemory.physical);
++
++ /* Unmap user logical out of physical memory first. */
++ gcmkONERROR(gckOS_UnmapUserLogical(Kernel->os,
++ physical,
++ (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++
++ /* Free non-paged memory. */
++ gcmkONERROR(
++ gckOS_FreeNonPagedMemory(Kernel->os,
++ (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes,
++ physical,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_NON_PAGED,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Kernel,
++ cache,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical),
++ Interface->u.FreeNonPagedMemory.bytes));
++#endif
++
++ gcmRELEASE_NAME(Interface->u.FreeNonPagedMemory.physical);
++
++ break;
++
++ case gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY:
++ bytes = (gctSIZE_T) Interface->u.AllocateContiguousMemory.bytes;
++
++ /* Allocate contiguous memory. */
++ gcmkONERROR(gckOS_AllocateContiguous(
++ Kernel->os,
++ FromUser,
++ &bytes,
++ &physical,
++ &logical));
++
++ Interface->u.AllocateContiguousMemory.bytes = bytes;
++ Interface->u.AllocateContiguousMemory.logical = gcmPTR_TO_UINT64(logical);
++ Interface->u.AllocateContiguousMemory.physical = gcmPTR_TO_NAME(physical);
++
++ gcmkONERROR(gckHARDWARE_ConvertLogical(
++ Kernel->hardware,
++ gcmUINT64_TO_PTR(Interface->u.AllocateContiguousMemory.logical),
++ &Interface->u.AllocateContiguousMemory.address));
++
++ gcmkVERIFY_OK(gckKERNEL_AddProcessDB(
++ Kernel,
++ processID, gcvDB_CONTIGUOUS,
++ logical,
++ gcmINT2PTR(Interface->u.AllocateContiguousMemory.physical),
++ bytes));
++
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ physical = gcmNAME_TO_PTR(Interface->u.FreeContiguousMemory.physical);
++
++ /* Unmap user logical out of physical memory first. */
++ gcmkONERROR(gckOS_UnmapUserLogical(Kernel->os,
++ physical,
++ (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical)));
++
++ /* Free contiguous memory. */
++ gcmkONERROR(
++ gckOS_FreeContiguous(Kernel->os,
++ physical,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical),
++ (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_CONTIGUOUS,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical)));
++
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Kernel,
++ cache,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical),
++ Interface->u.FreeContiguousMemory.bytes));
++#endif
++
++ gcmRELEASE_NAME(Interface->u.FreeContiguousMemory.physical);
++
++ break;
++
++ case gcvHAL_ALLOCATE_VIDEO_MEMORY:
++
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++
++ break;
++
++ case gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY:
++ type = Interface->u.AllocateLinearVideoMemory.type;
++
++ /* Allocate memory. */
++ gcmkONERROR(
++ _AllocateMemory(Kernel,
++ &Interface->u.AllocateLinearVideoMemory.pool,
++ Interface->u.AllocateLinearVideoMemory.bytes,
++ Interface->u.AllocateLinearVideoMemory.alignment,
++ Interface->u.AllocateLinearVideoMemory.type,
++ &node));
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ bytes = node->VidMem.bytes;
++ node->VidMem.type = type;
++
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_RESERVED,
++ node,
++ gcvNULL,
++ bytes));
++ }
++ else
++ {
++ bytes = node->Virtual.bytes;
++ node->Virtual.type = type;
++
++ if(node->Virtual.contiguous)
++ {
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_CONTIGUOUS,
++ node,
++ gcvNULL,
++ bytes));
++ }
++ else
++ {
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_VIRTUAL,
++ node,
++ gcvNULL,
++ bytes));
++ }
++
++ }
++
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY,
++ node,
++ gcvNULL,
++ bytes));
++
++ /* Get the node. */
++ Interface->u.AllocateLinearVideoMemory.node = gcmPTR_TO_UINT64(node);
++ break;
++
++ case gcvHAL_FREE_VIDEO_MEMORY:
++ node = gcmUINT64_TO_PTR(Interface->u.FreeVideoMemory.node);
++#ifdef __QNXNTO__
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM
++ && node->VidMem.logical != gcvNULL)
++ {
++ gcmkONERROR(
++ gckKERNEL_UnmapVideoMemory(Kernel,
++ node->VidMem.logical,
++ processID,
++ node->VidMem.bytes));
++ node->VidMem.logical = gcvNULL;
++ }
++#endif
++ /* Free video memory. */
++ gcmkONERROR(
++ gckVIDMEM_Free(Kernel, node));
++
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY,
++ node));
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_RESERVED,
++ node));
++ }
++ else if(node->Virtual.contiguous)
++ {
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_CONTIGUOUS,
++ node));
++ }
++ else
++ {
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_VIRTUAL,
++ node));
++ }
++
++ break;
++
++ case gcvHAL_LOCK_VIDEO_MEMORY:
++ node = gcmUINT64_TO_PTR(Interface->u.LockVideoMemory.node);
++
++ /* Lock video memory. */
++ gcmkONERROR(
++ gckVIDMEM_Lock(Kernel,
++ node,
++ Interface->u.LockVideoMemory.cacheable,
++ &Interface->u.LockVideoMemory.address));
++
++ locked = gcvTRUE;
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ /* Map video memory address into user space. */
++#ifdef __QNXNTO__
++ if (node->VidMem.logical == gcvNULL)
++ {
++ gcmkONERROR(
++ gckKERNEL_MapVideoMemory(Kernel,
++ FromUser,
++ Interface->u.LockVideoMemory.address,
++ processID,
++ node->VidMem.bytes,
++ &node->VidMem.logical));
++ }
++ gcmkASSERT(node->VidMem.logical != gcvNULL);
++
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(node->VidMem.logical);
++#else
++ gcmkONERROR(
++ gckKERNEL_MapVideoMemory(Kernel,
++ FromUser,
++ Interface->u.LockVideoMemory.address,
++ &logical));
++
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(logical);
++#endif
++ }
++ else
++ {
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(node->Virtual.logical);
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++
++#if gcdSECURE_USER
++ /* Return logical address as physical address. */
++ Interface->u.LockVideoMemory.address =
++ Interface->u.LockVideoMemory.memory;
++#endif
++ gcmkONERROR(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_LOCKED,
++ node,
++ gcvNULL,
++ 0));
++
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ /* Unlock video memory. */
++ node = gcmUINT64_TO_PTR(Interface->u.UnlockVideoMemory.node);
++
++#if gcdSECURE_USER
++ /* Save node information before it disappears. */
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ logical = gcvNULL;
++ bytes = 0;
++ }
++ else
++ {
++ logical = node->Virtual.logical;
++ bytes = node->Virtual.bytes;
++ }
++#endif
++
++ /* Unlock video memory. */
++ gcmkONERROR(
++ gckVIDMEM_Unlock(Kernel,
++ node,
++ Interface->u.UnlockVideoMemory.type,
++ &Interface->u.UnlockVideoMemory.asynchroneous));
++
++#if gcdSECURE_USER
++ /* Flush the translation cache for virtual surfaces. */
++ if (logical != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(Kernel,
++ cache,
++ logical,
++ bytes));
++ }
++#endif
++ if (Interface->u.UnlockVideoMemory.asynchroneous == gcvFALSE)
++ {
++ /* There is no event to unlock this node; remove the record now. */
++ gcmkONERROR(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_LOCKED,
++ node));
++ }
++ break;
++
++ case gcvHAL_EVENT_COMMIT:
++ /* Commit an event queue. */
++ gcmkONERROR(
++ gckEVENT_Commit(Kernel->eventObj,
++ gcmUINT64_TO_PTR(Interface->u.Event.queue)));
++ break;
++
++ case gcvHAL_COMMIT:
++ /* Commit a command and context buffer. */
++ gcmkONERROR(
++ gckCOMMAND_Commit(Kernel->command,
++ Interface->u.Commit.context ?
++ gcmNAME_TO_PTR(Interface->u.Commit.context) : gcvNULL,
++ gcmUINT64_TO_PTR(Interface->u.Commit.commandBuffer),
++ gcmUINT64_TO_PTR(Interface->u.Commit.delta),
++ gcmUINT64_TO_PTR(Interface->u.Commit.queue),
++ processID));
++ break;
++
++ case gcvHAL_STALL:
++ /* Stall the command queue. */
++ gcmkONERROR(gckCOMMAND_Stall(Kernel->command, gcvFALSE));
++ break;
++
++ case gcvHAL_MAP_USER_MEMORY:
++ /* Map user memory to DMA. */
++ gcmkONERROR(
++ gckOS_MapUserMemory(Kernel->os,
++ Kernel->core,
++ gcmUINT64_TO_PTR(Interface->u.MapUserMemory.memory),
++ Interface->u.MapUserMemory.physical,
++ (gctSIZE_T) Interface->u.MapUserMemory.size,
++ &info,
++ &Interface->u.MapUserMemory.address));
++
++ Interface->u.MapUserMemory.info = gcmPTR_TO_NAME(info);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_MAP_USER_MEMORY,
++ gcmINT2PTR(Interface->u.MapUserMemory.info),
++ gcmUINT64_TO_PTR(Interface->u.MapUserMemory.memory),
++ (gctSIZE_T) Interface->u.MapUserMemory.size));
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ address = Interface->u.UnmapUserMemory.address;
++ info = gcmNAME_TO_PTR(Interface->u.UnmapUserMemory.info);
++
++ /* Unmap user memory. */
++ gcmkONERROR(
++ gckOS_UnmapUserMemory(Kernel->os,
++ Kernel->core,
++ gcmUINT64_TO_PTR(Interface->u.UnmapUserMemory.memory),
++ (gctSIZE_T) Interface->u.UnmapUserMemory.size,
++ info,
++ address));
++
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Kernel,
++ cache,
++ gcmUINT64_TO_PTR(Interface->u.UnmapUserMemory.memory),
++ Interface->u.UnmapUserMemory.size));
++#endif
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_MAP_USER_MEMORY,
++ gcmINT2PTR(Interface->u.UnmapUserMemory.info)));
++
++ gcmRELEASE_NAME(Interface->u.UnmapUserMemory.info);
++
++ break;
++
++#if !USE_NEW_LINUX_SIGNAL
++ case gcvHAL_USER_SIGNAL:
++ /* Dispatch depends on the user signal subcommands. */
++ switch(Interface->u.UserSignal.command)
++ {
++ case gcvUSER_SIGNAL_CREATE:
++ /* Create a signal used in the user space. */
++ gcmkONERROR(
++ gckOS_CreateUserSignal(Kernel->os,
++ Interface->u.UserSignal.manualReset,
++ &Interface->u.UserSignal.id));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvUSER_SIGNAL_DESTROY:
++ /* Destroy the signal. */
++ gcmkONERROR(
++ gckOS_DestroyUserSignal(Kernel->os,
++ Interface->u.UserSignal.id));
++
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id)));
++ break;
++
++ case gcvUSER_SIGNAL_SIGNAL:
++ /* Signal the signal. */
++ gcmkONERROR(
++ gckOS_SignalUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ Interface->u.UserSignal.state));
++ break;
++
++ case gcvUSER_SIGNAL_WAIT:
++#if gcdGPU_TIMEOUT
++ if (Interface->u.UserSignal.wait == gcvINFINITE)
++ {
++ gckHARDWARE hardware;
++ gctUINT32 timer = 0;
++
++ for(;;)
++ {
++ /* Wait on the signal. */
++ status = gckOS_WaitUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ gcdGPU_ADVANCETIMER);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ gcmkONERROR(
++ gckOS_SignalQueryHardware(Kernel->os,
++ (gctSIGNAL)(gctUINTPTR_T)Interface->u.UserSignal.id,
++ &hardware));
++
++ if (hardware)
++ {
++ /* This signal is bound to hardware,
++ ** so the timeout is limited by Kernel->timeOut.
++ */
++ timer += gcdGPU_ADVANCETIMER;
++ }
++
++ if (timer >= Kernel->timeOut)
++ {
++ gcmkONERROR(
++ gckOS_Broadcast(Kernel->os,
++ hardware,
++ gcvBROADCAST_GPU_STUCK));
++
++ timer = 0;
++
++ /* If several processes try to reset the GPU, only one
++ ** of them can do the real reset; the other processes
++ ** still need to wait until this signal is triggered,
++ ** which means the reset has finished.
++ */
++ continue;
++ }
++ }
++ else
++ {
++ /* Bail out on other error. */
++ gcmkONERROR(status);
++
++ /* The wait on the signal succeeded. */
++ break;
++ }
++ }
++ }
++ else
++#endif
++ {
++ /* Wait on the signal. */
++ status = gckOS_WaitUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ Interface->u.UserSignal.wait);
++ }
++
++ break;
++
++ case gcvUSER_SIGNAL_MAP:
++ gcmkONERROR(
++ gckOS_MapSignal(Kernel->os,
++ (gctSIGNAL)(gctUINTPTR_T)Interface->u.UserSignal.id,
++ (gctHANDLE)(gctUINTPTR_T)processID,
++ &signal));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvUSER_SIGNAL_UNMAP:
++ /* Destroy the signal. */
++ gcmkONERROR(
++ gckOS_DestroyUserSignal(Kernel->os,
++ Interface->u.UserSignal.id));
++
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id)));
++ break;
++
++ default:
++ /* Invalid user signal command. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++ break;
++#endif
++
++ case gcvHAL_SET_POWER_MANAGEMENT_STATE:
++ /* Set the power management state. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(
++ Kernel->hardware,
++ Interface->u.SetPowerManagement.state));
++ break;
++
++ case gcvHAL_QUERY_POWER_MANAGEMENT_STATE:
++ /* Default: assume the chip is not idle. */
++ Interface->u.QueryPowerManagement.isIdle = gcvFALSE;
++
++ /* Query the power management state. */
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(
++ Kernel->hardware,
++ &Interface->u.QueryPowerManagement.state));
++
++ /* Query the idle state. */
++ gcmkONERROR(
++ gckHARDWARE_QueryIdle(Kernel->hardware,
++ &Interface->u.QueryPowerManagement.isIdle));
++ break;
++
++ case gcvHAL_READ_REGISTER:
++#if gcdREGISTER_ACCESS_FROM_USER
++ {
++ gceCHIPPOWERSTATE power;
++
++ gckOS_AcquireMutex(Kernel->os, Kernel->hardware->powerMutex, gcvINFINITE);
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(Kernel->hardware,
++ &power));
++ if (power == gcvPOWER_ON)
++ {
++ /* Read a register. */
++ gcmkONERROR(gckOS_ReadRegisterEx(
++ Kernel->os,
++ Kernel->core,
++ Interface->u.ReadRegisterData.address,
++ &Interface->u.ReadRegisterData.data));
++ }
++ else
++ {
++ /* Chip is in a low-power state. */
++ Interface->u.ReadRegisterData.data = 0;
++ status = gcvSTATUS_CHIP_NOT_READY;
++ }
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->hardware->powerMutex));
++ }
++#else
++ /* No access from user land to read registers. */
++ Interface->u.ReadRegisterData.data = 0;
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++
++ case gcvHAL_WRITE_REGISTER:
++#if gcdREGISTER_ACCESS_FROM_USER
++ {
++ gceCHIPPOWERSTATE power;
++
++ gckOS_AcquireMutex(Kernel->os, Kernel->hardware->powerMutex, gcvINFINITE);
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(Kernel->hardware,
++ &power));
++ if (power == gcvPOWER_ON)
++ {
++ /* Write a register. */
++ gcmkONERROR(
++ gckOS_WriteRegisterEx(Kernel->os,
++ Kernel->core,
++ Interface->u.WriteRegisterData.address,
++ Interface->u.WriteRegisterData.data));
++ }
++ else
++ {
++ /* Chip is in a low-power state. */
++ Interface->u.WriteRegisterData.data = 0;
++ status = gcvSTATUS_CHIP_NOT_READY;
++ }
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->hardware->powerMutex));
++ }
++#else
++ /* No access from user land to write registers. */
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++
++ case gcvHAL_READ_ALL_PROFILE_REGISTERS:
++#if VIVANTE_PROFILER && VIVANTE_PROFILER_CONTEXT
++ /* Read profile data according to the context. */
++ gcmkONERROR(
++ gckHARDWARE_QueryContextProfile(
++ Kernel->hardware,
++ Kernel->profileCleanRegister,
++ gcmNAME_TO_PTR(Interface->u.RegisterProfileData.context),
++ &Interface->u.RegisterProfileData.counters));
++#elif VIVANTE_PROFILER
++ /* Read all 3D profile registers. */
++ gcmkONERROR(
++ gckHARDWARE_QueryProfileRegisters(
++ Kernel->hardware,
++ Kernel->profileCleanRegister,
++ &Interface->u.RegisterProfileData.counters));
++#else
++ status = gcvSTATUS_OK;
++#endif
++ break;
++
++ case gcvHAL_PROFILE_REGISTERS_2D:
++#if VIVANTE_PROFILER
++ /* Read all 2D profile registers. */
++ gcmkONERROR(
++ gckHARDWARE_ProfileEngine2D(
++ Kernel->hardware,
++ gcmUINT64_TO_PTR(Interface->u.RegisterProfileData2D.hwProfile2D)));
++#else
++ status = gcvSTATUS_OK;
++#endif
++ break;
++
++ case gcvHAL_GET_PROFILE_SETTING:
++#if VIVANTE_PROFILER
++ /* Get profile setting */
++ Interface->u.GetProfileSetting.enable = Kernel->profileEnable;
++#endif
++
++ status = gcvSTATUS_OK;
++ break;
++ case gcvHAL_SET_PROFILE_SETTING:
++#if VIVANTE_PROFILER
++ /* Set profile setting */
++ if(Kernel->hardware->gpuProfiler)
++ Kernel->profileEnable = Interface->u.SetProfileSetting.enable;
++ else
++ {
++ status = gcvSTATUS_NOT_SUPPORTED;
++ break;
++ }
++#endif
++
++ status = gcvSTATUS_OK;
++ break;
++
++#if VIVANTE_PROFILER_PERDRAW
++ case gcvHAL_READ_PROFILER_REGISTER_SETTING:
++ #if VIVANTE_PROFILER
++ Kernel->profileCleanRegister = Interface->u.SetProfilerRegisterClear.bclear;
++ #endif
++ status = gcvSTATUS_OK;
++ break;
++#endif
++
++ case gcvHAL_QUERY_KERNEL_SETTINGS:
++ /* Get kernel settings. */
++ gcmkONERROR(
++ gckKERNEL_QuerySettings(Kernel,
++ &Interface->u.QueryKernelSettings.settings));
++ break;
++
++ case gcvHAL_RESET:
++ /* Reset the hardware. */
++ gckKERNEL_Recovery(Kernel);
++ break;
++
++ case gcvHAL_DEBUG:
++ /* Set debug level and zones. */
++ if (Interface->u.Debug.set)
++ {
++ gckOS_SetDebugLevel(Interface->u.Debug.level);
++ gckOS_SetDebugZones(Interface->u.Debug.zones,
++ Interface->u.Debug.enable);
++ }
++
++ if (Interface->u.Debug.message[0] != '\0')
++ {
++ /* Print a message to the debugger. */
++ if (Interface->u.Debug.type == gcvMESSAGE_TEXT)
++ {
++ gckOS_CopyPrint(Interface->u.Debug.message);
++ }
++ else
++ {
++ gckOS_DumpBuffer(Kernel->os,
++ Interface->u.Debug.message,
++ Interface->u.Debug.messageSize,
++ gceDUMP_BUFFER_FROM_USER,
++ gcvTRUE);
++ }
++ }
++ status = gcvSTATUS_OK;
++ break;
++
++ case gcvHAL_DUMP_GPU_STATE:
++ /* Dump GPU state */
++ {
++ gceCHIPPOWERSTATE power;
++ gcmkONERROR(gckHARDWARE_QueryPowerManagementState(Kernel->hardware,
++ &power));
++ if (power == gcvPOWER_ON)
++ {
++ Interface->u.ReadRegisterData.data = 1;
++ gcmkVERIFY_OK(
++ gckHARDWARE_DumpGPUState(Kernel->hardware));
++#if gcdVIRTUAL_COMMAND_BUFFER
++ gcmkVERIFY_OK(
++ gckCOMMAND_DumpExecutingBuffer(Kernel->command));
++#endif
++ }
++ else
++ {
++ Interface->u.ReadRegisterData.data = 0;
++ status = gcvSTATUS_CHIP_NOT_READY;
++ }
++ }
++ break;
++
++ case gcvHAL_DUMP_EVENT:
++ /* Dump GPU event */
++ gcmkVERIFY_OK(gckEVENT_Dump(Kernel->eventObj));
++
++ /* Dump Process DB. */
++ gcmkVERIFY_OK(gckKERNEL_DumpProcessDB(Kernel));
++ break;
++
++ case gcvHAL_CACHE:
++ node = gcmUINT64_TO_PTR(Interface->u.Cache.node);
++ if (node == gcvNULL)
++ {
++ /* FIXME: The surface wraps memory that was not allocated by us,
++ ** so we have no physical address for outer-cache handling; ignore it. */
++ status = gcvSTATUS_OK;
++ break;
++ }
++ else if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ /* Video memory has no physical handles. */
++ physical = gcvNULL;
++ }
++ else
++ {
++ /* Grab physical handle. */
++ physical = node->Virtual.physical;
++ }
++
++ logical = gcmUINT64_TO_PTR(Interface->u.Cache.logical);
++ bytes = (gctSIZE_T) Interface->u.Cache.bytes;
++ switch(Interface->u.Cache.operation)
++ {
++ case gcvCACHE_FLUSH:
++ /* Clean and invalidate the cache. */
++ status = gckOS_CacheFlush(Kernel->os,
++ processID,
++ physical,
++ paddr,
++ logical,
++ bytes);
++ break;
++ case gcvCACHE_CLEAN:
++ /* Clean the cache. */
++ status = gckOS_CacheClean(Kernel->os,
++ processID,
++ physical,
++ paddr,
++ logical,
++ bytes);
++ break;
++ case gcvCACHE_INVALIDATE:
++ /* Invalidate the cache. */
++ status = gckOS_CacheInvalidate(Kernel->os,
++ processID,
++ physical,
++ paddr,
++ logical,
++ bytes);
++ break;
++
++ case gcvCACHE_MEMORY_BARRIER:
++ status = gckOS_MemoryBarrier(Kernel->os,
++ logical);
++ break;
++ default:
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++ break;
++
++ case gcvHAL_TIMESTAMP:
++ /* Check for invalid timer. */
++ if ((Interface->u.TimeStamp.timer >= gcmCOUNTOF(Kernel->timers))
++ || (Interface->u.TimeStamp.request != 2))
++ {
++ Interface->u.TimeStamp.timeDelta = 0;
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Return timer results and reset timer. */
++ {
++ gcsTIMER_PTR timer = &(Kernel->timers[Interface->u.TimeStamp.timer]);
++ gctUINT64 timeDelta = 0;
++
++ if (timer->stopTime < timer->startTime )
++ {
++ Interface->u.TimeStamp.timeDelta = 0;
++ gcmkONERROR(gcvSTATUS_TIMER_OVERFLOW);
++ }
++
++ timeDelta = timer->stopTime - timer->startTime;
++
++ /* Check truncation overflow. */
++ Interface->u.TimeStamp.timeDelta = (gctINT32) timeDelta;
++ /* Only bits 0..30 are available. */
++ if (timeDelta>>31)
++ {
++ Interface->u.TimeStamp.timeDelta = 0;
++ gcmkONERROR(gcvSTATUS_TIMER_OVERFLOW);
++ }
++
++ status = gcvSTATUS_OK;
++ }
++ break;
++
++ case gcvHAL_DATABASE:
++ /* Query video memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_VIDEO_MEMORY,
++ &Interface->u.Database.vidMem));
++
++ /* Query non-paged memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_NON_PAGED,
++ &Interface->u.Database.nonPaged));
++
++ /* Query contiguous memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_CONTIGUOUS,
++ &Interface->u.Database.contiguous));
++
++ /* Query GPU idle time. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.Database.processID,
++ !Interface->u.Database.validProcessID,
++ gcvDB_IDLE,
++ &Interface->u.Database.gpuIdle));
++ break;
++
++ case gcvHAL_VIDMEM_DATABASE:
++ /* Query reserved video memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.VidMemDatabase.processID,
++ !Interface->u.VidMemDatabase.validProcessID,
++ gcvDB_VIDEO_MEMORY_RESERVED,
++ &Interface->u.VidMemDatabase.vidMemResv));
++
++ /* Query contiguous video memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.VidMemDatabase.processID,
++ !Interface->u.VidMemDatabase.validProcessID,
++ gcvDB_VIDEO_MEMORY_CONTIGUOUS,
++ &Interface->u.VidMemDatabase.vidMemCont));
++
++ /* Query virtual video memory. */
++ gcmkONERROR(
++ gckKERNEL_QueryProcessDB(Kernel,
++ Interface->u.VidMemDatabase.processID,
++ !Interface->u.VidMemDatabase.validProcessID,
++ gcvDB_VIDEO_MEMORY_VIRTUAL,
++ &Interface->u.VidMemDatabase.vidMemVirt));
++
++ break;
++
++ case gcvHAL_VERSION:
++ Interface->u.Version.major = gcvVERSION_MAJOR;
++ Interface->u.Version.minor = gcvVERSION_MINOR;
++ Interface->u.Version.patch = gcvVERSION_PATCH;
++ Interface->u.Version.build = gcvVERSION_BUILD;
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
++ "KERNEL version %d.%d.%d build %u %s %s",
++ gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH,
++ gcvVERSION_BUILD, gcvVERSION_DATE, gcvVERSION_TIME);
++#endif
++ break;
++
++ case gcvHAL_CHIP_INFO:
++ /* Only if not support multi-core */
++ Interface->u.ChipInfo.count = 1;
++ Interface->u.ChipInfo.types[0] = Kernel->hardware->type;
++ break;
++
++ case gcvHAL_ATTACH:
++ /* Attach user process. */
++ gcmkONERROR(
++ gckCOMMAND_Attach(Kernel->command,
++ &context,
++ &bytes,
++ processID));
++
++ Interface->u.Attach.stateCount = bytes;
++ Interface->u.Attach.context = gcmPTR_TO_NAME(context);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_CONTEXT,
++ gcmINT2PTR(Interface->u.Attach.context),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvHAL_DETACH:
++ /* Detach user process. */
++ gcmkONERROR(
++ gckCOMMAND_Detach(Kernel->command,
++ gcmNAME_TO_PTR(Interface->u.Detach.context)));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_CONTEXT,
++ gcmINT2PTR(Interface->u.Detach.context)));
++
++ gcmRELEASE_NAME(Interface->u.Detach.context);
++ break;
++
++ case gcvHAL_COMPOSE:
++ Interface->u.Compose.physical = gcmPTR_TO_UINT64(gcmNAME_TO_PTR(Interface->u.Compose.physical));
++ /* Start composition. */
++ gcmkONERROR(
++ gckEVENT_Compose(Kernel->eventObj,
++ &Interface->u.Compose));
++ break;
++
++ case gcvHAL_SET_TIMEOUT:
++ /* Set the timeout value supplied by the user. */
++ gckKERNEL_SetTimeOut(Kernel, Interface->u.SetTimeOut.timeOut);
++ break;
++
++#if gcdFRAME_DB
++ case gcvHAL_GET_FRAME_INFO:
++ gcmkONERROR(gckHARDWARE_GetFrameInfo(
++ Kernel->hardware,
++ gcmUINT64_TO_PTR(Interface->u.GetFrameInfo.frameInfo)));
++ break;
++#endif
++
++ case gcvHAL_GET_SHARED_INFO:
++ if (Interface->u.GetSharedInfo.data == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++ else
++ {
++ gctUINT32 pid = Interface->u.GetSharedInfo.pid;
++ gctUINT32 dataId = Interface->u.GetSharedInfo.dataId;
++ gctSIZE_T bytes = Interface->u.GetSharedInfo.bytes;
++ gctPOINTER data = Interface->u.GetSharedInfo.data;
++ gcsDATABASE_RECORD record;
++
++ /* Find record. */
++ gcmkONERROR(
++ gckKERNEL_FindProcessDB(Kernel,
++ pid,
++ 0,
++ gcvDB_SHARED_INFO,
++ gcmINT2PTR(dataId),
++ &record));
++
++ /* Check memory size. */
++ if (bytes < record.bytes)
++ {
++ /* Insufficient memory to hold shared data. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Copy to user. */
++ status = gckOS_CopyToUserData(Kernel->os,
++ record.physical,
++ data,
++ record.bytes);
++
++ /*
++ * Remove from the process database.
++ * Each time the shared info is read, the record is erased on the
++ * kernel side.
++ */
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ pid,
++ gcvDB_SHARED_INFO,
++ gcmINT2PTR(dataId)));
++ /* Free the existing data. */
++ gcmkVERIFY_OK(
++ gckOS_FreeMemory(Kernel->os, record.physical));
++ }
++ break;
++
++ case gcvHAL_SET_SHARED_INFO:
++ {
++ gctUINT32 dataId = Interface->u.SetSharedInfo.dataId;
++ gctPOINTER data = Interface->u.SetSharedInfo.data;
++ gctUINT32 bytes = Interface->u.SetSharedInfo.bytes;
++ gctPOINTER memory = gcvNULL;
++ gcsDATABASE_RECORD record;
++
++ if (gcmIS_SUCCESS(gckKERNEL_FindProcessDB(Kernel,
++ processID,
++ 0,
++ gcvDB_SHARED_INFO,
++ gcmINT2PTR(dataId),
++ &record)))
++ {
++ /* Found a record with the same id. */
++ if (bytes != record.bytes)
++ {
++ /* Remove from process db. */
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID,
++ gcvDB_SHARED_INFO,
++ gcmINT2PTR(dataId)));
++
++ /* Free the existing data. */
++ gcmkVERIFY_OK(
++ gckOS_FreeMemory(Kernel->os, record.physical));
++ }
++ else
++ {
++ /* Re-use allocated memory. */
++ memory = record.physical;
++ }
++ }
++
++ if ((data == gcvNULL) || (bytes == 0))
++ {
++ /* Nothing to record. */
++ break;
++ }
++
++ if (bytes > 1024)
++ {
++ /* Limit the data size. */
++ gcmkONERROR(gcvSTATUS_TOO_COMPLEX);
++ }
++
++ if (memory == gcvNULL)
++ {
++ /* Allocate memory for holding shared data. */
++ gcmkONERROR(
++ gckOS_AllocateMemory(Kernel->os, bytes, &memory));
++
++ /* Add to process db. */
++ status = gckKERNEL_AddProcessDB(Kernel,
++ processID,
++ gcvDB_SHARED_INFO,
++ gcmINT2PTR(dataId),
++ memory,
++ bytes);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Failed to add process db. Free allocated memory. */
++ gcmkVERIFY_OK(gckOS_FreeMemory(Kernel->os, memory));
++ break;
++ }
++ }
++
++ /* Copy shared data to kernel memory. */
++ gcmkONERROR(
++ gckOS_CopyFromUserData(Kernel->os,
++ memory,
++ data,
++ bytes));
++ }
++ break;
++
++ case gcvHAL_SET_FSCALE_VALUE:
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ status = gckHARDWARE_SetFscaleValue(Kernel->hardware,
++ Interface->u.SetFscaleValue.value);
++#else
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++ case gcvHAL_GET_FSCALE_VALUE:
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ status = gckHARDWARE_GetFscaleValue(Kernel->hardware,
++ &Interface->u.GetFscaleValue.value,
++ &Interface->u.GetFscaleValue.minValue,
++ &Interface->u.GetFscaleValue.maxValue);
++#else
++ status = gcvSTATUS_NOT_SUPPORTED;
++#endif
++ break;
++
++ case gcvHAL_QUERY_RESET_TIME_STAMP:
++#if gcdENABLE_RECOVERY
++ Interface->u.QueryResetTimeStamp.timeStamp = Kernel->resetTimeStamp;
++#else
++ Interface->u.QueryResetTimeStamp.timeStamp = 0;
++#endif
++ break;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ case gcvHAL_SYNC_POINT:
++ {
++ gctSYNC_POINT syncPoint;
++
++ switch (Interface->u.SyncPoint.command)
++ {
++ case gcvSYNC_POINT_CREATE:
++ gcmkONERROR(gckOS_CreateSyncPoint(Kernel->os, &syncPoint));
++
++ Interface->u.SyncPoint.syncPoint = gcmPTR_TO_UINT64(syncPoint);
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_SYNC_POINT,
++ syncPoint,
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvSYNC_POINT_DESTROY:
++ syncPoint = gcmUINT64_TO_PTR(Interface->u.SyncPoint.syncPoint);
++
++ gcmkONERROR(gckOS_DestroySyncPoint(Kernel->os, syncPoint));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_SYNC_POINT,
++ syncPoint));
++ break;
++
++ default:
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ break;
++ }
++ }
++ break;
++
++ case gcvHAL_CREATE_NATIVE_FENCE:
++ {
++ gctINT fenceFD;
++ gctSYNC_POINT syncPoint =
++ gcmUINT64_TO_PTR(Interface->u.CreateNativeFence.syncPoint);
++
++ gcmkONERROR(
++ gckOS_CreateNativeFence(Kernel->os,
++ Kernel->timeline,
++ syncPoint,
++ &fenceFD));
++
++ Interface->u.CreateNativeFence.fenceFD = fenceFD;
++ }
++ break;
++#endif
++
++ default:
++ /* Invalid command. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++OnError:
++ /* Save status. */
++ Interface->status = status;
++
++ if (gcmIS_ERROR(status))
++ {
++ if (locked)
++ {
++ /* Roll back the lock. */
++ gcmkVERIFY_OK(
++ gckVIDMEM_Unlock(Kernel,
++ gcmUINT64_TO_PTR(Interface->u.LockVideoMemory.node),
++ gcvSURF_TYPE_UNKNOWN,
++ &asynchronous));
++
++ if (gcvTRUE == asynchronous)
++ {
++ /* Bottom Half */
++ gcmkVERIFY_OK(
++ gckVIDMEM_Unlock(Kernel,
++ gcmUINT64_TO_PTR(Interface->u.LockVideoMemory.node),
++ gcvSURF_TYPE_UNKNOWN,
++ gcvNULL));
++ }
++ }
++ }
++
++#if QNX_SINGLE_THREADED_DEBUGGING
++ gckOS_ReleaseMutex(Kernel->os, Kernel->debugMutex);
++#endif
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
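++
++/* Illustrative usage sketch (not in the original driver): querying the GPU
++** base address through the dispatcher, assuming a constructed gckKERNEL.
++** Only fields used by the gcvHAL_GET_BASE_ADDRESS case above appear here.
++**
++** gcsHAL_INTERFACE iface;
++** iface.command = gcvHAL_GET_BASE_ADDRESS;
++**
++** gcmkONERROR(gckKERNEL_Dispatch(kernel, gcvFALSE, &iface));
++** gcmkONERROR(iface.status);
++** baseAddress = iface.u.GetBaseAddress.baseAddress;
++*/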
++
++/*******************************************************************************
++** gckKERNEL_AttachProcess
++**
++** Attach or detach a process.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctBOOL Attach
++** gcvTRUE if a new process gets attached or gcvFALSE when a process
++** gets detached.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_AttachProcess(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Attach
++ )
++{
++ gceSTATUS status;
++ gctUINT32 processID;
++
++ gcmkHEADER_ARG("Kernel=0x%x Attach=%d", Kernel, Attach);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Get current process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ gcmkONERROR(gckKERNEL_AttachProcessEx(Kernel, Attach, processID));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_AttachProcessEx
++**
++** Attach or detach a process with the given PID. Can be paired with gckKERNEL_AttachProcess
++** provided the programmer is aware of the consequences.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctBOOL Attach
++** gcvTRUE if a new process gets attached or gcvFALSE when a process
++** gets detached.
++**
++** gctUINT32 PID
++** PID of the process to attach or detach.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_AttachProcessEx(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Attach,
++ IN gctUINT32 PID
++ )
++{
++ gceSTATUS status;
++ gctINT32 old;
++
++ gcmkHEADER_ARG("Kernel=0x%x Attach=%d PID=%d", Kernel, Attach, PID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ if (Attach)
++ {
++ /* Increment the number of clients attached. */
++ gcmkONERROR(
++ gckOS_AtomIncrement(Kernel->os, Kernel->atomClients, &old));
++
++ if (old == 0)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg == gcvNULL)
++#endif
++ {
++ gcmkONERROR(gckOS_Broadcast(Kernel->os,
++ Kernel->hardware,
++ gcvBROADCAST_FIRST_PROCESS));
++ }
++ }
++
++ if (Kernel->dbCreated)
++ {
++ /* Create the process database. */
++ gcmkONERROR(gckKERNEL_CreateProcessDB(Kernel, PID));
++ }
++ }
++ else
++ {
++ if (Kernel->dbCreated)
++ {
++ /* Clean up the process database. */
++ gcmkONERROR(gckKERNEL_DestroyProcessDB(Kernel, PID));
++
++ /* Save the last known process ID. */
++ Kernel->db->lastProcessID = PID;
++ }
++
++#if gcdENABLE_VG
++ if (Kernel->vg == gcvNULL)
++#endif
++ {
++ status = gckEVENT_Submit(Kernel->eventObj, gcvTRUE, gcvFALSE);
++
++ if (status == gcvSTATUS_INTERRUPTED && Kernel->eventObj->submitTimer)
++ {
++ gcmkONERROR(gckOS_StartTimer(Kernel->os,
++ Kernel->eventObj->submitTimer,
++ 1));
++ }
++ else
++ {
++ gcmkONERROR(status);
++ }
++ }
++
++ /* Decrement the number of clients attached. */
++ gcmkONERROR(
++ gckOS_AtomDecrement(Kernel->os, Kernel->atomClients, &old));
++
++ if (old == 1)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg == gcvNULL)
++#endif
++ {
++ /* Last client detached, switch to SUSPEND power state. */
++ gcmkONERROR(gckOS_Broadcast(Kernel->os,
++ Kernel->hardware,
++ gcvBROADCAST_LAST_PROCESS));
++ }
++
++ /* Flush the debug cache. */
++ gcmkDEBUGFLUSH(~0U);
++ }
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
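++
++/* Attach/detach calls are reference counted through atomClients, so they
++** must be balanced. A sketch of a typical open/close pairing, assuming a
++** constructed gckKERNEL and the caller's PID:
++**
++** gcmkONERROR(gckKERNEL_AttachProcessEx(kernel, gcvTRUE, pid));
++** ...
++** gcmkVERIFY_OK(gckKERNEL_AttachProcessEx(kernel, gcvFALSE, pid));
++**
++** The first attach broadcasts gcvBROADCAST_FIRST_PROCESS and the last
++** detach broadcasts gcvBROADCAST_LAST_PROCESS, as implemented above.
++*/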
++
++#if gcdSECURE_USER
++gceSTATUS
++gckKERNEL_MapLogicalToPhysical(
++ IN gckKERNEL Kernel,
++ IN gcskSECURE_CACHE_PTR Cache,
++ IN OUT gctPOINTER * Data
++ )
++{
++ gceSTATUS status;
++ static gctBOOL baseAddressValid = gcvFALSE;
++ static gctUINT32 baseAddress;
++ gctBOOL needBase;
++ gcskLOGICAL_CACHE_PTR slot;
++
++ gcmkHEADER_ARG("Kernel=0x%x Cache=0x%x *Data=0x%x",
++ Kernel, Cache, gcmOPT_POINTER(Data));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ if (!baseAddressValid)
++ {
++ /* Get base address. */
++ gcmkONERROR(gckHARDWARE_GetBaseAddress(Kernel->hardware, &baseAddress));
++
++ baseAddressValid = gcvTRUE;
++ }
++
++ /* Does this state load need a base address? */
++ gcmkONERROR(gckHARDWARE_NeedBaseAddress(Kernel->hardware,
++ ((gctUINT32_PTR) Data)[-1],
++ &needBase));
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LRU
++ {
++ gcskLOGICAL_CACHE_PTR next;
++ gctINT i;
++
++ /* Walk all used cache slots. */
++ for (i = 1, slot = Cache->cache[0].next, next = gcvNULL;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = slot->next
++ )
++ {
++ if (slot->logical == *Data)
++ {
++ /* Bail out. */
++ next = slot;
++ break;
++ }
++ }
++
++ /* See if we had a miss. */
++ if (next == gcvNULL)
++ {
++ /* Use the tail of the cache. */
++ slot = Cache->cache[0].prev;
++
++ /* Initialize the cache line. */
++ slot->logical = *Data;
++
++ /* Map the logical address to a DMA address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma));
++ }
++
++ /* Move slot to head of list. */
++ if (slot != Cache->cache[0].next)
++ {
++ /* Unlink. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Move to head of chain. */
++ slot->prev = &Cache->cache[0];
++ slot->next = Cache->cache[0].next;
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++ }
++ }
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LINEAR
++ {
++ gctINT i;
++ gcskLOGICAL_CACHE_PTR next = gcvNULL;
++ gcskLOGICAL_CACHE_PTR oldestSlot = gcvNULL;
++ slot = gcvNULL;
++
++ if (Cache->cacheIndex != gcvNULL)
++ {
++ /* Walk the cache forwards. */
++ for (i = 1, slot = Cache->cacheIndex;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = slot->next)
++ {
++ if (slot->logical == *Data)
++ {
++ /* Bail out. */
++ next = slot;
++ break;
++ }
++
++ /* Determine age of this slot. */
++ if ((oldestSlot == gcvNULL)
++ || (oldestSlot->stamp > slot->stamp)
++ )
++ {
++ oldestSlot = slot;
++ }
++ }
++
++ if (next == gcvNULL)
++ {
++ /* Walk the cache backwards. */
++ for (slot = Cache->cacheIndex->prev;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = slot->prev)
++ {
++ if (slot->logical == *Data)
++ {
++ /* Bail out. */
++ next = slot;
++ break;
++ }
++
++ /* Determine age of this slot. */
++ if ((oldestSlot == gcvNULL)
++ || (oldestSlot->stamp > slot->stamp)
++ )
++ {
++ oldestSlot = slot;
++ }
++ }
++ }
++ }
++
++ /* See if we had a miss. */
++ if (next == gcvNULL)
++ {
++ if (Cache->cacheFree != 0)
++ {
++ slot = &Cache->cache[Cache->cacheFree];
++ gcmkASSERT(slot->logical == gcvNULL);
++
++ ++ Cache->cacheFree;
++ if (Cache->cacheFree >= gcmCOUNTOF(Cache->cache))
++ {
++ Cache->cacheFree = 0;
++ }
++ }
++ else
++ {
++ /* Use the oldest cache slot. */
++ gcmkASSERT(oldestSlot != gcvNULL);
++ slot = oldestSlot;
++
++ /* Unlink from the chain. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Append to the end. */
++ slot->prev = Cache->cache[0].prev;
++ slot->next = &Cache->cache[0];
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++ }
++
++ /* Initialize the cache line. */
++ slot->logical = *Data;
++
++ /* Map the logical address to a DMA address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma));
++ }
++
++ /* Save time stamp. */
++ slot->stamp = ++ Cache->cacheStamp;
++
++ /* Save current slot for next lookup. */
++ Cache->cacheIndex = slot;
++ }
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ {
++ gctINT i;
++ gctUINT32 data = gcmPTR2INT(*Data);
++ gctUINT32 key, index;
++ gcskLOGICAL_CACHE_PTR hash;
++
++ /* Generate a hash key. */
++ key = (data >> 24) + (data >> 16) + (data >> 8) + data;
++ index = key % gcmCOUNTOF(Cache->hash);
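++
++ /* Worked example: for data == 0x12345678 the key is
++ ** 0x12 + 0x1234 + 0x123456 + 0x12345678 == 0x12469D14, which is
++ ** then reduced modulo the hash table size. */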
++
++ /* Get the hash entry. */
++ hash = &Cache->hash[index];
++
++ for (slot = hash->nextHash, i = 0;
++ (slot != gcvNULL) && (i < gcdSECURE_CACHE_SLOTS);
++ slot = slot->nextHash, ++i
++ )
++ {
++ if (slot->logical == (*Data))
++ {
++ break;
++ }
++ }
++
++ if (slot == gcvNULL)
++ {
++ /* Grab from the tail of the cache. */
++ slot = Cache->cache[0].prev;
++
++ /* Unlink slot from any hash table it is part of. */
++ if (slot->prevHash != gcvNULL)
++ {
++ slot->prevHash->nextHash = slot->nextHash;
++ }
++ if (slot->nextHash != gcvNULL)
++ {
++ slot->nextHash->prevHash = slot->prevHash;
++ }
++
++ /* Initialize the cache line. */
++ slot->logical = *Data;
++
++ /* Map the logical address to a DMA address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma));
++
++ if (hash->nextHash != gcvNULL)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
++ "Hash Collision: logical=0x%x key=0x%08x",
++ *Data, key);
++ }
++
++ /* Insert the slot at the head of the hash list. */
++ slot->nextHash = hash->nextHash;
++ if (slot->nextHash != gcvNULL)
++ {
++ slot->nextHash->prevHash = slot;
++ }
++ slot->prevHash = hash;
++ hash->nextHash = slot;
++ }
++
++ /* Move slot to head of list. */
++ if (slot != Cache->cache[0].next)
++ {
++ /* Unlink. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Move to head of chain. */
++ slot->prev = &Cache->cache[0];
++ slot->next = Cache->cache[0].next;
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++ }
++ }
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_TABLE
++ {
++ gctUINT32 index = (gcmPTR2INT(*Data) % gcdSECURE_CACHE_SLOTS) + 1;
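++
++ /* Illustrative: this is a direct-mapped scheme. With, say,
++ ** gcdSECURE_CACHE_SLOTS == 32, logical address 0x1045 maps to slot
++ ** (0x1045 % 32) + 1 == 6; the +1 bias keeps slot 0 unused here. */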
++
++ /* Get cache slot. */
++ slot = &Cache->cache[index];
++
++ /* Check for cache miss. */
++ if (slot->logical != *Data)
++ {
++ /* Initialize the cache line. */
++ slot->logical = *Data;
++
++ /* Map the logical address to a DMA address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, *Data, &slot->dma));
++ }
++ }
++#endif
++
++ /* Return DMA address. */
++ *Data = gcmINT2PTR(slot->dma + (needBase ? baseAddress : 0));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Data=0x%08x", *Data);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_FlushTranslationCache(
++ IN gckKERNEL Kernel,
++ IN gcskSECURE_CACHE_PTR Cache,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gctINT i;
++ gcskLOGICAL_CACHE_PTR slot;
++ gctUINT8_PTR ptr;
++
++ gcmkHEADER_ARG("Kernel=0x%x Cache=0x%x Logical=0x%x Bytes=%lu",
++ Kernel, Cache, Logical, Bytes);
++
++ /* Do we need to flush the entire cache? */
++ if (Logical == gcvNULL)
++ {
++ /* Clear all cache slots. */
++ for (i = 1; i <= gcdSECURE_CACHE_SLOTS; ++i)
++ {
++ Cache->cache[i].logical = gcvNULL;
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ Cache->cache[i].nextHash = gcvNULL;
++ Cache->cache[i].prevHash = gcvNULL;
++#endif
++ }
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ /* Zero the hash table. */
++ for (i = 0; i < gcmCOUNTOF(Cache->hash); ++i)
++ {
++ Cache->hash[i].nextHash = gcvNULL;
++ }
++#endif
++
++ /* Reset the cache functionality. */
++ Cache->cacheIndex = gcvNULL;
++ Cache->cacheFree = 1;
++ Cache->cacheStamp = 0;
++ }
++ else
++ {
++ gctUINT8_PTR low = (gctUINT8_PTR) Logical;
++ gctUINT8_PTR high = low + Bytes;
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LRU
++ gcskLOGICAL_CACHE_PTR next;
++
++ /* Walk all used cache slots. */
++ for (i = 1, slot = Cache->cache[0].next;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = next
++ )
++ {
++ /* Save pointer to next slot. */
++ next = slot->next;
++
++ /* Test if this slot falls within the range to flush. */
++ ptr = (gctUINT8_PTR) slot->logical;
++ if ((ptr >= low) && (ptr < high))
++ {
++ /* Unlink slot. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Append slot to tail of cache. */
++ slot->prev = Cache->cache[0].prev;
++ slot->next = &Cache->cache[0];
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++
++ /* Mark slot as empty. */
++ slot->logical = gcvNULL;
++ }
++ }
++
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LINEAR
++ gcskLOGICAL_CACHE_PTR next;
++
++ for (i = 1, slot = Cache->cache[0].next;
++ (i <= gcdSECURE_CACHE_SLOTS) && (slot->logical != gcvNULL);
++ ++i, slot = next)
++ {
++ /* Save pointer to next slot. */
++ next = slot->next;
++
++ /* Test if this slot falls within the range to flush. */
++ ptr = (gctUINT8_PTR) slot->logical;
++ if ((ptr >= low) && (ptr < high))
++ {
++ /* Test if this slot is the current slot. */
++ if (slot == Cache->cacheIndex)
++ {
++ /* Move to next or previous slot. */
++ Cache->cacheIndex = (slot->next->logical != gcvNULL)
++ ? slot->next
++ : (slot->prev->logical != gcvNULL)
++ ? slot->prev
++ : gcvNULL;
++ }
++
++ /* Unlink slot from cache. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Insert slot to head of cache. */
++ slot->prev = &Cache->cache[0];
++ slot->next = Cache->cache[0].next;
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++
++ /* Mark slot as empty. */
++ slot->logical = gcvNULL;
++ slot->stamp = 0;
++ }
++ }
++
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ gctINT j;
++ gcskLOGICAL_CACHE_PTR hash, next;
++
++ /* Walk all hash tables. */
++ for (i = 0, hash = Cache->hash;
++ i < gcmCOUNTOF(Cache->hash);
++ ++i, ++hash)
++ {
++ /* Walk all slots in the hash. */
++ for (j = 0, slot = hash->nextHash;
++ (j < gcdSECURE_CACHE_SLOTS) && (slot != gcvNULL);
++ ++j, slot = next)
++ {
++ /* Save pointer to next slot. */
++ next = slot->next;
++
++ /* Test if this slot falls within the range to flush. */
++ ptr = (gctUINT8_PTR) slot->logical;
++ if ((ptr >= low) && (ptr < high))
++ {
++ /* Unlink slot from hash table. */
++ if (slot->prevHash == hash)
++ {
++ hash->nextHash = slot->nextHash;
++ }
++ else
++ {
++ slot->prevHash->nextHash = slot->nextHash;
++ }
++
++ if (slot->nextHash != gcvNULL)
++ {
++ slot->nextHash->prevHash = slot->prevHash;
++ }
++
++ /* Unlink slot from cache. */
++ slot->prev->next = slot->next;
++ slot->next->prev = slot->prev;
++
++ /* Append slot to tail of cache. */
++ slot->prev = Cache->cache[0].prev;
++ slot->next = &Cache->cache[0];
++ slot->prev->next = slot;
++ slot->next->prev = slot;
++
++ /* Mark slot as empty. */
++ slot->logical = gcvNULL;
++ slot->prevHash = gcvNULL;
++ slot->nextHash = gcvNULL;
++ }
++ }
++ }
++
++#elif gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_TABLE
++ gctUINT32 index;
++
++ /* Loop while inside the range. */
++ for (i = 1; (low < high) && (i <= gcdSECURE_CACHE_SLOTS); ++i)
++ {
++ /* Get index into cache for this range. */
++ index = (gcmPTR2INT(low) % gcdSECURE_CACHE_SLOTS) + 1;
++ slot = &Cache->cache[index];
++
++ /* Test if this slot falls within the range to flush. */
++ ptr = (gctUINT8_PTR) slot->logical;
++ if ((ptr >= low) && (ptr < high))
++ {
++ /* Remove entry from cache. */
++ slot->logical = gcvNULL;
++ }
++
++ /* Next block. */
++ low += gcdSECURE_CACHE_SLOTS;
++ }
++#endif
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckKERNEL_Recovery
++**
++** Try to recover the GPU from a fatal error.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_Recovery(
++ IN gckKERNEL Kernel
++ )
++{
++#if gcdENABLE_RECOVERY
++#define gcdEVENT_MASK 0x3FFFFFFF
++ gceSTATUS status;
++ gckEVENT eventObj;
++ gckHARDWARE hardware;
++#if gcdSECURE_USER
++ gctUINT32 processID;
++ gcskSECURE_CACHE_PTR cache;
++#endif
++ gctUINT32 oldValue;
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Grab gckEVENT object. */
++ eventObj = Kernel->eventObj;
++ gcmkVERIFY_OBJECT(eventObj, gcvOBJ_EVENT);
++
++ /* Grab gckHARDWARE object. */
++ hardware = Kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++#if gcdSECURE_USER
++ /* Flush the secure mapping cache. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(Kernel, processID, &cache));
++ gcmkONERROR(gckKERNEL_FlushTranslationCache(Kernel, cache, gcvNULL, 0));
++#endif
++
++ gcmkONERROR(
++ gckOS_AtomicExchange(Kernel->os, Kernel->resetAtom, 1, &oldValue));
++
++ if (oldValue)
++ {
++ /* Someone else is already recovering the GPU; back off. */
++ return gcvSTATUS_OK;
++ }
++
++ gcmkPRINT("[galcore]: GPU[%d] hang, automatic recovery.", Kernel->core);
++
++ /* Start a timer to clear the reset flag; until the timer expires,
++ ** further recovery requests are ignored. */
++ gcmkVERIFY_OK(
++ gckOS_StartTimer(Kernel->os,
++ Kernel->resetFlagClearTimer,
++ gcdGPU_TIMEOUT - 500));
++
++ /* Try issuing a soft reset for the GPU. */
++ status = gckHARDWARE_Reset(hardware);
++ if (status == gcvSTATUS_NOT_SUPPORTED)
++ {
++ /* Switch to OFF power. The next submit should return the GPU to ON
++ ** state. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(hardware,
++ gcvPOWER_OFF_RECOVERY));
++ }
++ else
++ {
++ /* Bail out on reset error. */
++ gcmkONERROR(status);
++ }
++
++ /* Handle all outstanding events now. */
++#if gcdSMP
++ gcmkONERROR(gckOS_AtomSet(Kernel->os, eventObj->pending, gcdEVENT_MASK));
++#else
++ eventObj->pending = gcdEVENT_MASK;
++#endif
++ gcmkONERROR(gckEVENT_Notify(eventObj, 1));
++
++ /* Again in case more events got submitted. */
++#if gcdSMP
++ gcmkONERROR(gckOS_AtomSet(Kernel->os, eventObj->pending, gcdEVENT_MASK));
++#else
++ eventObj->pending = gcdEVENT_MASK;
++#endif
++ gcmkONERROR(gckEVENT_Notify(eventObj, 2));
++
++ Kernel->resetTimeStamp++;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#else
++ return gcvSTATUS_OK;
++#endif
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_OpenUserData
++**
++** Get access to the user data.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctBOOL NeedCopy
++** The flag indicating whether or not the data should be copied.
++**
++** gctPOINTER StaticStorage
++** Pointer to the kernel storage where the data is to be copied if
++** NeedCopy is gcvTRUE.
++**
++** gctPOINTER UserPointer
++** User pointer to the data.
++**
++** gctSIZE_T Size
++** Size of the data.
++**
++** OUTPUT:
++**
++** gctPOINTER * KernelPointer
++** Pointer to the kernel pointer that will point to the data.
++*/
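++/*
++** Hypothetical usage sketch (the local names are illustrative, not taken
++** from this file), paired with gckKERNEL_CloseUserData below:
++**
++** gcsHAL_INTERFACE stack;
++** gctPOINTER iface = gcvNULL;
++**
++** gcmkONERROR(gckKERNEL_OpenUserData(
++** kernel, needCopy, &stack, userPtr, gcmSIZEOF(stack), &iface));
++**
++** (operate on iface)
++**
++** gcmkVERIFY_OK(gckKERNEL_CloseUserData(
++** kernel, needCopy, gcvTRUE, userPtr, gcmSIZEOF(stack), &iface));
++*/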
++gceSTATUS
++gckKERNEL_OpenUserData(
++ IN gckKERNEL Kernel,
++ IN gctBOOL NeedCopy,
++ IN gctPOINTER StaticStorage,
++ IN gctPOINTER UserPointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG(
++ "Kernel=0x%08X NeedCopy=%d StaticStorage=0x%08X "
++ "UserPointer=0x%08X Size=%lu KernelPointer=0x%08X",
++ Kernel, NeedCopy, StaticStorage, UserPointer, Size, KernelPointer
++ );
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(!NeedCopy || (StaticStorage != gcvNULL));
++ gcmkVERIFY_ARGUMENT(UserPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++
++ if (NeedCopy)
++ {
++ /* Copy the user data to the static storage. */
++ gcmkONERROR(gckOS_CopyFromUserData(
++ Kernel->os, StaticStorage, UserPointer, Size
++ ));
++
++ /* Set the kernel pointer. */
++ * KernelPointer = StaticStorage;
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ /* Map the user pointer. */
++ gcmkONERROR(gckOS_MapUserPointer(
++ Kernel->os, UserPointer, Size, &pointer
++ ));
++
++ /* Set the kernel pointer. */
++ * KernelPointer = pointer;
++ }
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_CloseUserData
++**
++** Release resources associated with the user data connection opened by
++** gckKERNEL_OpenUserData.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctBOOL NeedCopy
++** The flag indicating whether or not the data should be copied.
++**
++** gctBOOL FlushData
++** If gcvTRUE, the data is written back to the user.
++**
++** gctPOINTER UserPointer
++** User pointer to the data.
++**
++** gctSIZE_T Size
++** Size of the data.
++**
++** OUTPUT:
++**
++** gctPOINTER * KernelPointer
++** Kernel pointer to the data.
++*/
++gceSTATUS
++gckKERNEL_CloseUserData(
++ IN gckKERNEL Kernel,
++ IN gctBOOL NeedCopy,
++ IN gctBOOL FlushData,
++ IN gctPOINTER UserPointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctPOINTER pointer;
++
++ gcmkHEADER_ARG(
++ "Kernel=0x%08X NeedCopy=%d FlushData=%d "
++ "UserPointer=0x%08X Size=%lu KernelPointer=0x%08X",
++ Kernel, NeedCopy, FlushData, UserPointer, Size, KernelPointer
++ );
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(UserPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++
++ /* Get a shortcut to the kernel pointer. */
++ pointer = * KernelPointer;
++
++ if (pointer != gcvNULL)
++ {
++ if (NeedCopy)
++ {
++ if (FlushData)
++ {
++ gcmkONERROR(gckOS_CopyToUserData(
++ Kernel->os, * KernelPointer, UserPointer, Size
++ ));
++ }
++ }
++ else
++ {
++ /* Unmap record from kernel memory. */
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Kernel->os,
++ UserPointer,
++ Size,
++ * KernelPointer
++ ));
++ }
++
++ /* Reset the kernel pointer. */
++ * KernelPointer = gcvNULL;
++ }
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++void
++gckKERNEL_SetTimeOut(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 timeOut
++ )
++{
++ gcmkHEADER_ARG("Kernel=0x%x timeOut=%d", Kernel, timeOut);
++#if gcdGPU_TIMEOUT
++ Kernel->timeOut = timeOut;
++#endif
++ gcmkFOOTER_NO();
++}
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++gceSTATUS
++gckKERNEL_AllocateVirtualCommandBuffer(
++ IN gckKERNEL Kernel,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ )
++{
++ gckOS os = Kernel->os;
++ gceSTATUS status;
++ gctPOINTER logical;
++ gctSIZE_T pageCount;
++ gctSIZE_T bytes = *Bytes;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%X InUserSpace=%d *Bytes=%lu",
++ os, InUserSpace, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes != gcvNULL);
++ gcmkVERIFY_ARGUMENT(*Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ gcmkONERROR(gckOS_Allocate(os,
++ sizeof(gckVIRTUAL_COMMAND_BUFFER),
++ (gctPOINTER)&buffer));
++
++ gcmkONERROR(gckOS_ZeroMemory(buffer, sizeof(gckVIRTUAL_COMMAND_BUFFER)));
++
++ gcmkONERROR(gckOS_AllocatePagedMemoryEx(os,
++ gcvFALSE,
++ bytes,
++ &buffer->physical));
++
++ if (InUserSpace)
++ {
++ gcmkONERROR(gckOS_LockPages(os,
++ buffer->physical,
++ bytes,
++ gcvFALSE,
++ &logical,
++ &pageCount));
++
++ *Logical =
++ buffer->userLogical = logical;
++ }
++ else
++ {
++ gcmkONERROR(
++ gckOS_CreateKernelVirtualMapping(buffer->physical,
++ &pageCount,
++ &logical));
++ *Logical =
++ buffer->kernelLogical = logical;
++ }
++
++ buffer->pageCount = pageCount;
++ buffer->kernel = Kernel;
++
++ gcmkONERROR(gckOS_GetProcessID(&buffer->pid));
++
++ gcmkONERROR(gckMMU_AllocatePages(Kernel->mmu,
++ pageCount,
++ &buffer->pageTable,
++ &buffer->gpuAddress));
++
++ gcmkONERROR(gckOS_MapPagesEx(os,
++ Kernel->core,
++ buffer->physical,
++ pageCount,
++ buffer->pageTable));
++
++ gcmkONERROR(gckMMU_Flush(Kernel->mmu));
++
++ *Physical = buffer;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
++ "gpuAddress = %x pageCount = %d kernelLogical = %x userLogical=%x",
++ buffer->gpuAddress, buffer->pageCount,
++ buffer->kernelLogical, buffer->userLogical);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, Kernel->virtualBufferLock, gcvINFINITE));
++
++ if (Kernel->virtualBufferHead == gcvNULL)
++ {
++ Kernel->virtualBufferHead =
++ Kernel->virtualBufferTail = buffer;
++ }
++ else
++ {
++ buffer->prev = Kernel->virtualBufferTail;
++ Kernel->virtualBufferTail->next = buffer;
++ Kernel->virtualBufferTail = buffer;
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Kernel->virtualBufferLock));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back, guarding against the case where allocating the buffer
++ ** structure itself failed. */
++ if (buffer != gcvNULL)
++ {
++ if (buffer->gpuAddress)
++ {
++ gcmkVERIFY_OK(
++ gckMMU_FreePages(Kernel->mmu, buffer->pageTable, buffer->pageCount));
++ }
++
++ if (buffer->userLogical)
++ {
++ gcmkVERIFY_OK(
++ gckOS_UnlockPages(os, buffer->physical, bytes, buffer->userLogical));
++ }
++
++ if (buffer->kernelLogical)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DestroyKernelVirtualMapping(buffer->kernelLogical));
++ }
++
++ if (buffer->physical)
++ {
++ gcmkVERIFY_OK(gckOS_FreePagedMemory(os, buffer->physical, bytes));
++ }
++
++ gcmkVERIFY_OK(gckOS_Free(os, buffer));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_DestroyVirtualCommandBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ )
++{
++ gckOS os;
++ gckKERNEL kernel;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer = (gckVIRTUAL_COMMAND_BUFFER_PTR)Physical;
++
++ gcmkHEADER();
++ gcmkVERIFY_ARGUMENT(buffer != gcvNULL);
++
++ kernel = buffer->kernel;
++ os = kernel->os;
++
++ if (buffer->userLogical)
++ {
++ gcmkVERIFY_OK(gckOS_UnlockPages(os, buffer->physical, Bytes, Logical));
++ }
++ else
++ {
++ gcmkVERIFY_OK(gckOS_DestroyKernelVirtualMapping(Logical));
++ }
++
++ gcmkVERIFY_OK(
++ gckMMU_FreePages(kernel->mmu, buffer->pageTable, buffer->pageCount));
++
++ gcmkVERIFY_OK(gckOS_FreePagedMemory(os, buffer->physical, Bytes));
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, kernel->virtualBufferLock, gcvINFINITE));
++
++ if (buffer == kernel->virtualBufferHead)
++ {
++ if ((kernel->virtualBufferHead = buffer->next) == gcvNULL)
++ {
++ kernel->virtualBufferTail = gcvNULL;
++ }
++ }
++ else
++ {
++ buffer->prev->next = buffer->next;
++
++ if (buffer == kernel->virtualBufferTail)
++ {
++ kernel->virtualBufferTail = buffer->prev;
++ }
++ else
++ {
++ buffer->next->prev = buffer->prev;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, kernel->virtualBufferLock));
++
++ gcmkVERIFY_OK(gckOS_Free(os, buffer));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckKERNEL_GetGPUAddress(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++ gctPOINTER start;
++ gctINT pid;
++
++ gcmkHEADER_ARG("Logical = %x", Logical);
++
++ gckOS_GetProcessID(&pid);
++
++ status = gcvSTATUS_INVALID_ADDRESS;
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, Kernel->virtualBufferLock, gcvINFINITE));
++
++ /* Walk all command buffers. */
++ for (buffer = Kernel->virtualBufferHead; buffer != gcvNULL; buffer = buffer->next)
++ {
++ if (buffer->userLogical)
++ {
++ start = buffer->userLogical;
++ }
++ else
++ {
++ start = buffer->kernelLogical;
++ }
++
++ if (Logical >= start
++ && (Logical < (start + buffer->pageCount * 4096))
++ && pid == buffer->pid
++ )
++ {
++ * Address = buffer->gpuAddress + (Logical - start);
++ status = gcvSTATUS_OK;
++ break;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->virtualBufferLock));
++
++ gcmkFOOTER_NO();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_QueryGPUAddress(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 GpuAddress,
++ OUT gckVIRTUAL_COMMAND_BUFFER_PTR * Buffer
++ )
++{
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++ gctUINT32 start;
++ gceSTATUS status = gcvSTATUS_NOT_SUPPORTED;
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, Kernel->virtualBufferLock, gcvINFINITE));
++
++ /* Walk all command buffers. */
++ for (buffer = Kernel->virtualBufferHead; buffer != gcvNULL; buffer = buffer->next)
++ {
++ start = (gctUINT32)buffer->gpuAddress;
++
++ if (GpuAddress >= start && GpuAddress < (start + buffer->pageCount * 4096))
++ {
++ /* Found a matching range. */
++ *Buffer = buffer;
++ status = gcvSTATUS_OK;
++ break;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->virtualBufferLock));
++
++ return status;
++}
++#endif
++
++#if gcdLINK_QUEUE_SIZE
++static void
++gckLINKQUEUE_Dequeue(
++ IN gckLINKQUEUE LinkQueue
++ )
++{
++ gcmkASSERT(LinkQueue->count == gcdLINK_QUEUE_SIZE);
++
++ LinkQueue->count--;
++ LinkQueue->front = (LinkQueue->front + 1) % gcdLINK_QUEUE_SIZE;
++}
++
++void
++gckLINKQUEUE_Enqueue(
++ IN gckLINKQUEUE LinkQueue,
++ IN gctUINT32 start,
++ IN gctUINT32 end
++ )
++{
++ if (LinkQueue->count == gcdLINK_QUEUE_SIZE)
++ {
++ gckLINKQUEUE_Dequeue(LinkQueue);
++ }
++
++ gcmkASSERT(LinkQueue->count < gcdLINK_QUEUE_SIZE);
++
++ LinkQueue->count++;
++
++ LinkQueue->data[LinkQueue->rear].start = start;
++ LinkQueue->data[LinkQueue->rear].end = end;
++
++ gcmkVERIFY_OK(
++ gckOS_GetProcessID(&LinkQueue->data[LinkQueue->rear].pid));
++
++ LinkQueue->rear = (LinkQueue->rear + 1) % gcdLINK_QUEUE_SIZE;
++}
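++
++/* The link queue is a fixed-size ring: a full queue drops its oldest
++** entry before enqueueing, so only the most recent gcdLINK_QUEUE_SIZE
++** link records are retained for debugging. */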
++
++void
++gckLINKQUEUE_GetData(
++ IN gckLINKQUEUE LinkQueue,
++ IN gctUINT32 Index,
++ OUT gckLINKDATA * Data
++ )
++{
++ gcmkASSERT(Index < gcdLINK_QUEUE_SIZE); /* Index is unsigned; >= 0 is implicit. */
++
++ *Data = &LinkQueue->data[(Index + LinkQueue->front) % gcdLINK_QUEUE_SIZE];
++}
++#endif
++
++/******************************************************************************\
++*************************** Pointer - ID translation ***************************
++\******************************************************************************/
++#define gcdID_TABLE_LENGTH 1024
++typedef struct _gcsINTEGERDB * gckINTEGERDB;
++typedef struct _gcsINTEGERDB
++{
++ gckOS os;
++ gctPOINTER* table;
++ gctPOINTER mutex;
++ gctUINT32 tableLen;
++ gctUINT32 currentID;
++ gctUINT32 unused;
++}
++gcsINTEGERDB;
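++
++/* The database maps small integer IDs to pointers: an ID is a 1-based
++** index into a growable pointer table, so 0 can safely mean "no ID".
++** currentID tracks the next free slot and unused counts the remaining
++** free entries. */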
++
++gceSTATUS
++gckKERNEL_CreateIntegerDatabase(
++ IN gckKERNEL Kernel,
++ OUT gctPOINTER * Database
++ )
++{
++ gceSTATUS status;
++ gckINTEGERDB database = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%08X Datbase=0x%08X", Kernel, Database);
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Database != gcvNULL);
++
++ /* Allocate a database. */
++ gcmkONERROR(gckOS_Allocate(
++ Kernel->os, gcmSIZEOF(gcsINTEGERDB), (gctPOINTER *)&database));
++
++ gckOS_ZeroMemory(database, gcmSIZEOF(gcsINTEGERDB));
++
++ /* Allocate a pointer table. */
++ gcmkONERROR(gckOS_Allocate(
++ Kernel->os, gcmSIZEOF(gctPOINTER) * gcdID_TABLE_LENGTH, (gctPOINTER *)&database->table));
++
++ gckOS_ZeroMemory(database->table, gcmSIZEOF(gctPOINTER) * gcdID_TABLE_LENGTH);
++
++ /* Allocate a database mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Kernel->os, &database->mutex));
++
++ /* Initialize. */
++ database->currentID = 0;
++ database->unused = gcdID_TABLE_LENGTH;
++ database->os = Kernel->os;
++ database->tableLen = gcdID_TABLE_LENGTH;
++
++ *Database = database;
++
++ gcmkFOOTER_ARG("*Database=0x%08X", *Database);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Rollback. */
++ if (database)
++ {
++ if (database->table)
++ {
++ gcmkOS_SAFE_FREE(Kernel->os, database->table);
++ }
++
++ gcmkOS_SAFE_FREE(Kernel->os, database);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_DestroyIntegerDatabase(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Database
++ )
++{
++ gckINTEGERDB database = Database;
++
++ gcmkHEADER_ARG("Kernel=0x%08X Datbase=0x%08X", Kernel, Database);
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Database != gcvNULL);
++
++ /* Destroy pointer table. */
++ gcmkOS_SAFE_FREE(Kernel->os, database->table);
++
++ /* Destroy database mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, database->mutex));
++
++ /* Destroy database. */
++ gcmkOS_SAFE_FREE(Kernel->os, database);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckKERNEL_AllocateIntegerId(
++ IN gctPOINTER Database,
++ IN gctPOINTER Pointer,
++ OUT gctUINT32 * Id
++ )
++{
++ gceSTATUS status;
++ gckINTEGERDB database = Database;
++ gctUINT32 i, unused, currentID, tableLen;
++ gctPOINTER * table;
++ gckOS os = database->os;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Database=0x%08X Pointer=0x%08X", Database, Pointer);
++
++ gcmkVERIFY_ARGUMENT(Id != gcvNULL);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, database->mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ if (database->unused < 1)
++ {
++ /* Extend table. */
++ gcmkONERROR(
++ gckOS_Allocate(os,
++ gcmSIZEOF(gctPOINTER) * (database->tableLen + gcdID_TABLE_LENGTH),
++ (gctPOINTER *)&table));
++
++ gckOS_ZeroMemory(table + database->tableLen,
++ gcmSIZEOF(gctPOINTER) * gcdID_TABLE_LENGTH);
++
++ /* Copy data from old table. */
++ gckOS_MemCopy(table,
++ database->table,
++ database->tableLen * gcmSIZEOF(gctPOINTER));
++
++ gcmkOS_SAFE_FREE(os, database->table);
++
++ /* Update the database with the newly allocated table. */
++ database->table = table;
++ database->currentID = database->tableLen;
++ database->tableLen += gcdID_TABLE_LENGTH;
++ database->unused += gcdID_TABLE_LENGTH;
++ }
++
++ table = database->table;
++ currentID = database->currentID;
++ tableLen = database->tableLen;
++ unused = database->unused;
++
++ /* Connect id with pointer. */
++ table[currentID] = Pointer;
++
++ *Id = currentID + 1;
++
++ /* Update the currentID. */
++ if (--unused > 0)
++ {
++ for (i = 0; i < tableLen; i++)
++ {
++ if (++currentID >= tableLen)
++ {
++ /* Wrap to the beginning. */
++ currentID = 0;
++ }
++
++ if (table[currentID] == gcvNULL)
++ {
++ break;
++ }
++ }
++ }
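++
++ /* Note: the scan above always terminates at a free slot, because
++ ** unused > 0 guarantees at least one gcvNULL entry remains. */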
++
++ database->table = table;
++ database->currentID = currentID;
++ database->tableLen = tableLen;
++ database->unused = unused;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_ARG("*Id=%d", *Id);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_FreeIntegerId(
++ IN gctPOINTER Database,
++ IN gctUINT32 Id
++ )
++{
++ gceSTATUS status;
++ gckINTEGERDB database = Database;
++ gckOS os = database->os;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Database=0x%08X Id=%d", Database, Id);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, database->mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ if (!(Id > 0 && Id <= database->tableLen))
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ Id -= 1;
++
++ database->table[Id] = gcvNULL;
++
++ if (database->unused++ == 0)
++ {
++ database->currentID = Id;
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ acquired = gcvFALSE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_QueryIntegerId(
++ IN gctPOINTER Database,
++ IN gctUINT32 Id,
++ OUT gctPOINTER * Pointer
++ )
++{
++ gceSTATUS status;
++ gckINTEGERDB database = Database;
++ gctPOINTER pointer;
++ gckOS os = database->os;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Database=0x%08X Id=%d", Database, Id);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++
++ gcmkVERIFY_OK(gckOS_AcquireMutex(os, database->mutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ if (!(Id > 0 && Id <= database->tableLen))
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ Id -= 1;
++
++ pointer = database->table[Id];
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ acquired = gcvFALSE;
++
++ if (pointer)
++ {
++ *Pointer = pointer;
++ }
++ else
++ {
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++
++ gcmkFOOTER_ARG("*Pointer=0x%08X", *Pointer);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(os, database->mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gctUINT32
++gckKERNEL_AllocateNameFromPointer(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Pointer
++ )
++{
++ gceSTATUS status;
++ gctUINT32 name;
++ gctPOINTER database = Kernel->db->pointerDatabase;
++
++ gcmkHEADER_ARG("Kernel=0x%X Pointer=0x%X", Kernel, Pointer);
++
++ gcmkONERROR(
++ gckKERNEL_AllocateIntegerId(database, Pointer, &name));
++
++ gcmkFOOTER_ARG("name=%d", name);
++ return name;
++
++OnError:
++ gcmkFOOTER();
++ return 0;
++}
++
++gctPOINTER
++gckKERNEL_QueryPointerFromName(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name
++ )
++{
++ gceSTATUS status;
++ gctPOINTER pointer = gcvNULL;
++ gctPOINTER database = Kernel->db->pointerDatabase;
++
++ gcmkHEADER_ARG("Kernel=0x%X Name=%d", Kernel, Name);
++
++ /* Lookup in database to get pointer. */
++ gcmkONERROR(gckKERNEL_QueryIntegerId(database, Name, &pointer));
++
++ gcmkFOOTER_ARG("pointer=0x%X", pointer);
++ return pointer;
++
++OnError:
++ gcmkFOOTER();
++ return gcvNULL;
++}
++
++gceSTATUS
++gckKERNEL_DeleteName(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name
++ )
++{
++ gctPOINTER database = Kernel->db->pointerDatabase;
++
++ gcmkHEADER_ARG("Kernel=0x%X Name=0x%X", Kernel, Name);
++
++ /* Free name if exists. */
++ gcmkVERIFY_OK(gckKERNEL_FreeIntegerId(database, Name));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++/*******************************************************************************
++***** Test Code ****************************************************************
++*******************************************************************************/
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_command.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_command.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_command.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_command.c 2015-07-27 23:13:06.190893891 +0200
+@@ -0,0 +1,3042 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++#include "gc_hal_kernel_context.h"
++
++#ifdef __QNXNTO__
++#include <sys/slog.h>
++#endif
++
++#define _GC_OBJ_ZONE gcvZONE_COMMAND
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** _NewQueue
++**
++** Allocate a new command queue.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** OUTPUT:
++**
++** gckCOMMAND Command
++** gckCOMMAND object has been updated with a new command queue.
++*/
++static gceSTATUS
++_NewQueue(
++ IN OUT gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gctINT currentIndex, newIndex;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Switch to the next command buffer. */
++ currentIndex = Command->index;
++ newIndex = (currentIndex + 1) % gcdCOMMAND_QUEUES;
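++
++ /* The command queues form a small ring (e.g. two queues when
++ ** gcdCOMMAND_QUEUES == 2; the count is configuration-dependent), so
++ ** the kernel ping-pongs: while the GPU drains one queue, the CPU
++ ** fills the next once its signal reports it free. */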
++
++ /* Wait for availability. */
++#if gcdDUMP_COMMAND
++ gcmkPRINT("@[kernel.waitsignal]");
++#endif
++
++ gcmkONERROR(gckOS_WaitSignal(
++ Command->os,
++ Command->queues[newIndex].signal,
++ gcvINFINITE
++ ));
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ if (newIndex < currentIndex)
++ {
++ Command->wrapCount += 1;
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ 2 * 4,
++ "%s(%d): queue array wrapped around.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ 3 * 4,
++ "%s(%d): total queue wrap arounds %d.\n",
++ __FUNCTION__, __LINE__, Command->wrapCount
++ );
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ 3 * 4,
++ "%s(%d): switched to queue %d.\n",
++ __FUNCTION__, __LINE__, newIndex
++ );
++#endif
++
++ /* Update gckCOMMAND object with new command queue. */
++ Command->index = newIndex;
++ Command->newQueue = gcvTRUE;
++ Command->logical = Command->queues[newIndex].logical;
++ Command->offset = 0;
++
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(
++ Command->os,
++ Command->logical,
++ (gctUINT32 *) &Command->physical
++ ));
++
++ if (currentIndex != -1)
++ {
++ /* Mark the command queue as available. */
++ gcmkONERROR(gckEVENT_Signal(
++ Command->kernel->eventObj,
++ Command->queues[currentIndex].signal,
++ gcvKERNEL_COMMAND
++ ));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("Command->index=%d", Command->index);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_IncrementCommitAtom(
++ IN gckCOMMAND Command,
++ IN gctBOOL Increment
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware;
++ gctINT32 atomValue;
++ gctBOOL powerAcquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Extract the gckHARDWARE object. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Grab the power mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, hardware->powerMutex, gcvINFINITE
++ ));
++ powerAcquired = gcvTRUE;
++
++ /* Increment the commit atom. */
++ if (Increment)
++ {
++ gcmkONERROR(gckOS_AtomIncrement(
++ Command->os, Command->atomCommit, &atomValue
++ ));
++ }
++ else
++ {
++ gcmkONERROR(gckOS_AtomDecrement(
++ Command->os, Command->atomCommit, &atomValue
++ ));
++ }
++
++ /* Release the power mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(
++ Command->os, hardware->powerMutex
++ ));
++ powerAcquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (powerAcquired)
++ {
++ /* Release the power mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(
++ Command->os, hardware->powerMutex
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdSECURE_USER
++static gceSTATUS
++_ProcessHints(
++ IN gckCOMMAND Command,
++ IN gctUINT32 ProcessID,
++ IN gcoCMDBUF CommandBuffer
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gckKERNEL kernel;
++ gctBOOL needCopy = gcvFALSE;
++ gcskSECURE_CACHE_PTR cache;
++ gctUINT8_PTR commandBufferLogical;
++ gctUINT8_PTR hintedData;
++ gctUINT32_PTR hintArray;
++ gctUINT i, hintCount;
++
++ gcmkHEADER_ARG(
++ "Command=0x%08X ProcessID=%d CommandBuffer=0x%08X",
++ Command, ProcessID, CommandBuffer
++ );
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Reset state array pointer. */
++ hintArray = gcvNULL;
++
++ /* Get the kernel object. */
++ kernel = Command->kernel;
++
++ /* Get the cache form the database. */
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(kernel, ProcessID, &cache));
++
++ /* Determine the start of the command buffer. */
++ commandBufferLogical
++ = (gctUINT8_PTR) CommandBuffer->logical
++ + CommandBuffer->startOffset;
++
++ /* Determine the number of records in the state array. */
++ hintCount = CommandBuffer->hintArrayTail - CommandBuffer->hintArray;
++
++ /* Check whether we need to copy the structures or not. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Command->os, ProcessID, &needCopy));
++
++ /* Get access to the state array. */
++ if (needCopy)
++ {
++ gctUINT copySize;
++
++ if (Command->hintArrayAllocated &&
++ (Command->hintArraySize < CommandBuffer->hintArraySize))
++ {
++ gcmkONERROR(gcmkOS_SAFE_FREE(Command->os, gcmUINT64_TO_PTR(Command->hintArray)));
++ Command->hintArraySize = 0;
++ }
++
++ if (!Command->hintArrayAllocated)
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkONERROR(gckOS_Allocate(
++ Command->os,
++ CommandBuffer->hintArraySize,
++ &pointer
++ ));
++
++ Command->hintArray = gcmPTR_TO_UINT64(pointer);
++ Command->hintArrayAllocated = gcvTRUE;
++ Command->hintArraySize = CommandBuffer->hintArraySize;
++ }
++
++ hintArray = gcmUINT64_TO_PTR(Command->hintArray);
++ copySize = hintCount * gcmSIZEOF(gctUINT32);
++
++ gcmkONERROR(gckOS_CopyFromUserData(
++ Command->os,
++ hintArray,
++ gcmUINT64_TO_PTR(CommandBuffer->hintArray),
++ copySize
++ ));
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkONERROR(gckOS_MapUserPointer(
++ Command->os,
++ gcmUINT64_TO_PTR(CommandBuffer->hintArray),
++ CommandBuffer->hintArraySize,
++ &pointer
++ ));
++
++ hintArray = pointer;
++ }
++
++ /* Scan through the buffer. */
++ for (i = 0; i < hintCount; i += 1)
++ {
++ /* Determine the location of the hinted data. */
++ hintedData = commandBufferLogical + hintArray[i];
++
++ /* Map handle into physical address. */
++ gcmkONERROR(gckKERNEL_MapLogicalToPhysical(
++ kernel, cache, (gctPOINTER) hintedData
++ ));
++ }
++
++OnError:
++ /* Get access to the state array. */
++ if (!needCopy && (hintArray != gcvNULL))
++ {
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ gcmUINT64_TO_PTR(CommandBuffer->hintArray),
++ CommandBuffer->hintArraySize,
++ hintArray
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++static gceSTATUS
++_FlushMMU(
++ IN gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gctUINT32 oldValue;
++ gckHARDWARE hardware = Command->kernel->hardware;
++
++ gcmkONERROR(gckOS_AtomicExchange(Command->os,
++ hardware->pageTableDirty,
++ 0,
++ &oldValue));
++
++ if (oldValue)
++ {
++ /* The page table was updated; flush the MMU before committing. */
++ gcmkONERROR(gckHARDWARE_FlushMMU(hardware));
++ }
++
++ return gcvSTATUS_OK;
++OnError:
++ return status;
++}
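++
++/* The dirty flag is consumed with an atomic exchange, so when several
++** commits race only the one that observes the flag still set issues
++** the MMU flush; the others read 0 and skip it. */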
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++static void
++_DumpBuffer(
++ IN gctPOINTER Buffer,
++ IN gctUINT32 GpuAddress,
++ IN gctSIZE_T Size
++ )
++{
++ gctINT i, line, left;
++ gctUINT32_PTR data = Buffer;
++
++ line = Size / 32;
++ left = Size % 32;
++
++ for (i = 0; i < line; i++)
++ {
++ gcmkPRINT("%X : %08X %08X %08X %08X %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]);
++ data += 8;
++ GpuAddress += 8 * 4;
++ }
++
++ switch(left)
++ {
++ case 28:
++ gcmkPRINT("%X : %08X %08X %08X %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3], data[4], data[5], data[6]);
++ break;
++ case 24:
++ gcmkPRINT("%X : %08X %08X %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3], data[4], data[5]);
++ break;
++ case 20:
++ gcmkPRINT("%X : %08X %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3], data[4]);
++ break;
++ case 16:
++ gcmkPRINT("%X : %08X %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2], data[3]);
++ break;
++ case 12:
++ gcmkPRINT("%X : %08X %08X %08X ",
++ GpuAddress, data[0], data[1], data[2]);
++ break;
++ case 8:
++ gcmkPRINT("%X : %08X %08X ",
++ GpuAddress, data[0], data[1]);
++ break;
++ case 4:
++ gcmkPRINT("%X : %08X ",
++ GpuAddress, data[0]);
++ break;
++ default:
++ break;
++ }
++}
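++
++/* Note: the switch above only handles word-aligned tails; if Size is
++** not a multiple of 4, the final partial word is silently skipped. */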
++
++static void
++_DumpKernelCommandBuffer(
++ IN gckCOMMAND Command
++)
++{
++ gctINT i;
++ gctUINT32 physical;
++ gctPOINTER entry;
++
++ for (i = 0; i < gcdCOMMAND_QUEUES; i++)
++ {
++ entry = Command->queues[i].logical;
++
++ gckOS_GetPhysicalAddress(Command->os, entry, &physical);
++
++ gcmkPRINT("Kernel command buffer %d\n", i);
++
++ _DumpBuffer(entry, physical, Command->pageSize);
++ }
++}
++#endif
++
++/******************************************************************************\
++****************************** gckCOMMAND API Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckCOMMAND_Construct
++**
++** Construct a new gckCOMMAND object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** OUTPUT:
++**
++** gckCOMMAND * Command
++** Pointer to a variable that will hold the pointer to the gckCOMMAND
++** object.
++*/
++gceSTATUS
++gckCOMMAND_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckCOMMAND * Command
++ )
++{
++ gckOS os;
++ gckCOMMAND command = gcvNULL;
++ gceSTATUS status;
++ gctINT i;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Command != gcvNULL);
++
++ /* Extract the gckOS object. */
++ os = Kernel->os;
++
++ /* Allocate the gckCOMMAND structure. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct _gckCOMMAND), &pointer));
++ command = pointer;
++
++ /* Reset the entire object. */
++ gcmkONERROR(gckOS_ZeroMemory(command, gcmSIZEOF(struct _gckCOMMAND)));
++
++ /* Initialize the gckCOMMAND object.*/
++ command->object.type = gcvOBJ_COMMAND;
++ command->kernel = Kernel;
++ command->os = os;
++
++ /* Get the command buffer requirements. */
++ gcmkONERROR(gckHARDWARE_QueryCommandBuffer(
++ Kernel->hardware,
++ &command->alignment,
++ &command->reservedHead,
++ &command->reservedTail
++ ));
++
++ /* Create the command queue mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &command->mutexQueue));
++
++ /* Create the context switching mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &command->mutexContext));
++
++#if VIVANTE_PROFILER_CONTEXT
++ /* Create the context sequence mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &command->mutexContextSeq));
++#endif
++
++ /* Create the power management semaphore. */
++ gcmkONERROR(gckOS_CreateSemaphore(os, &command->powerSemaphore));
++
++ /* Create the commit atom. */
++ gcmkONERROR(gckOS_AtomConstruct(os, &command->atomCommit));
++
++ /* Get the page size from the OS. */
++ gcmkONERROR(gckOS_GetPageSize(os, &command->pageSize));
++
++ /* Get process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&command->kernelProcessID));
++
++ /* Set hardware to pipe 0. */
++ command->pipeSelect = gcvPIPE_INVALID;
++
++ /* Pre-allocate the command queues. */
++ for (i = 0; i < gcdCOMMAND_QUEUES; ++i)
++ {
++ gcmkONERROR(gckOS_AllocateNonPagedMemory(
++ os,
++ gcvFALSE,
++ &command->pageSize,
++ &command->queues[i].physical,
++ &command->queues[i].logical
++ ));
++
++ gcmkONERROR(gckOS_CreateSignal(
++ os, gcvFALSE, &command->queues[i].signal
++ ));
++
++ gcmkONERROR(gckOS_Signal(
++ os, command->queues[i].signal, gcvTRUE
++ ));
++ }
++
++ /* No command queue in use yet. */
++ command->index = -1;
++ command->logical = gcvNULL;
++ command->newQueue = gcvFALSE;
++
++ /* Command is not yet running. */
++ command->running = gcvFALSE;
++
++ /* Command queue is idle. */
++ command->idle = gcvTRUE;
++
++ /* Commit stamp is zero. */
++ command->commitStamp = 0;
++
++ /* END event signal not created. */
++ command->endEventSignal = gcvNULL;
++
++ /* Return pointer to the gckCOMMAND object. */
++ *Command = command;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Command=0x%x", *Command);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (command != gcvNULL)
++ {
++ if (command->atomCommit != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, command->atomCommit));
++ }
++
++ if (command->powerSemaphore != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(os, command->powerSemaphore));
++ }
++
++ if (command->mutexContext != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, command->mutexContext));
++ }
++
++#if VIVANTE_PROFILER_CONTEXT
++ if (command->mutexContextSeq != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, command->mutexContextSeq));
++ }
++#endif
++
++ if (command->mutexQueue != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, command->mutexQueue));
++ }
++
++ for (i = 0; i < gcdCOMMAND_QUEUES; ++i)
++ {
++ if (command->queues[i].signal != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ os, command->queues[i].signal
++ ));
++ }
++
++ if (command->queues[i].logical != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_FreeNonPagedMemory(
++ os,
++ command->pageSize,
++ command->queues[i].physical,
++ command->queues[i].logical
++ ));
++ }
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, command));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Destroy
++**
++** Destroy a gckCOMMAND object.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Destroy(
++ IN gckCOMMAND Command
++ )
++{
++ gctINT i;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Stop the command queue. */
++ gcmkVERIFY_OK(gckCOMMAND_Stop(Command, gcvFALSE));
++
++ for (i = 0; i < gcdCOMMAND_QUEUES; ++i)
++ {
++ gcmkASSERT(Command->queues[i].signal != gcvNULL);
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ Command->os, Command->queues[i].signal
++ ));
++
++ gcmkASSERT(Command->queues[i].logical != gcvNULL);
++ gcmkVERIFY_OK(gckOS_FreeNonPagedMemory(
++ Command->os,
++ Command->pageSize,
++ Command->queues[i].physical,
++ Command->queues[i].logical
++ ));
++ }
++
++ /* END event signal. */
++ if (Command->endEventSignal != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ Command->os, Command->endEventSignal
++ ));
++ }
++
++ /* Delete the context switching mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexContext));
++
++#if VIVANTE_PROFILER_CONTEXT
++ if (Command->mutexContextSeq != gcvNULL)
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexContextSeq));
++#endif
++
++ /* Delete the command queue mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Command->os, Command->mutexQueue));
++
++ /* Destroy the power management semaphore. */
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(Command->os, Command->powerSemaphore));
++
++ /* Destroy the commit atom. */
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Command->os, Command->atomCommit));
++
++#if gcdSECURE_USER
++ /* Free state array. */
++ if (Command->hintArrayAllocated)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Command->os, gcmUINT64_TO_PTR(Command->hintArray)));
++ Command->hintArrayAllocated = gcvFALSE;
++ }
++#endif
++
++ /* Mark object as unknown. */
++ Command->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckCOMMAND object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Command->os, Command));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_EnterCommit
++**
++** Acquire command queue synchronization objects.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** gctBOOL FromPower
++** Determines whether the call originates from inside the power
++** management or not.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_EnterCommit(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware;
++ gctBOOL atomIncremented = gcvFALSE;
++ gctBOOL semaAcquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Extract the gckHARDWARE object. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ if (!FromPower)
++ {
++ /* Increment COMMIT atom to let power management know that a commit is
++ ** in progress. */
++ gcmkONERROR(_IncrementCommitAtom(Command, gcvTRUE));
++ atomIncremented = gcvTRUE;
++
++ /* Notify the system the GPU has a commit. */
++ gcmkONERROR(gckOS_Broadcast(Command->os,
++ hardware,
++ gcvBROADCAST_GPU_COMMIT));
++
++ /* Acquire the power management semaphore. */
++ gcmkONERROR(gckOS_AcquireSemaphore(Command->os,
++ Command->powerSemaphore));
++ semaAcquired = gcvTRUE;
++ }
++
++ /* Grab the command queue mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Command->os,
++ Command->mutexQueue,
++ gcvINFINITE));
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (semaAcquired)
++ {
++ /* Release the power management semaphore. */
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(
++ Command->os, Command->powerSemaphore
++ ));
++ }
++
++ if (atomIncremented)
++ {
++ /* Decrement the commit atom. */
++ gcmkVERIFY_OK(_IncrementCommitAtom(
++ Command, gcvFALSE
++ ));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_ExitCommit
++**
++** Release command queue synchronization objects.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** gctBOOL FromPower
++** Determines whether the call originates from inside the power
++** management or not.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_ExitCommit(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Release the command queue mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexQueue));
++
++ if (!FromPower)
++ {
++ /* Release the power management semaphore. */
++ gcmkONERROR(gckOS_ReleaseSemaphore(Command->os,
++ Command->powerSemaphore));
++
++ /* Decrement the commit atom. */
++ gcmkONERROR(_IncrementCommitAtom(Command, gcvFALSE));
++ }
++
++ /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Start
++**
++** Start up the command queue.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object to start.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Start(
++ IN gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware;
++ gctUINT32 waitOffset;
++ gctSIZE_T waitLinkBytes;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->running)
++ {
++ /* Command queue already running. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Extract the gckHARDWARE object. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ if (Command->logical == gcvNULL)
++ {
++ /* Start at beginning of a new queue. */
++ gcmkONERROR(_NewQueue(Command));
++ }
++
++ /* Start at beginning of page. */
++ Command->offset = 0;
++
++ /* Set the available number of bytes for the WAIT/LINK command sequence. */
++ waitLinkBytes = Command->pageSize;
++
++ /* Append WAIT/LINK. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ hardware,
++ Command->logical,
++ 0,
++ &waitLinkBytes,
++ &waitOffset,
++ &Command->waitSize
++ ));
++
++ Command->waitLogical = (gctUINT8_PTR) Command->logical + waitOffset;
++ Command->waitPhysical = (gctUINT8_PTR) Command->physical + waitOffset;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the wait/link. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ Command->physical,
++ Command->logical,
++ waitLinkBytes
++ ));
++#endif
++
++ /* Adjust offset. */
++ Command->offset = waitLinkBytes;
++ Command->newQueue = gcvFALSE;
++
++ /* Enable command processor. */
++#ifdef __QNXNTO__
++ gcmkONERROR(gckHARDWARE_Execute(
++ hardware,
++ Command->logical,
++ Command->physical,
++ gcvTRUE,
++ waitLinkBytes
++ ));
++#else
++ gcmkONERROR(gckHARDWARE_Execute(
++ hardware,
++ Command->logical,
++ waitLinkBytes
++ ));
++#endif
++
++ /* Command queue is running. */
++ Command->running = gcvTRUE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Stop
++**
++** Stop the command queue.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object to stop.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Stop(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromRecovery
++ )
++{
++ gckHARDWARE hardware;
++ gceSTATUS status;
++ gctUINT32 idle;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (!Command->running)
++ {
++ /* Command queue is not running. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Extract the gckHARDWARE object. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ if (gckHARDWARE_IsFeatureAvailable(hardware,
++ gcvFEATURE_END_EVENT) == gcvSTATUS_TRUE)
++ {
++ /* Allocate the signal. */
++ if (Command->endEventSignal == gcvNULL)
++ {
++ gcmkONERROR(gckOS_CreateSignal(Command->os,
++ gcvTRUE,
++ &Command->endEventSignal));
++ }
++
++ /* Append the END EVENT command to trigger the signal. */
++ gcmkONERROR(gckEVENT_Stop(Command->kernel->eventObj,
++ Command->kernelProcessID,
++ Command->waitPhysical,
++ Command->waitLogical,
++ Command->endEventSignal,
++ &Command->waitSize));
++ }
++ else
++ {
++ /* Replace last WAIT with END. */
++ gcmkONERROR(gckHARDWARE_End(
++ hardware, Command->waitLogical, &Command->waitSize
++ ));
++
++ /* Update queue tail pointer. */
++ gcmkONERROR(gckHARDWARE_UpdateQueueTail(Command->kernel->hardware,
++ Command->logical,
++ Command->offset));
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the END. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ Command->waitPhysical,
++ Command->waitLogical,
++ Command->waitSize
++ ));
++#endif
++
++ /* Wait for idle. */
++ gcmkONERROR(gckHARDWARE_GetIdle(hardware, !FromRecovery, &idle));
++ }
++
++ /* Command queue is no longer running. */
++ Command->running = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Commit
++**
++** Commit a command buffer to the command queue.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** gckCONTEXT Context
++** Pointer to a gckCONTEXT object.
++**
++** gcoCMDBUF CommandBuffer
++** Pointer to a gcoCMDBUF object.
++**
++** gcsSTATE_DELTA_PTR StateDelta
++** Pointer to the state delta.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Commit(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context,
++ IN gcoCMDBUF CommandBuffer,
++ IN gcsSTATE_DELTA_PTR StateDelta,
++ IN gcsQUEUE_PTR EventQueue,
++ IN gctUINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gctBOOL commitEntered = gcvFALSE;
++ gctBOOL contextAcquired = gcvFALSE;
++ gckHARDWARE hardware;
++ gctBOOL needCopy = gcvFALSE;
++ gcsQUEUE_PTR eventRecord = gcvNULL;
++ gcsQUEUE _eventRecord;
++ gcsQUEUE_PTR nextEventRecord;
++ gctBOOL commandBufferMapped = gcvFALSE;
++ gcoCMDBUF commandBufferObject = gcvNULL;
++
++#if !gcdNULL_DRIVER
++ gcsCONTEXT_PTR contextBuffer;
++ struct _gcoCMDBUF _commandBufferObject;
++ gctPHYS_ADDR commandBufferPhysical;
++ gctUINT8_PTR commandBufferLogical;
++ gctUINT8_PTR commandBufferLink;
++ gctUINT commandBufferSize;
++ gctSIZE_T nopBytes;
++ gctSIZE_T pipeBytes;
++ gctSIZE_T linkBytes;
++ gctSIZE_T bytes;
++ gctUINT32 offset;
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ gctPHYS_ADDR entryPhysical;
++#endif
++ gctPOINTER entryLogical;
++ gctSIZE_T entryBytes;
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ gctPHYS_ADDR exitPhysical;
++#endif
++ gctPOINTER exitLogical;
++ gctSIZE_T exitBytes;
++ gctPHYS_ADDR waitLinkPhysical;
++ gctPOINTER waitLinkLogical;
++ gctSIZE_T waitLinkBytes;
++ gctPHYS_ADDR waitPhysical;
++ gctPOINTER waitLogical;
++ gctUINT32 waitOffset;
++ gctSIZE_T waitSize;
++
++#if gcdDUMP_COMMAND
++ gctPOINTER contextDumpLogical = gcvNULL;
++ gctSIZE_T contextDumpBytes = 0;
++ gctPOINTER bufferDumpLogical = gcvNULL;
++ gctSIZE_T bufferDumpBytes = 0;
++# endif
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
++ gctBOOL sequenceAcquired = gcvFALSE;
++#endif
++
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG(
++ "Command=0x%x CommandBuffer=0x%x ProcessID=%d",
++ Command, CommandBuffer, ProcessID
++ );
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ if (Command->kernel->core == gcvCORE_2D)
++ {
++ /* There is no context for 2D. */
++ Context = gcvNULL;
++ }
++
++ gcmkONERROR(_FlushMMU(Command));
++
++#if VIVANTE_PROFILER_CONTEXT
++ if((Command->kernel->hardware->gpuProfiler) && (Command->kernel->profileEnable))
++ {
++ /* Acquire the context sequence mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, Command->mutexContextSeq, gcvINFINITE
++ ));
++ sequenceAcquired = gcvTRUE;
++ }
++#endif
++
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(Command, gcvFALSE));
++ commitEntered = gcvTRUE;
++
++ /* Acquire the context switching mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, Command->mutexContext, gcvINFINITE
++ ));
++ contextAcquired = gcvTRUE;
++
++ /* Extract the gckHARDWARE and gckEVENT objects. */
++ hardware = Command->kernel->hardware;
++
++    /* Check whether we need to copy the structures or not. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Command->os, ProcessID, &needCopy));
++
++#if gcdNULL_DRIVER
++ /* Context switch required? */
++ if ((Context != gcvNULL) && (Command->currContext != Context))
++ {
++ /* Yes, merge in the deltas. */
++ gckCONTEXT_Update(Context, ProcessID, StateDelta);
++
++ /* Update the current context. */
++ Command->currContext = Context;
++ }
++#else
++ if (needCopy)
++ {
++ commandBufferObject = &_commandBufferObject;
++
++ gcmkONERROR(gckOS_CopyFromUserData(
++ Command->os,
++ commandBufferObject,
++ CommandBuffer,
++ gcmSIZEOF(struct _gcoCMDBUF)
++ ));
++
++ gcmkVERIFY_OBJECT(commandBufferObject, gcvOBJ_COMMANDBUFFER);
++ }
++ else
++ {
++ gcmkONERROR(gckOS_MapUserPointer(
++ Command->os,
++ CommandBuffer,
++ gcmSIZEOF(struct _gcoCMDBUF),
++ &pointer
++ ));
++
++ commandBufferObject = pointer;
++
++ gcmkVERIFY_OBJECT(commandBufferObject, gcvOBJ_COMMANDBUFFER);
++ commandBufferMapped = gcvTRUE;
++ }
++
++ /* Query the size of NOP command. */
++ gcmkONERROR(gckHARDWARE_Nop(
++ hardware, gcvNULL, &nopBytes
++ ));
++
++ /* Query the size of pipe select command sequence. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ hardware, gcvNULL, gcvPIPE_3D, &pipeBytes
++ ));
++
++ /* Query the size of LINK command. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware, gcvNULL, gcvNULL, 0, &linkBytes
++ ));
++
++ /* Compute the command buffer entry and the size. */
++ commandBufferLogical
++ = (gctUINT8_PTR) gcmUINT64_TO_PTR(commandBufferObject->logical)
++ + commandBufferObject->startOffset;
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ Command->os,
++ commandBufferLogical,
++ (gctUINT32_PTR)&commandBufferPhysical
++ ));
++
++ commandBufferSize
++ = commandBufferObject->offset
++ + Command->reservedTail
++ - commandBufferObject->startOffset;
++
++ /* Get the current offset. */
++ offset = Command->offset;
++
++ /* Compute number of bytes left in current kernel command queue. */
++ bytes = Command->pageSize - offset;
++
++ /* Query the size of WAIT/LINK command sequence. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ hardware,
++ gcvNULL,
++ offset,
++ &waitLinkBytes,
++ gcvNULL,
++ gcvNULL
++ ));
++
++ /* Is there enough space in the current command queue? */
++ if (bytes < waitLinkBytes)
++ {
++ /* No, create a new one. */
++ gcmkONERROR(_NewQueue(Command));
++
++ /* Get the new current offset. */
++ offset = Command->offset;
++
++ /* Recompute the number of bytes in the new kernel command queue. */
++ bytes = Command->pageSize - offset;
++ gcmkASSERT(bytes >= waitLinkBytes);
++ }
++
++    /* Compute the location of the WAIT/LINK command sequence. */
++ waitLinkPhysical = (gctUINT8_PTR) Command->physical + offset;
++ waitLinkLogical = (gctUINT8_PTR) Command->logical + offset;
++
++ /* Context switch required? */
++ if (Context == gcvNULL)
++ {
++ /* See if we have to switch pipes for the command buffer. */
++ if (commandBufferObject->entryPipe == Command->pipeSelect)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the entry command buffer pipes
++ ** are different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Compute the entry. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) commandBufferPhysical + offset;
++#endif
++ entryLogical = commandBufferLogical + offset;
++ entryBytes = commandBufferSize - offset;
++ }
++ else if (Command->currContext != Context)
++ {
++        /* Temporarily disable context length optimization. */
++ Context->dirty = gcvTRUE;
++
++ /* Get the current context buffer. */
++ contextBuffer = Context->buffer;
++
++ /* Yes, merge in the deltas. */
++ gcmkONERROR(gckCONTEXT_Update(Context, ProcessID, StateDelta));
++
++ /* Determine context entry and exit points. */
++ if (0)
++ {
++ /* Reset 2D dirty flag. */
++ Context->dirty2D = gcvFALSE;
++
++ if (Context->dirty || commandBufferObject->using3D)
++ {
++ /***************************************************************
++ ** SWITCHING CONTEXT: 2D and 3D are used.
++ */
++
++ /* Reset 3D dirty flag. */
++ Context->dirty3D = gcvFALSE;
++
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_2D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + pipeBytes;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + pipeBytes;
++ entryBytes = Context->bufferSize - pipeBytes;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical;
++ entryBytes = Context->bufferSize;
++ }
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Ensure the NOP between 2D and 3D is in place so that the
++ execution falls through from 2D to 3D. */
++ gcmkONERROR(gckHARDWARE_Nop(
++ hardware,
++ contextBuffer->link2D,
++ &nopBytes
++ ));
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++
++ /* Mark context as not dirty. */
++ Context->dirty = gcvFALSE;
++ }
++ else
++ {
++ /***************************************************************
++ ** SWITCHING CONTEXT: 2D only command buffer.
++ */
++
++ /* Mark 3D as dirty. */
++ Context->dirty3D = gcvTRUE;
++
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_2D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + pipeBytes;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + pipeBytes;
++ entryBytes = Context->entryOffset3D - pipeBytes;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical;
++ entryBytes = Context->entryOffset3D;
++ }
++
++ /* Store the current context buffer. */
++ Context->dirtyBuffer = contextBuffer;
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_2D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* 3D is not used, generate a LINK from the end of 2D part of
++ the context buffer to the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link2D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ }
++
++ /* Not using 2D. */
++ else
++ {
++ /* Mark 2D as dirty. */
++ Context->dirty2D = gcvTRUE;
++
++ /* Store the current context buffer. */
++ Context->dirtyBuffer = contextBuffer;
++
++ if (Context->dirty || commandBufferObject->using3D)
++ {
++ /***************************************************************
++ ** SWITCHING CONTEXT: 3D only command buffer.
++ */
++
++ /* Reset 3D dirty flag. */
++ Context->dirty3D = gcvFALSE;
++
++ /* Determine context buffer entry offset. */
++ offset = (Command->pipeSelect == gcvPIPE_3D)
++
++ /* Skip pipe switching sequence. */
++ ? Context->entryOffset3D + pipeBytes
++
++ /* Do not skip pipe switching sequence. */
++ : Context->entryOffset3D;
++
++ /* Compute the entry. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + offset;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + offset;
++ entryBytes = Context->bufferSize - offset;
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ else
++ {
++ /***************************************************************
++ ** SWITCHING CONTEXT: "XD" command buffer - neither 2D nor 3D.
++ */
++
++ /* Mark 3D as dirty. */
++ Context->dirty3D = gcvTRUE;
++
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_3D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical
++ = (gctUINT8_PTR) contextBuffer->physical
++ + Context->entryOffsetXDFrom3D;
++#endif
++ entryLogical
++ = (gctUINT8_PTR) contextBuffer->logical
++ + Context->entryOffsetXDFrom3D;
++
++ entryBytes
++ = Context->bufferSize
++ - Context->entryOffsetXDFrom3D;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical
++ = (gctUINT8_PTR) contextBuffer->physical
++ + Context->entryOffsetXDFrom2D;
++#endif
++ entryLogical
++ = (gctUINT8_PTR) contextBuffer->logical
++ + Context->entryOffsetXDFrom2D;
++
++ entryBytes
++ = Context->totalSize
++ - Context->entryOffsetXDFrom2D;
++ }
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ }
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the context buffer cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ entryPhysical,
++ entryLogical,
++ entryBytes
++ ));
++#endif
++
++ /* Update the current context. */
++ Command->currContext = Context;
++
++#if gcdDUMP_COMMAND
++ contextDumpLogical = entryLogical;
++ contextDumpBytes = entryBytes;
++#endif
++ }
++
++ /* Same context. */
++ else
++ {
++ /* Determine context entry and exit points. */
++ if (commandBufferObject->using2D && Context->dirty2D)
++ {
++ /* Reset 2D dirty flag. */
++ Context->dirty2D = gcvFALSE;
++
++ /* Get the "dirty" context buffer. */
++ contextBuffer = Context->dirtyBuffer;
++
++ if (commandBufferObject->using3D && Context->dirty3D)
++ {
++ /* Reset 3D dirty flag. */
++ Context->dirty3D = gcvFALSE;
++
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_2D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + pipeBytes;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + pipeBytes;
++ entryBytes = Context->bufferSize - pipeBytes;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical;
++ entryBytes = Context->bufferSize;
++ }
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Ensure the NOP between 2D and 3D is in place so that the
++ execution falls through from 2D to 3D. */
++ gcmkONERROR(gckHARDWARE_Nop(
++ hardware,
++ contextBuffer->link2D,
++ &nopBytes
++ ));
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ else
++ {
++ /* Compute the entry. */
++ if (Command->pipeSelect == gcvPIPE_2D)
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + pipeBytes;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + pipeBytes;
++ entryBytes = Context->entryOffset3D - pipeBytes;
++ }
++ else
++ {
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical;
++ entryBytes = Context->entryOffset3D;
++ }
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_2D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* 3D is not used, generate a LINK from the end of 2D part of
++ the context buffer to the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link2D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ }
++ else
++ {
++ if (commandBufferObject->using3D && Context->dirty3D)
++ {
++ /* Reset 3D dirty flag. */
++ Context->dirty3D = gcvFALSE;
++
++ /* Get the "dirty" context buffer. */
++ contextBuffer = Context->dirtyBuffer;
++
++ /* Determine context buffer entry offset. */
++ offset = (Command->pipeSelect == gcvPIPE_3D)
++
++ /* Skip pipe switching sequence. */
++ ? Context->entryOffset3D + pipeBytes
++
++ /* Do not skip pipe switching sequence. */
++ : Context->entryOffset3D;
++
++ /* Compute the entry. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) contextBuffer->physical + offset;
++#endif
++ entryLogical = (gctUINT8_PTR) contextBuffer->logical + offset;
++ entryBytes = Context->bufferSize - offset;
++
++ /* See if we have to switch pipes between the context
++ and command buffers. */
++ if (commandBufferObject->entryPipe == gcvPIPE_3D)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the initial context pipes are
++ different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Generate a LINK from the context buffer to
++ the command buffer. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ contextBuffer->link3D,
++ commandBufferLogical + offset,
++ commandBufferSize - offset,
++ &linkBytes
++ ));
++ }
++ else
++ {
++ /* See if we have to switch pipes for the command buffer. */
++ if (commandBufferObject->entryPipe == Command->pipeSelect)
++ {
++ /* Skip pipe switching sequence. */
++ offset = pipeBytes;
++ }
++ else
++ {
++ /* The current hardware and the entry command buffer pipes
++ ** are different, switch to the correct pipe. */
++ gcmkONERROR(gckHARDWARE_PipeSelect(
++ Command->kernel->hardware,
++ commandBufferLogical,
++ commandBufferObject->entryPipe,
++ &pipeBytes
++ ));
++
++ /* Do not skip pipe switching sequence. */
++ offset = 0;
++ }
++
++ /* Compute the entry. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ entryPhysical = (gctUINT8_PTR) commandBufferPhysical + offset;
++#endif
++ entryLogical = commandBufferLogical + offset;
++ entryBytes = commandBufferSize - offset;
++ }
++ }
++ }
++
++#if gcdDUMP_COMMAND
++ bufferDumpLogical = commandBufferLogical + offset;
++ bufferDumpBytes = commandBufferSize - offset;
++#endif
++
++#if gcdSECURE_USER
++ /* Process user hints. */
++ gcmkONERROR(_ProcessHints(Command, ProcessID, commandBufferObject));
++#endif
++
++ /* Determine the location to jump to for the command buffer being
++ ** scheduled. */
++ if (Command->newQueue)
++ {
++ /* New command queue, jump to the beginning of it. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ exitPhysical = Command->physical;
++#endif
++ exitLogical = Command->logical;
++ exitBytes = Command->offset + waitLinkBytes;
++ }
++ else
++ {
++ /* Still within the preexisting command queue, jump to the new
++ WAIT/LINK command sequence. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ exitPhysical = waitLinkPhysical;
++#endif
++ exitLogical = waitLinkLogical;
++ exitBytes = waitLinkBytes;
++ }
++
++ /* Add a new WAIT/LINK command sequence. When the command buffer which is
++ currently being scheduled is fully executed by the GPU, the FE will
++ jump to this WAIT/LINK sequence. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ hardware,
++ waitLinkLogical,
++ offset,
++ &waitLinkBytes,
++ &waitOffset,
++ &waitSize
++ ));
++
++    /* Compute the location of the WAIT command. */
++ waitPhysical = (gctUINT8_PTR) waitLinkPhysical + waitOffset;
++ waitLogical = (gctUINT8_PTR) waitLinkLogical + waitOffset;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the command queue cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ exitPhysical,
++ exitLogical,
++ exitBytes
++ ));
++#endif
++
++ /* Determine the location of the LINK command in the command buffer. */
++ commandBufferLink
++ = (gctUINT8_PTR) gcmUINT64_TO_PTR(commandBufferObject->logical)
++ + commandBufferObject->offset;
++
++ /* Generate a LINK from the end of the command buffer being scheduled
++ back to the kernel command queue. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ commandBufferLink,
++ exitLogical,
++ exitBytes,
++ &linkBytes
++ ));
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the command buffer cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ ProcessID,
++ gcvNULL,
++ commandBufferPhysical,
++ commandBufferLogical,
++ commandBufferSize
++ ));
++#endif
++
++ /* Generate a LINK from the previous WAIT/LINK command sequence to the
++ entry determined above (either the context or the command buffer).
++ This LINK replaces the WAIT instruction from the previous WAIT/LINK
++ pair, therefore we use WAIT metrics for generation of this LINK.
++ This action will execute the entire sequence. */
++ gcmkONERROR(gckHARDWARE_Link(
++ hardware,
++ Command->waitLogical,
++ entryLogical,
++ entryBytes,
++ &Command->waitSize
++ ));
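++
++    /* For illustration (schematic only): the FE has been spinning in the
++    ** previous WAIT/LINK pair, and the LINK generated above patches the
++    ** WAIT so the FE enters the new buffer:
++    **
++    **     before:  ... [ WAIT ] [ LINK -> WAIT ]
++    **     after:   ... [ LINK -> entry ] [ LINK -> WAIT ]
++    **
++    ** The replacement LINK is generated with the WAIT's metrics, so it
++    ** fits exactly into the WAIT's slot. */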
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the link. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ Command->waitPhysical,
++ Command->waitLogical,
++ Command->waitSize
++ ));
++#endif
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ Command->waitLogical,
++ Command->waitSize,
++ gceDUMP_BUFFER_LINK,
++ gcvFALSE
++ );
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ contextDumpLogical,
++ contextDumpBytes,
++ gceDUMP_BUFFER_CONTEXT,
++ gcvFALSE
++ );
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ bufferDumpLogical,
++ bufferDumpBytes,
++ gceDUMP_BUFFER_USER,
++ gcvFALSE
++ );
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ waitLinkLogical,
++ waitLinkBytes,
++ gceDUMP_BUFFER_WAITLINK,
++ gcvFALSE
++ );
++
++ /* Update the current pipe. */
++ Command->pipeSelect = commandBufferObject->exitPipe;
++
++ /* Update command queue offset. */
++ Command->offset += waitLinkBytes;
++ Command->newQueue = gcvFALSE;
++
++ /* Update address of last WAIT. */
++ Command->waitPhysical = waitPhysical;
++ Command->waitLogical = waitLogical;
++ Command->waitSize = waitSize;
++
++ /* Update queue tail pointer. */
++ gcmkONERROR(gckHARDWARE_UpdateQueueTail(
++ hardware, Command->logical, Command->offset
++ ));
++
++#if gcdDUMP_COMMAND
++ gcmkPRINT("@[kernel.commit]");
++#endif
++#endif /* gcdNULL_DRIVER */
++
++ /* Release the context switching mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ contextAcquired = gcvFALSE;
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(Command, gcvFALSE));
++ commitEntered = gcvFALSE;
++
++#if VIVANTE_PROFILER_CONTEXT
++ if(sequenceAcquired)
++ {
++ gcmkONERROR(gckCOMMAND_Stall(Command, gcvTRUE));
++ if (Command->currContext)
++ {
++ gcmkONERROR(gckHARDWARE_UpdateContextProfile(
++ hardware,
++ Command->currContext));
++ }
++
++ /* Release the context switching mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContextSeq));
++ sequenceAcquired = gcvFALSE;
++ }
++#endif
++
++ /* Loop while there are records in the queue. */
++ while (EventQueue != gcvNULL)
++ {
++ if (needCopy)
++ {
++ /* Point to stack record. */
++ eventRecord = &_eventRecord;
++
++ /* Copy the data from the client. */
++ gcmkONERROR(gckOS_CopyFromUserData(
++ Command->os, eventRecord, EventQueue, gcmSIZEOF(gcsQUEUE)
++ ));
++ }
++ else
++ {
++ /* Map record into kernel memory. */
++ gcmkONERROR(gckOS_MapUserPointer(Command->os,
++ EventQueue,
++ gcmSIZEOF(gcsQUEUE),
++ &pointer));
++
++ eventRecord = pointer;
++ }
++
++ /* Append event record to event queue. */
++ gcmkONERROR(gckEVENT_AddList(
++ Command->kernel->eventObj, &eventRecord->iface, gcvKERNEL_PIXEL, gcvTRUE, gcvFALSE
++ ));
++
++ /* Next record in the queue. */
++ nextEventRecord = gcmUINT64_TO_PTR(eventRecord->next);
++
++ if (!needCopy)
++ {
++ /* Unmap record from kernel memory. */
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Command->os, EventQueue, gcmSIZEOF(gcsQUEUE), (gctPOINTER *) eventRecord
++ ));
++
++ eventRecord = gcvNULL;
++ }
++
++ EventQueue = nextEventRecord;
++ }
++
++ if (Command->kernel->eventObj->queueHead == gcvNULL
++ && Command->kernel->hardware->powerManagement == gcvTRUE
++ )
++ {
++        /* Commit-done event by which the worker thread knows all jobs are done. */
++ gcmkVERIFY_OK(
++ gckEVENT_CommitDone(Command->kernel->eventObj, gcvKERNEL_PIXEL));
++ }
++
++ /* Submit events. */
++ status = gckEVENT_Submit(Command->kernel->eventObj, gcvTRUE, gcvFALSE);
++
++ if (status == gcvSTATUS_INTERRUPTED)
++ {
++ gcmkTRACE(
++ gcvLEVEL_INFO,
++            "%s(%d): Interrupted in gckEVENT_Submit",
++ __FUNCTION__, __LINE__
++ );
++ status = gcvSTATUS_OK;
++ }
++ else
++ {
++ gcmkONERROR(status);
++ }
++
++ /* Unmap the command buffer pointer. */
++ if (commandBufferMapped)
++ {
++ gcmkONERROR(gckOS_UnmapUserPointer(
++ Command->os,
++ CommandBuffer,
++ gcmSIZEOF(struct _gcoCMDBUF),
++ commandBufferObject
++ ));
++
++ commandBufferMapped = gcvFALSE;
++ }
++
++    /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if ((eventRecord != gcvNULL) && !needCopy)
++ {
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ EventQueue,
++ gcmSIZEOF(gcsQUEUE),
++ (gctPOINTER *) eventRecord
++ ));
++ }
++
++ if (contextAcquired)
++ {
++ /* Release the context switching mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ }
++
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(Command, gcvFALSE));
++ }
++
++#if VIVANTE_PROFILER_CONTEXT
++ if (sequenceAcquired)
++ {
++ /* Release the context sequence mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContextSeq));
++ }
++#endif
++
++ /* Unmap the command buffer pointer. */
++ if (commandBufferMapped)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(
++ Command->os,
++ CommandBuffer,
++ gcmSIZEOF(struct _gcoCMDBUF),
++ commandBufferObject
++ ));
++ }
++
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++
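++/* Illustrative only: a minimal sketch of a gckCOMMAND_Commit call as a
++** dispatcher might issue it. Every variable below is a placeholder for an
++** object the caller already owns; the block is fenced with "#if 0" so it
++** is never compiled. */
++#if 0
++    gcmkONERROR(gckCOMMAND_Commit(
++        command,        /* gckCOMMAND object of the kernel. */
++        context,        /* gckCONTEXT returned by gckCOMMAND_Attach. */
++        commandBuffer,  /* User gcoCMDBUF pointer. */
++        stateDelta,     /* User gcsSTATE_DELTA_PTR. */
++        eventQueue,     /* Head of the user gcsQUEUE event list. */
++        processID       /* Committing process ID. */
++        ));
++#endif
++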
++/*******************************************************************************
++**
++** gckCOMMAND_Reserve
++**
++** Reserve space in the command queue. Also acquire the command queue mutex.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++**      Pointer to a gckCOMMAND object.
++**
++** gctSIZE_T RequestedBytes
++**          Number of bytes to reserve.
++**
++** OUTPUT:
++**
++** gctPOINTER * Buffer
++** Pointer to a variable that will receive the address of the reserved
++** space.
++**
++** gctSIZE_T * BufferSize
++** Pointer to a variable that will receive the number of bytes
++** available in the command queue.
++*/
++gceSTATUS
++gckCOMMAND_Reserve(
++ IN gckCOMMAND Command,
++ IN gctSIZE_T RequestedBytes,
++ OUT gctPOINTER * Buffer,
++ OUT gctSIZE_T * BufferSize
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T bytes;
++ gctSIZE_T requiredBytes;
++ gctUINT32 requestedAligned;
++
++ gcmkHEADER_ARG("Command=0x%x RequestedBytes=%lu", Command, RequestedBytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++    /* Compute the aligned number of requested bytes. */
++ requestedAligned = gcmALIGN(RequestedBytes, Command->alignment);
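++    /* For illustration: assuming a 32-byte alignment, a 100-byte request is
++    ** rounded up to 128 bytes here. */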
++
++ /* Another WAIT/LINK command sequence will have to be appended after
++ the requested area being reserved. Compute the number of bytes
++ required for WAIT/LINK at the location after the reserved area. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ Command->kernel->hardware,
++ gcvNULL,
++ Command->offset + requestedAligned,
++ &requiredBytes,
++ gcvNULL,
++ gcvNULL
++ ));
++
++ /* Compute total number of bytes required. */
++ requiredBytes += requestedAligned;
++
++ /* Compute number of bytes available in command queue. */
++ bytes = Command->pageSize - Command->offset;
++
++ /* Is there enough space in the current command queue? */
++ if (bytes < requiredBytes)
++ {
++ /* Create a new command queue. */
++ gcmkONERROR(_NewQueue(Command));
++
++ /* Recompute the number of bytes in the new kernel command queue. */
++ bytes = Command->pageSize - Command->offset;
++
++ /* Still not enough space? */
++ if (bytes < requiredBytes)
++ {
++ /* Rare case, not enough room in command queue. */
++ gcmkONERROR(gcvSTATUS_BUFFER_TOO_SMALL);
++ }
++ }
++
++    /* Return a pointer to the empty slot in the command queue. */
++ *Buffer = (gctUINT8 *) Command->logical + Command->offset;
++
++ /* Return number of bytes left in command queue. */
++ *BufferSize = bytes;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Buffer=0x%x *BufferSize=%lu", *Buffer, *BufferSize);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Execute
++**
++** Execute a previously reserved command queue by appending a WAIT/LINK command
++** sequence after it and modifying the last WAIT into a LINK command. The
++** command FIFO mutex will be released whether this function succeeds or not.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++**      Pointer to a gckCOMMAND object.
++**
++** gctSIZE_T RequestedBytes
++** Number of bytes previously reserved.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Execute(
++ IN gckCOMMAND Command,
++ IN gctSIZE_T RequestedBytes
++ )
++{
++ gceSTATUS status;
++
++ gctPHYS_ADDR waitLinkPhysical;
++ gctUINT8_PTR waitLinkLogical;
++ gctUINT32 waitLinkOffset;
++ gctSIZE_T waitLinkBytes;
++
++ gctPHYS_ADDR waitPhysical;
++ gctPOINTER waitLogical;
++ gctUINT32 waitOffset;
++ gctSIZE_T waitBytes;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ gctPHYS_ADDR execPhysical;
++#endif
++ gctPOINTER execLogical;
++ gctSIZE_T execBytes;
++
++ gcmkHEADER_ARG("Command=0x%x RequestedBytes=%lu", Command, RequestedBytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Compute offset for WAIT/LINK. */
++ waitLinkOffset = Command->offset + RequestedBytes;
++
++ /* Compute number of bytes left in command queue. */
++ waitLinkBytes = Command->pageSize - waitLinkOffset;
++
++    /* Compute the location of the WAIT/LINK command sequence. */
++ waitLinkPhysical = (gctUINT8_PTR) Command->physical + waitLinkOffset;
++ waitLinkLogical = (gctUINT8_PTR) Command->logical + waitLinkOffset;
++
++ /* Append WAIT/LINK in command queue. */
++ gcmkONERROR(gckHARDWARE_WaitLink(
++ Command->kernel->hardware,
++ waitLinkLogical,
++ waitLinkOffset,
++ &waitLinkBytes,
++ &waitOffset,
++ &waitBytes
++ ));
++
++    /* Compute the location of the WAIT command. */
++ waitPhysical = (gctUINT8_PTR) waitLinkPhysical + waitOffset;
++ waitLogical = waitLinkLogical + waitOffset;
++
++ /* Determine the location to jump to for the command buffer being
++ ** scheduled. */
++ if (Command->newQueue)
++ {
++ /* New command queue, jump to the beginning of it. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ execPhysical = Command->physical;
++#endif
++ execLogical = Command->logical;
++ execBytes = waitLinkOffset + waitLinkBytes;
++ }
++ else
++ {
++ /* Still within the preexisting command queue, jump directly to the
++ reserved area. */
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ execPhysical = (gctUINT8 *) Command->physical + Command->offset;
++#endif
++ execLogical = (gctUINT8 *) Command->logical + Command->offset;
++ execBytes = RequestedBytes + waitLinkBytes;
++ }
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ execPhysical,
++ execLogical,
++ execBytes
++ ));
++#endif
++
++ /* Convert the last WAIT into a LINK. */
++ gcmkONERROR(gckHARDWARE_Link(
++ Command->kernel->hardware,
++ Command->waitLogical,
++ execLogical,
++ execBytes,
++ &Command->waitSize
++ ));
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Command->os,
++ Command->kernelProcessID,
++ gcvNULL,
++ Command->waitPhysical,
++ Command->waitLogical,
++ Command->waitSize
++ ));
++#endif
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ Command->waitLogical,
++ Command->waitSize,
++ gceDUMP_BUFFER_LINK,
++ gcvFALSE
++ );
++
++ gcmkDUMPCOMMAND(
++ Command->os,
++ execLogical,
++ execBytes,
++ gceDUMP_BUFFER_KERNEL,
++ gcvFALSE
++ );
++
++ /* Update the pointer to the last WAIT. */
++ Command->waitPhysical = waitPhysical;
++ Command->waitLogical = waitLogical;
++ Command->waitSize = waitBytes;
++
++ /* Update the command queue. */
++ Command->offset += RequestedBytes + waitLinkBytes;
++ Command->newQueue = gcvFALSE;
++
++ /* Update queue tail pointer. */
++ gcmkONERROR(gckHARDWARE_UpdateQueueTail(
++ Command->kernel->hardware, Command->logical, Command->offset
++ ));
++
++#if gcdDUMP_COMMAND
++ gcmkPRINT("@[kernel.execute]");
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
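++/* Illustrative only: the usual pairing of gckCOMMAND_Reserve and
++** gckCOMMAND_Execute in a hypothetical kernel-side caller; "command" and
++** the byte count are placeholders, and the block is fenced with "#if 0"
++** so it is never compiled. */
++#if 0
++    gctPOINTER buffer;
++    gctSIZE_T bufferSize;
++    gctSIZE_T bytes = 64;
++
++    /* Reserve space; this also acquires the command queue mutex. */
++    gcmkONERROR(gckCOMMAND_Reserve(command, bytes, &buffer, &bufferSize));
++
++    /* ... assemble hardware commands into 'buffer' here ... */
++
++    /* Append WAIT/LINK after the reserved area and convert the previous
++    ** WAIT into a LINK so the FE executes the new commands. */
++    gcmkONERROR(gckCOMMAND_Execute(command, bytes));
++#endif
++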
++/*******************************************************************************
++**
++** gckCOMMAND_Stall
++**
++** The calling thread will be suspended until the command queue has been
++** completed.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++**      Pointer to a gckCOMMAND object.
++**
++** gctBOOL FromPower
++** Determines whether the call originates from inside the power
++** management or not.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Stall(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ )
++{
++#if gcdNULL_DRIVER
++ /* Do nothing with infinite hardware. */
++ return gcvSTATUS_OK;
++#else
++ gckOS os;
++ gckHARDWARE hardware;
++ gckEVENT eventObject;
++ gceSTATUS status;
++ gctSIGNAL signal = gcvNULL;
++ gctUINT timer = 0;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Extract the gckOS object pointer. */
++ os = Command->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Extract the gckHARDWARE object pointer. */
++ hardware = Command->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Extract the gckEVENT object pointer. */
++ eventObject = Command->kernel->eventObj;
++ gcmkVERIFY_OBJECT(eventObject, gcvOBJ_EVENT);
++
++ /* Allocate the signal. */
++ gcmkONERROR(gckOS_CreateSignal(os, gcvTRUE, &signal));
++
++ /* Append the EVENT command to trigger the signal. */
++ gcmkONERROR(gckEVENT_Signal(eventObject, signal, gcvKERNEL_PIXEL));
++
++ /* Submit the event queue. */
++ gcmkONERROR(gckEVENT_Submit(eventObject, gcvTRUE, FromPower));
++
++#if gcdDUMP_COMMAND
++ gcmkPRINT("@[kernel.stall]");
++#endif
++
++ if (status == gcvSTATUS_CHIP_NOT_READY)
++ {
++ /* Error. */
++ goto OnError;
++ }
++
++ do
++ {
++ /* Wait for the signal. */
++ status = gckOS_WaitSignal(os, signal, gcdGPU_ADVANCETIMER);
++
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT32 idle;
++
++ /* Read idle register. */
++ gcmkVERIFY_OK(gckHARDWARE_GetIdle(
++ hardware, gcvFALSE, &idle
++ ));
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): idle=%08x",
++ __FUNCTION__, __LINE__, idle
++ );
++
++ gcmkONERROR(gckOS_MemoryBarrier(os, gcvNULL));
++
++#ifdef __QNXNTO__
++ gctUINT32 reg_cmdbuf_fetch;
++ gctUINT32 reg_intr;
++
++ gcmkVERIFY_OK(gckOS_ReadRegisterEx(
++ Command->kernel->hardware->os, Command->kernel->core, 0x0664, &reg_cmdbuf_fetch
++ ));
++
++ if (idle == 0x7FFFFFFE)
++ {
++ /*
++ * GPU is idle so there should not be pending interrupts.
++ * Just double check.
++ *
++ * Note that reading interrupt register clears it.
++ * That's why we don't read it in all cases.
++ */
++ gcmkVERIFY_OK(gckOS_ReadRegisterEx(
++ Command->kernel->hardware->os, Command->kernel->core, 0x10, &reg_intr
++ ));
++
++ slogf(
++ _SLOG_SETCODE(1, 0),
++ _SLOG_CRITICAL,
++ "GALcore: Stall timeout (idle = 0x%X, command buffer fetch = 0x%X, interrupt = 0x%X)",
++ idle, reg_cmdbuf_fetch, reg_intr
++ );
++ }
++ else
++ {
++ slogf(
++ _SLOG_SETCODE(1, 0),
++ _SLOG_CRITICAL,
++ "GALcore: Stall timeout (idle = 0x%X, command buffer fetch = 0x%X)",
++ idle, reg_cmdbuf_fetch
++ );
++ }
++#endif
++#endif
++ /* Advance timer. */
++ timer += gcdGPU_ADVANCETIMER;
++ }
++ else if (status == gcvSTATUS_INTERRUPTED)
++ {
++ gcmkONERROR(gcvSTATUS_INTERRUPTED);
++ }
++
++ }
++ while (gcmIS_ERROR(status)
++#if gcdGPU_TIMEOUT
++ && (timer < Command->kernel->timeOut)
++#endif
++ );
++
++ /* Bail out on timeout. */
++ if (gcmIS_ERROR(status))
++ {
++ /* Broadcast the stuck GPU. */
++ gcmkONERROR(gckOS_Broadcast(
++ os, hardware, gcvBROADCAST_GPU_STUCK
++ ));
++ }
++
++ /* Delete the signal. */
++ gcmkVERIFY_OK(gckOS_DestroySignal(os, signal));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (signal != gcvNULL)
++ {
++ /* Free the signal. */
++ gcmkVERIFY_OK(gckOS_DestroySignal(os, signal));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++#endif
++}
++
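++/* Illustrative only: a typical stall, for example before a power state
++** change; the "command" variable is a placeholder and the block is fenced
++** with "#if 0" so it is never compiled. */
++#if 0
++    /* Block the calling thread until all queued commands have executed;
++    ** gcvTRUE marks the call as originating from power management. */
++    gcmkONERROR(gckCOMMAND_Stall(command, gcvTRUE));
++#endif
++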
++/*******************************************************************************
++**
++** gckCOMMAND_Attach
++**
++** Attach user process.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** gctUINT32 ProcessID
++** Current process ID.
++**
++** OUTPUT:
++**
++** gckCONTEXT * Context
++** Pointer to a variable that will receive a pointer to a new
++** gckCONTEXT object.
++**
++** gctSIZE_T * StateCount
++** Pointer to a variable that will receive the number of states
++** in the context buffer.
++*/
++gceSTATUS
++gckCOMMAND_Attach(
++ IN gckCOMMAND Command,
++ OUT gckCONTEXT * Context,
++ OUT gctSIZE_T * StateCount,
++ IN gctUINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Acquire the context switching mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, Command->mutexContext, gcvINFINITE
++ ));
++ acquired = gcvTRUE;
++
++ /* Construct a gckCONTEXT object. */
++ gcmkONERROR(gckCONTEXT_Construct(
++ Command->os,
++ Command->kernel->hardware,
++ ProcessID,
++ Context
++ ));
++
++ /* Return the number of states in the context. */
++ * StateCount = (* Context)->stateCount;
++
++ /* Release the context switching mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Context=0x%x", *Context);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Release mutex. */
++ if (acquired)
++ {
++ /* Release the context switching mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ acquired = gcvFALSE;
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckCOMMAND_Detach
++**
++** Detach user process.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** gckCONTEXT Context
++** Pointer to a gckCONTEXT object to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_Detach(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Command=0x%x Context=0x%x", Command, Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ /* Acquire the context switching mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(
++ Command->os, Command->mutexContext, gcvINFINITE
++ ));
++ acquired = gcvTRUE;
++
++    /* Destroy the gckCONTEXT object. */
++ gcmkONERROR(gckCONTEXT_Destroy(Context));
++
++ if (Command->currContext == Context)
++ {
++        /* Detach from the gckCOMMAND object if the destroyed context is the current context. */
++ Command->currContext = gcvNULL;
++ }
++
++ /* Release the context switching mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ acquired = gcvFALSE;
++
++    /* Success. */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Release mutex. */
++ if (acquired)
++ {
++ /* Release the context switching mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Command->os, Command->mutexContext));
++ acquired = gcvFALSE;
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
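++/* Illustrative only: the Attach/Detach lifetime pairing for a user
++** process; all names are placeholders and the block is fenced with
++** "#if 0" so it is never compiled. */
++#if 0
++    gckCONTEXT context;
++    gctSIZE_T stateCount;
++
++    /* Create the per-process context; stateCount receives the number of
++    ** states in the context buffer. */
++    gcmkONERROR(gckCOMMAND_Attach(command, &context, &stateCount, processID));
++
++    /* ... the process submits work through gckCOMMAND_Commit ... */
++
++    /* Destroy the context when the process detaches. */
++    gcmkONERROR(gckCOMMAND_Detach(command, context));
++#endif
++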
++#if gcdVIRTUAL_COMMAND_BUFFER
++/*******************************************************************************
++**
++** gckCOMMAND_DumpExecutingBuffer
++**
++** Dump the command buffer which GPU is executing.
++**
++** INPUT:
++**
++** gckCOMMAND Command
++** Pointer to a gckCOMMAND object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckCOMMAND_DumpExecutingBuffer(
++ IN gckCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gckVIRTUAL_COMMAND_BUFFER_PTR buffer;
++ gctUINT32 gpuAddress;
++ gctSIZE_T pageCount;
++ gctPOINTER entry;
++ gckOS os = Command->os;
++ gckKERNEL kernel = Command->kernel;
++#if gcdLINK_QUEUE_SIZE
++ gctINT pid;
++ gctINT i, rear;
++ gctUINT32 start, end;
++ gctUINT32 dumpFront, dumpRear;
++ gckLINKQUEUE queue = &kernel->hardware->linkQueue;
++ gckLINKQUEUE queueMirror;
++ gctUINT32 bytes;
++ gckLINKDATA linkData;
++#endif
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("**** COMMAND BUF DUMP ****\n");
++ gcmkPRINT("**************************\n");
++
++ gcmkVERIFY_OK(gckOS_ReadRegisterEx(os, kernel->core, 0x664, &gpuAddress));
++
++ gcmkPRINT("DMA Address 0x%08X", gpuAddress);
++
++#if gcdLINK_QUEUE_SIZE
++    /* Duplicate the queue because it will be changed. */
++ gcmkONERROR(gckOS_AllocateMemory(os,
++ sizeof(struct _gckLINKQUEUE),
++ (gctPOINTER *)&queueMirror));
++
++ gcmkONERROR(gckOS_MemCopy(queueMirror,
++ queue,
++ sizeof(struct _gckLINKQUEUE)));
++
++    /* If a kernel command buffer links to a context buffer and then to a user
++    ** command buffer, the second link lands in the queue first, so we must fix
++    ** the order.
++    ** In queue: C1 U1 U2 C2 U3 U4 U5 C3
++    ** Real:     C1 X1 U1 C2 U2 U3 U4 C3 U5
++    ** Command buffer X1, which follows C1, has fallen out of the queue, so C1
++    ** is meaningless.
++    */
++ for (i = 0; i < gcdLINK_QUEUE_SIZE; i++)
++ {
++ gckLINKQUEUE_GetData(queueMirror, i, &linkData);
++
++ status = gckKERNEL_QueryGPUAddress(kernel, linkData->start, &buffer);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Can't find it in virtual command buffer list, ignore it. */
++ continue;
++ }
++
++ if (buffer->kernelLogical)
++ {
++ /* It is a context buffer. */
++ if (i == 0)
++ {
++ /* The real command buffer is out, so clear this slot. */
++ linkData->start = 0;
++ linkData->end = 0;
++ linkData->pid = 0;
++ }
++ else
++ {
++                /* Switch the context buffer and the command buffer. */
++ struct _gckLINKDATA tmp = *linkData;
++ gckLINKDATA linkDataPrevious;
++
++ gckLINKQUEUE_GetData(queueMirror, i - 1, &linkDataPrevious);
++ *linkData = *linkDataPrevious;
++ *linkDataPrevious = tmp;
++ }
++ }
++ }
++
++ /* Clear search result. */
++ dumpFront = dumpRear = gcvINFINITE;
++
++ gcmkPRINT("Link Stack:");
++
++ /* Search stuck address in link queue from rear. */
++ rear = gcdLINK_QUEUE_SIZE - 1;
++ for (i = 0; i < gcdLINK_QUEUE_SIZE; i++)
++ {
++ gckLINKQUEUE_GetData(queueMirror, rear, &linkData);
++
++ start = linkData->start;
++ end = linkData->end;
++ pid = linkData->pid;
++
++ if (gpuAddress >= start && gpuAddress < end)
++ {
++ /* Find latest matched command buffer. */
++ gcmkPRINT(" %d, [%08X - %08X]", pid, start, end);
++
++            /* Initialize dump information. */
++ dumpFront = dumpRear = rear;
++ }
++
++ /* Advance to previous one. */
++ rear--;
++
++ if (dumpFront != gcvINFINITE)
++ {
++ break;
++ }
++ }
++
++ if (dumpFront == gcvINFINITE)
++ {
++ /* Can't find matched record in link queue, dump kernel command buffer. */
++ _DumpKernelCommandBuffer(Command);
++
++ /* Free local copy. */
++ gcmkOS_SAFE_FREE(os, queueMirror);
++ return gcvSTATUS_OK;
++ }
++
++ /* Search the last context buffer linked. */
++ while (rear >= 0)
++ {
++ gckLINKQUEUE_GetData(queueMirror, rear, &linkData);
++
++ gcmkPRINT(" %d, [%08X - %08X]",
++ linkData->pid,
++ linkData->start,
++ linkData->end);
++
++ status = gckKERNEL_QueryGPUAddress(kernel, linkData->start, &buffer);
++
++ if (gcmIS_SUCCESS(status) && buffer->kernelLogical)
++ {
++ /* Find a context buffer. */
++ dumpFront = rear;
++ break;
++ }
++
++ rear--;
++ }
++
++    /* Dump from the last context buffer to the last command buffer where the hang happened. */
++ for (i = dumpFront; i <= dumpRear; i++)
++ {
++ gckLINKQUEUE_GetData(queueMirror, i, &linkData);
++
++ /* Get gpu address of this command buffer. */
++ gpuAddress = linkData->start;
++ bytes = linkData->end - gpuAddress;
++
++ /* Get the whole buffer. */
++ status = gckKERNEL_QueryGPUAddress(kernel, gpuAddress, &buffer);
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkPRINT("Buffer [%08X - %08X] is lost",
++ linkData->start,
++ linkData->end);
++ continue;
++ }
++
++ /* Get kernel logical for dump. */
++ if (buffer->kernelLogical)
++ {
++ /* Get kernel logical directly if it is a context buffer. */
++ entry = buffer->kernelLogical;
++ gcmkPRINT("Context Buffer:");
++ }
++ else
++ {
++            /* Make it accessible to the kernel if it is a user command buffer. */
++ gcmkVERIFY_OK(
++ gckOS_CreateKernelVirtualMapping(buffer->physical,
++ &pageCount,
++ &entry));
++ gcmkPRINT("User Command Buffer:");
++ }
++
++ /* Dump from the entry. */
++ _DumpBuffer(entry + (gpuAddress - buffer->gpuAddress), gpuAddress, bytes);
++
++        /* Release the kernel logical address if necessary. */
++ if (!buffer->kernelLogical)
++ {
++ gcmkVERIFY_OK(gckOS_DestroyKernelVirtualMapping(entry));
++ }
++ }
++
++ /* Free local copy. */
++ gcmkOS_SAFE_FREE(os, queueMirror);
++ return gcvSTATUS_OK;
++OnError:
++ return status;
++#else
++    /* Without link queue information we don't know the entry of the last
++    ** command buffer, so just dump the page where the GPU is stuck. */
++ status = gckKERNEL_QueryGPUAddress(kernel, gpuAddress, &buffer);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ gcmkVERIFY_OK(
++ gckOS_CreateKernelVirtualMapping(buffer->physical, &pageCount, &entry));
++
++ if (entry)
++ {
++ gctUINT32 offset = gpuAddress - buffer->gpuAddress;
++ gctPOINTER entryDump = entry;
++
++            /* Dump one page. */
++ gctUINT32 bytes = 4096;
++
++ /* Align to page. */
++ offset &= 0xfffff000;
++
++            /* Kernel address of the page containing the stall point. */
++ entryDump += offset;
++
++ /* Align to page. */
++ gpuAddress &= 0xfffff000;
++
++ gcmkPRINT("User Command Buffer:\n");
++ _DumpBuffer(entryDump, gpuAddress, bytes);
++ }
++
++ gcmkVERIFY_OK(
++ gckOS_DestroyKernelVirtualMapping(entry));
++ }
++ else
++ {
++ _DumpKernelCommandBuffer(Command);
++ }
++
++ return gcvSTATUS_OK;
++#endif
++}
++#endif
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_command_vg.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_command_vg.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_command_vg.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_command_vg.c 2015-07-27 23:13:06.190893891 +0200
+@@ -0,0 +1,3678 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#if gcdENABLE_VG
++
++#include "gc_hal_kernel_hardware_command_vg.h"
++
++#define _GC_OBJ_ZONE gcvZONE_COMMAND
++
++/******************************************************************************\
++*********************************** Debugging **********************************
++\******************************************************************************/
++
++#define gcvDISABLE_TIMEOUT 1
++#define gcvDUMP_COMMAND_BUFFER 0
++#define gcvDUMP_COMMAND_LINES 0
++
++
++#if gcvDEBUG || defined(EMULATOR) || gcvDISABLE_TIMEOUT
++# define gcvQUEUE_TIMEOUT ~0
++#else
++# define gcvQUEUE_TIMEOUT 10
++#endif
++
++
++/******************************************************************************\
++********************************** Definitions *********************************
++\******************************************************************************/
++
++/* Minimum buffer size. */
++#define gcvMINUMUM_BUFFER \
++ gcmSIZEOF(gcsKERNEL_QUEUE_HEADER) + \
++ gcmSIZEOF(gcsKERNEL_CMDQUEUE) * 2
++
++#define gcmDECLARE_INTERRUPT_HANDLER(Block, Number) \
++ static gceSTATUS \
++ _EventHandler_##Block##_##Number( \
++ IN gckVGKERNEL Kernel \
++ )
++
++#define gcmDEFINE_INTERRUPT_HANDLER(Block, Number) \
++ gcmDECLARE_INTERRUPT_HANDLER(Block, Number) \
++ { \
++ return _EventHandler_Block( \
++ Kernel, \
++ &Kernel->command->taskTable[gcvBLOCK_##Block], \
++ gcvFALSE \
++ ); \
++ }
++
++#define gcmDEFINE_INTERRUPT_HANDLER_ENTRY(Block, Number) \
++ { gcvBLOCK_##Block, _EventHandler_##Block##_##Number }
++
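++/* For illustration, assuming a block named COMMAND,
++** gcmDEFINE_INTERRUPT_HANDLER(COMMAND, 0) expands to:
++**
++**     static gceSTATUS
++**     _EventHandler_COMMAND_0(IN gckVGKERNEL Kernel)
++**     {
++**         return _EventHandler_Block(
++**             Kernel, &Kernel->command->taskTable[gcvBLOCK_COMMAND], gcvFALSE);
++**     }
++**
++** and gcmDEFINE_INTERRUPT_HANDLER_ENTRY(COMMAND, 0) yields the table entry
++** { gcvBLOCK_COMMAND, _EventHandler_COMMAND_0 }. */
++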
++/* Block interrupt handling table entry. */
++typedef struct _gcsBLOCK_INTERRUPT_HANDLER * gcsBLOCK_INTERRUPT_HANDLER_PTR;
++typedef struct _gcsBLOCK_INTERRUPT_HANDLER
++{
++ gceBLOCK block;
++ gctINTERRUPT_HANDLER handler;
++}
++gcsBLOCK_INTERRUPT_HANDLER;
++
++/* Queue control functions. */
++typedef struct _gcsQUEUE_UPDATE_CONTROL * gcsQUEUE_UPDATE_CONTROL_PTR;
++typedef struct _gcsQUEUE_UPDATE_CONTROL
++{
++ gctOBJECT_HANDLER execute;
++ gctOBJECT_HANDLER update;
++ gctOBJECT_HANDLER lastExecute;
++ gctOBJECT_HANDLER lastUpdate;
++}
++gcsQUEUE_UPDATE_CONTROL;
++
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++static gceSTATUS
++_FlushMMU(
++ IN gckVGCOMMAND Command
++ )
++{
++ gceSTATUS status;
++ gctUINT32 oldValue;
++ gckVGHARDWARE hardware = Command->hardware;
++
++ gcmkONERROR(gckOS_AtomicExchange(Command->os,
++ hardware->pageTableDirty,
++ 0,
++ &oldValue));
++
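++    /* The exchange reads and clears the dirty flag in a single step, so a
++    ** page table update racing with this commit merely sets the flag again
++    ** and is flushed on the next commit. */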
++ if (oldValue)
++ {
++        /* The page table was updated; flush the MMU before committing. */
++ gcmkONERROR(gckVGHARDWARE_FlushMMU(hardware));
++ }
++
++ return gcvSTATUS_OK;
++OnError:
++ return status;
++}
++
++static gceSTATUS
++_WaitForIdle(
++ IN gckVGCOMMAND Command,
++ IN gcsKERNEL_QUEUE_HEADER_PTR Queue
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctUINT32 idle;
++ gctUINT timeout = 0;
++
++ /* Loop while not idle. */
++ while (Queue->pending)
++ {
++ /* Did we reach the timeout limit? */
++ if (timeout == gcvQUEUE_TIMEOUT)
++ {
++ /* Hardware is probably dead... */
++ return gcvSTATUS_TIMEOUT;
++ }
++
++ /* Sleep for 100ms. */
++ gcmkERR_BREAK(gckOS_Delay(Command->os, 100));
++
++ /* Not the first loop? */
++ if (timeout > 0)
++ {
++ /* Read IDLE register. */
++ gcmkVERIFY_OK(gckVGHARDWARE_GetIdle(Command->hardware, &idle));
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_COMMAND,
++ "%s: timeout, IDLE=%08X\n",
++ __FUNCTION__, idle
++ );
++ }
++
++ /* Increment the timeout counter. */
++ timeout += 1;
++ }
++
++ /* Return status. */
++ return status;
++}
++
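++/* With gcvDISABLE_TIMEOUT set to 1 above, gcvQUEUE_TIMEOUT is ~0 and the
++** wait is effectively unbounded; were it 0, a release build would give the
++** hardware roughly one second (10 iterations x 100 ms) before reporting
++** gcvSTATUS_TIMEOUT. */
++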
++static gctINT32
++_GetNextInterrupt(
++ IN gckVGCOMMAND Command,
++ IN gceBLOCK Block
++ )
++{
++ gctUINT index;
++ gcsBLOCK_TASK_ENTRY_PTR entry;
++ gctINT32 interrupt;
++
++ /* Get the block entry. */
++ entry = &Command->taskTable[Block];
++
++ /* Make sure we have initialized interrupts. */
++ gcmkASSERT(entry->interruptCount > 0);
++
++ /* Decrement the interrupt usage semaphore. */
++ gcmkVERIFY_OK(gckOS_DecrementSemaphore(
++ Command->os, entry->interruptSemaphore
++ ));
++
++ /* Get the value index. */
++ index = entry->interruptIndex;
++
++ /* Get the interrupt value. */
++ interrupt = entry->interruptArray[index];
++
++ /* Must be a valid value. */
++ gcmkASSERT((interrupt >= 0) && (interrupt <= 31));
++
++ /* Advance the index to the next value. */
++ index += 1;
++
++ /* Set the new index. */
++ entry->interruptIndex = (index == entry->interruptCount)
++ ? 0
++ : index;
++
++ /* Return interrupt value. */
++ return interrupt;
++}
++
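++/* For illustration: with interruptArray = { 5, 9 } and interruptCount == 2,
++** successive calls return 5, 9, 5, 9, ...; each call first decrements the
++** interrupt usage semaphore, so callers block while all interrupt slots are
++** in flight. */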
++
++/******************************************************************************\
++***************************** Task Storage Management **************************
++\******************************************************************************/
++
++/* Minimum task buffer size. */
++#define gcvMIN_TASK_BUFFER \
++( \
++ gcmSIZEOF(gcsTASK_CONTAINER) + 128 \
++)
++
++/* Free list terminator. */
++#define gcvFREE_TASK_TERMINATOR \
++( \
++ (gcsTASK_CONTAINER_PTR) gcmINT2PTR(~0) \
++)
++
++
++/*----------------------------------------------------------------------------*/
++/*------------------- Allocated Task Buffer List Management ------------------*/
++
++static void
++_InsertTaskBuffer(
++ IN gcsTASK_CONTAINER_PTR AddAfter,
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ gcsTASK_CONTAINER_PTR addBefore;
++
++ /* Cannot add before the first buffer. */
++ gcmkASSERT(AddAfter != gcvNULL);
++
++ /* Create a shortcut to the next buffer. */
++ addBefore = AddAfter->allocNext;
++
++ /* Initialize the links. */
++ Buffer->allocPrev = AddAfter;
++ Buffer->allocNext = addBefore;
++
++ /* Link to the previous buffer. */
++ AddAfter->allocNext = Buffer;
++
++ /* Link to the next buffer. */
++ if (addBefore != gcvNULL)
++ {
++ addBefore->allocPrev = Buffer;
++ }
++}
++
++static void
++_RemoveTaskBuffer(
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ gcsTASK_CONTAINER_PTR prev;
++ gcsTASK_CONTAINER_PTR next;
++
++ /* Cannot remove the first buffer. */
++ gcmkASSERT(Buffer->allocPrev != gcvNULL);
++
++ /* Create shortcuts to the previous and next buffers. */
++ prev = Buffer->allocPrev;
++ next = Buffer->allocNext;
++
++ /* Tail buffer? */
++ if (next == gcvNULL)
++ {
++ /* Remove from the list. */
++ prev->allocNext = gcvNULL;
++ }
++
++ /* Buffer from the middle. */
++ else
++ {
++ prev->allocNext = next;
++ next->allocPrev = prev;
++ }
++}
++
++
++/*----------------------------------------------------------------------------*/
++/*--------------------- Free Task Buffer List Management ---------------------*/
++
++static void
++_AppendToFreeList(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ /* Cannot be a part of the free list already. */
++ gcmkASSERT(Buffer->freePrev == gcvNULL);
++ gcmkASSERT(Buffer->freeNext == gcvNULL);
++
++ /* First buffer to add? */
++ if (Command->taskFreeHead == gcvNULL)
++ {
++ /* Terminate the links. */
++ Buffer->freePrev = gcvFREE_TASK_TERMINATOR;
++ Buffer->freeNext = gcvFREE_TASK_TERMINATOR;
++
++ /* Initialize the list pointer. */
++ Command->taskFreeHead = Command->taskFreeTail = Buffer;
++ }
++
++ /* Not the first, add after the tail. */
++ else
++ {
++ /* Initialize the new tail buffer. */
++ Buffer->freePrev = Command->taskFreeTail;
++ Buffer->freeNext = gcvFREE_TASK_TERMINATOR;
++
++ /* Add after the tail. */
++ Command->taskFreeTail->freeNext = Buffer;
++ Command->taskFreeTail = Buffer;
++ }
++}
++
++static void
++_RemoveFromFreeList(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ /* Has to be a part of the free list. */
++ gcmkASSERT(Buffer->freePrev != gcvNULL);
++ gcmkASSERT(Buffer->freeNext != gcvNULL);
++
++ /* Head buffer? */
++ if (Buffer->freePrev == gcvFREE_TASK_TERMINATOR)
++ {
++ /* Tail buffer as well? */
++ if (Buffer->freeNext == gcvFREE_TASK_TERMINATOR)
++ {
++ /* Reset the list pointer. */
++ Command->taskFreeHead = Command->taskFreeTail = gcvNULL;
++ }
++
++ /* No, just the head. */
++ else
++ {
++ /* Update the head. */
++ Command->taskFreeHead = Buffer->freeNext;
++
++ /* Terminate the next buffer. */
++ Command->taskFreeHead->freePrev = gcvFREE_TASK_TERMINATOR;
++ }
++ }
++
++ /* Not the head. */
++ else
++ {
++ /* Tail buffer? */
++ if (Buffer->freeNext == gcvFREE_TASK_TERMINATOR)
++ {
++ /* Update the tail. */
++ Command->taskFreeTail = Buffer->freePrev;
++
++ /* Terminate the previous buffer. */
++ Command->taskFreeTail->freeNext = gcvFREE_TASK_TERMINATOR;
++ }
++
++ /* A buffer in the middle. */
++ else
++ {
++ /* Remove the buffer from the list. */
++ Buffer->freePrev->freeNext = Buffer->freeNext;
++ Buffer->freeNext->freePrev = Buffer->freePrev;
++ }
++ }
++
++ /* Reset free list pointers. */
++ Buffer->freePrev = gcvNULL;
++ Buffer->freeNext = gcvNULL;
++}
++
++
++/*----------------------------------------------------------------------------*/
++/*-------------------------- Task Buffer Allocation --------------------------*/
++
++static void
++_SplitTaskBuffer(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_CONTAINER_PTR Buffer,
++ IN gctUINT Size
++ )
++{
++ /* Determine the size of the new buffer. */
++ gctINT splitBufferSize = Buffer->size - Size;
++ gcmkASSERT(splitBufferSize >= 0);
++
++ /* Is the split buffer big enough to become a separate buffer? */
++ if (splitBufferSize >= gcvMIN_TASK_BUFFER)
++ {
++ /* Place the new path data. */
++ gcsTASK_CONTAINER_PTR splitBuffer = (gcsTASK_CONTAINER_PTR)
++ (
++ (gctUINT8_PTR) Buffer + Size
++ );
++
++ /* Set the trimmed buffer size. */
++ Buffer->size = Size;
++
++ /* Initialize the split buffer. */
++ splitBuffer->referenceCount = 0;
++ splitBuffer->size = splitBufferSize;
++ splitBuffer->freePrev = gcvNULL;
++ splitBuffer->freeNext = gcvNULL;
++
++ /* Link in. */
++ _InsertTaskBuffer(Buffer, splitBuffer);
++ _AppendToFreeList(Command, splitBuffer);
++ }
++}
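++
++/* Splitting a container at address B of size S for an allocation of Size
++   bytes (header included) produces, when S - Size >= gcvMIN_TASK_BUFFER:
++
++       B                    B + Size                B + S
++       +--------------------+-----------------------+
++       | allocated (Size)   | new free container    |
++       +--------------------+-----------------------+
++
++   Smaller remainders stay attached to the allocation instead of becoming
++   an unusably small free buffer. */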
++
++static gceSTATUS
++_AllocateTaskContainer(
++ IN gckVGCOMMAND Command,
++ IN gctUINT Size,
++ OUT gcsTASK_CONTAINER_PTR * Buffer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x Size=0x%x, Buffer ==0x%x", Command, Size, Buffer);
++
++ /* Verify arguments. */
++ gcmkVERIFY_ARGUMENT(Buffer != gcvNULL);
++
++ do
++ {
++ gcsTASK_STORAGE_PTR storage;
++ gcsTASK_CONTAINER_PTR buffer;
++
++ /* Adjust the size. */
++ Size += gcmSIZEOF(gcsTASK_CONTAINER);
++
++ /* Adjust the allocation size if not big enough. */
++ if (Size > Command->taskStorageUsable)
++ {
++ Command->taskStorageGranularity
++ = gcmALIGN(Size + gcmSIZEOF(gcsTASK_STORAGE), 1024);
++
++ Command->taskStorageUsable
++ = Command->taskStorageGranularity - gcmSIZEOF(gcsTASK_STORAGE);
++ }
++
++ /* Is there a free buffer available? */
++ else if (Command->taskFreeHead != gcvNULL)
++ {
++ /* Set the initial free buffer. */
++ gcsTASK_CONTAINER_PTR buffer = Command->taskFreeHead;
++
++ do
++ {
++ /* Is the buffer big enough? */
++ if (buffer->size >= Size)
++ {
++ /* Remove the buffer from the free list. */
++ _RemoveFromFreeList(Command, buffer);
++
++ /* Split the buffer. */
++ _SplitTaskBuffer(Command, buffer, Size);
++
++ /* Set the result. */
++ * Buffer = buffer;
++
++ gcmkFOOTER_ARG("*Buffer=0x%x",*Buffer);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++
++ /* Get the next free buffer. */
++ buffer = buffer->freeNext;
++ }
++ while (buffer != gcvFREE_TASK_TERMINATOR);
++ }
++
++ /* Allocate a container. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Command->os,
++ Command->taskStorageGranularity,
++ (gctPOINTER *) &storage
++ ));
++
++ /* Link in the storage buffer. */
++ storage->next = Command->taskStorage;
++ Command->taskStorage = storage;
++
++ /* Place the task buffer. */
++ buffer = (gcsTASK_CONTAINER_PTR) (storage + 1);
++
++ /* Determine the size of the buffer. */
++ buffer->size
++ = Command->taskStorageGranularity
++ - gcmSIZEOF(gcsTASK_STORAGE);
++
++ /* Initialize the task buffer. */
++ buffer->referenceCount = 0;
++ buffer->allocPrev = gcvNULL;
++ buffer->allocNext = gcvNULL;
++ buffer->freePrev = gcvNULL;
++ buffer->freeNext = gcvNULL;
++
++ /* Split the buffer. */
++ _SplitTaskBuffer(Command, buffer, Size);
++
++ /* Set the result. */
++ * Buffer = buffer;
++
++ gcmkFOOTER_ARG("*Buffer=0x%x",*Buffer);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++static void
++_FreeTaskContainer(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_CONTAINER_PTR Buffer
++ )
++{
++ gcsTASK_CONTAINER_PTR prev;
++ gcsTASK_CONTAINER_PTR next;
++ gcsTASK_CONTAINER_PTR merged;
++
++ gctSIZE_T mergedSize;
++
++ /* Verify arguments. */
++ gcmkASSERT(Buffer != gcvNULL);
++ gcmkASSERT(Buffer->freePrev == gcvNULL);
++ gcmkASSERT(Buffer->freeNext == gcvNULL);
++
++ /* Get shortcuts to the previous and next path data buffers. */
++ prev = Buffer->allocPrev;
++ next = Buffer->allocNext;
++
++ /* Is the previous path data buffer already free? */
++ if (prev && prev->freeNext)
++ {
++ /* The previous path data buffer is the one that remains. */
++ merged = prev;
++
++ /* Is the next path data buffer already free? */
++ if (next && next->freeNext)
++ {
++ /* Merge all three path data buffers into the previous. */
++ mergedSize = prev->size + Buffer->size + next->size;
++
++ /* Remove the next path data buffer. */
++ _RemoveFromFreeList(Command, next);
++ _RemoveTaskBuffer(next);
++ }
++ else
++ {
++ /* Merge the current path data buffer into the previous. */
++ mergedSize = prev->size + Buffer->size;
++ }
++
++ /* Delete the current path data buffer. */
++ _RemoveTaskBuffer(Buffer);
++
++ /* Set new size. */
++ merged->size = mergedSize;
++ }
++ else
++ {
++ /* The current path data buffer is the one that remains. */
++ merged = Buffer;
++
++ /* Is the next buffer already free? */
++ if (next && next->freeNext)
++ {
++ /* Merge the next into the current. */
++ mergedSize = Buffer->size + next->size;
++
++ /* Remove the next buffer. */
++ _RemoveFromFreeList(Command, next);
++ _RemoveTaskBuffer(next);
++
++ /* Set new size. */
++ merged->size = mergedSize;
++ }
++
++ /* Add the current buffer into the free list. */
++ _AppendToFreeList(Command, merged);
++ }
++}
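++
++/* Freeing coalesces with already-free physical neighbors, giving three
++   cases: the buffer merges into a free predecessor (optionally swallowing
++   a free successor as well), a free successor merges into the buffer, or
++   no merge happens. Only the surviving buffer is (re)listed; a buffer
++   merged into its free predecessor is already represented on the free
++   list through that predecessor. */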
++
++gceSTATUS
++_RemoveRecordFromProcesDB(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_HEADER_PTR Task
++ )
++{
++ gcsTASK_PTR task = (gcsTASK_PTR)((gctUINT8_PTR)Task - sizeof(gcsTASK));
++ gcsTASK_FREE_VIDEO_MEMORY_PTR freeVideoMemory;
++ gcsTASK_UNLOCK_VIDEO_MEMORY_PTR unlockVideoMemory;
++ gctINT pid;
++ gctUINT32 size;
++
++ /* Get the total size of all tasks. */
++ size = task->size;
++
++ gcmkVERIFY_OK(gckOS_GetProcessID((gctUINT32_PTR)&pid));
++
++ do
++ {
++ switch (Task->id)
++ {
++ case gcvTASK_FREE_VIDEO_MEMORY:
++ freeVideoMemory = (gcsTASK_FREE_VIDEO_MEMORY_PTR)Task;
++
++ /* Remove record from process db. */
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Command->kernel->kernel,
++ pid,
++ gcvDB_VIDEO_MEMORY,
++ gcmUINT64_TO_PTR(freeVideoMemory->node)));
++
++ /* Advance to next task. */
++ size -= sizeof(gcsTASK_FREE_VIDEO_MEMORY);
++ Task = (gcsTASK_HEADER_PTR)(freeVideoMemory + 1);
++
++ break;
++ case gcvTASK_UNLOCK_VIDEO_MEMORY:
++ unlockVideoMemory = (gcsTASK_UNLOCK_VIDEO_MEMORY_PTR)Task;
++
++ /* Remove record from process db. */
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Command->kernel->kernel,
++ pid,
++ gcvDB_VIDEO_MEMORY_LOCKED,
++ gcmUINT64_TO_PTR(unlockVideoMemory->node)));
++
++ /* Advance to next task. */
++ size -= sizeof(gcsTASK_UNLOCK_VIDEO_MEMORY);
++ Task = (gcsTASK_HEADER_PTR)(unlockVideoMemory + 1);
++
++ break;
++ default:
++ /* Skip the whole task. */
++ size = 0;
++ break;
++ }
++ }
++    while (size);
++
++ return gcvSTATUS_OK;
++}
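++
++/* User tasks arrive as one packed stream: each task structure is followed
++   immediately by the next, and the enclosing gcsTASK::size counts the
++   bytes of the whole chain, so the walk above advances structure by
++   structure until the running size reaches zero. Task IDs without a
++   process-database record terminate the walk early via the default case. */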
++
++/******************************************************************************\
++********************************* Task Scheduling ******************************
++\******************************************************************************/
++
++static gceSTATUS
++_ScheduleTasks(
++ IN gckVGCOMMAND Command,
++ IN gcsTASK_MASTER_TABLE_PTR TaskTable,
++ IN gctUINT8_PTR PreviousEnd
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ gctINT block;
++ gcsTASK_CONTAINER_PTR container;
++ gcsTASK_MASTER_ENTRY_PTR userTaskEntry;
++ gcsBLOCK_TASK_ENTRY_PTR kernelTaskEntry;
++ gcsTASK_PTR userTask;
++ gctUINT8_PTR kernelTask;
++ gctINT32 interrupt;
++ gctUINT8_PTR eventCommand;
++
++ /* Nothing to schedule? */
++ if (TaskTable->size == 0)
++ {
++ status = gcvSTATUS_OK;
++ break;
++ }
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ Command->os,
++ Command->taskMutex,
++ gcvINFINITE
++ ));
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d)\n",
++ __FUNCTION__, __LINE__
++ );
++
++ do
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " number of tasks scheduled = %d\n"
++ " size of event data in bytes = %d\n",
++ TaskTable->count,
++ TaskTable->size
++ );
++
++ /* Allocate task buffer. */
++ gcmkERR_BREAK(_AllocateTaskContainer(
++ Command,
++ TaskTable->size,
++ &container
++ ));
++
++ /* Determine the task data pointer. */
++ kernelTask = (gctUINT8_PTR) (container + 1);
++
++ /* Initialize the reference count. */
++ container->referenceCount = TaskTable->count;
++
++ /* Process tasks. */
++ for (block = gcvBLOCK_COUNT - 1; block >= 0; block -= 1)
++ {
++ /* Get the current user table entry. */
++ userTaskEntry = &TaskTable->table[block];
++
++ /* Are there tasks scheduled? */
++ if (userTaskEntry->head == gcvNULL)
++ {
++ /* No, skip to the next block. */
++ continue;
++ }
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " processing tasks for block %d\n",
++ block
++ );
++
++ /* Get the current kernel table entry. */
++ kernelTaskEntry = &Command->taskTable[block];
++
++ /* Are there tasks for the current block scheduled? */
++ if (kernelTaskEntry->container == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " first task container for the block added\n",
++ block
++ );
++
++ /* Nothing yet, set the container buffer pointer. */
++ kernelTaskEntry->container = container;
++ kernelTaskEntry->task = (gcsTASK_HEADER_PTR) kernelTask;
++ }
++
++ /* Yes, append to the end. */
++ else
++ {
++ kernelTaskEntry->link->cotainer = container;
++ kernelTaskEntry->link->task = (gcsTASK_HEADER_PTR) kernelTask;
++ }
++
++ /* Set initial task. */
++ userTask = userTaskEntry->head;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " copying user tasks over to the kernel\n"
++ );
++
++ /* Copy tasks. */
++ do
++ {
++ gcsTASK_HEADER_PTR taskHeader = (gcsTASK_HEADER_PTR) (userTask + 1);
++
++ gcmkVERIFY_OK(_RemoveRecordFromProcesDB(Command, taskHeader));
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " task ID = %d, size = %d\n",
++ ((gcsTASK_HEADER_PTR) (userTask + 1))->id,
++ userTask->size
++ );
++
++#ifdef __QNXNTO__
++ if (taskHeader->id == gcvTASK_SIGNAL)
++ {
++ ((gcsTASK_SIGNAL_PTR)taskHeader)->coid = TaskTable->coid;
++ ((gcsTASK_SIGNAL_PTR)taskHeader)->rcvid = TaskTable->rcvid;
++ }
++#endif /* __QNXNTO__ */
++ /* Copy the task data. */
++ gcmkVERIFY_OK(gckOS_MemCopy(
++ kernelTask, taskHeader, userTask->size
++ ));
++
++ /* Advance to the next task. */
++ kernelTask += userTask->size;
++ userTask = userTask->next;
++ }
++ while (userTask != gcvNULL);
++
++ /* Update link pointer in the header. */
++ kernelTaskEntry->link = (gcsTASK_LINK_PTR) kernelTask;
++
++ /* Initialize link task. */
++ kernelTaskEntry->link->id = gcvTASK_LINK;
++ kernelTaskEntry->link->cotainer = gcvNULL;
++ kernelTaskEntry->link->task = gcvNULL;
++
++ /* Advance the task data pointer. */
++ kernelTask += gcmSIZEOF(gcsTASK_LINK);
++ }
++ }
++ while (gcvFALSE);
++
++ /* Release the mutex. */
++ gcmkERR_BREAK(gckOS_ReleaseMutex(
++ Command->os,
++ Command->taskMutex
++ ));
++
++ /* Assign interrupts to the blocks. */
++ eventCommand = PreviousEnd;
++
++ for (block = gcvBLOCK_COUNT - 1; block >= 0; block -= 1)
++ {
++ /* Get the current user table entry. */
++ userTaskEntry = &TaskTable->table[block];
++
++ /* Are there tasks scheduled? */
++ if (userTaskEntry->head == gcvNULL)
++ {
++ /* No, skip to the next block. */
++ continue;
++ }
++
++ /* Get the interrupt number. */
++ interrupt = _GetNextInterrupt(Command, block);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): block = %d interrupt = %d\n",
++ __FUNCTION__, __LINE__,
++ block, interrupt
++ );
++
++ /* Determine the command position. */
++ eventCommand -= Command->info.eventCommandSize;
++
++ /* Append an EVENT command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EventCommand(
++ Command, eventCommand, block, interrupt, gcvNULL
++ ));
++ }
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
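++
++/* Scheduling is two-phase: first, under taskMutex, the per-block user task
++   chains are copied into a kernel container, each chain terminated with a
++   LINK task so later submissions can be appended; second, one EVENT
++   command per participating block is written backwards from PreviousEnd
++   into the reserved tail of the command buffer, so each block's interrupt
++   fires its corresponding task chain. */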
++
++
++/******************************************************************************\
++******************************** Memory Management *****************************
++\******************************************************************************/
++
++static gceSTATUS
++_HardwareToKernel(
++ IN gckOS Os,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gceSTATUS status;
++ gckVIDMEM memory;
++ gctUINT32 offset;
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY
++ gctUINT32 nodePhysical;
++#endif
++ status = gcvSTATUS_OK;
++ /* Assume a non-virtual node and get the pool manager object. */
++ memory = Node->VidMem.memory;
++
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY
++ nodePhysical = memory->baseAddress
++ + Node->VidMem.offset
++ + Node->VidMem.alignment;
++
++ if (Node->VidMem.kernelVirtual == gcvNULL)
++ {
++ status = gckOS_MapPhysical(Os,
++ nodePhysical,
++ Node->VidMem.bytes,
++ (gctPOINTER *)&Node->VidMem.kernelVirtual);
++
++ if (gcmkIS_ERROR(status))
++ {
++ return status;
++ }
++ }
++
++ offset = Address - nodePhysical;
++ *KernelPointer = (gctPOINTER)((gctUINT8_PTR)Node->VidMem.kernelVirtual + offset);
++#else
++ /* Determine the header offset within the pool it is allocated in. */
++ offset = Address - memory->baseAddress;
++
++ /* Translate the offset into the kernel side pointer. */
++ status = gckOS_GetKernelLogicalEx(
++ Os,
++ gcvCORE_VG,
++ offset,
++ KernelPointer
++ );
++#endif
++
++ /* Return status. */
++ return status;
++}
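++
++/* Translation here assumes a non-virtual node: the hardware address is an
++   offset from the pool (or reserved-memory) base, resolved either through
++   a gckOS_MapPhysical mapping cached in kernelVirtual or directly via
++   gckOS_GetKernelLogicalEx. Virtual pools are rejected by the caller (see
++   _AllocateLinear below). */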
++
++static gceSTATUS
++_ConvertUserCommandBufferPointer(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR UserCommandBuffer,
++ OUT gcsCMDBUFFER_PTR * KernelCommandBuffer
++ )
++{
++ gceSTATUS status, last;
++ gcsCMDBUFFER_PTR mappedUserCommandBuffer = gcvNULL;
++
++ do
++ {
++ gctUINT32 headerAddress;
++
++ /* Map the command buffer structure into the kernel space. */
++ gcmkERR_BREAK(gckOS_MapUserPointer(
++ Command->os,
++ UserCommandBuffer,
++ gcmSIZEOF(gcsCMDBUFFER),
++ (gctPOINTER *) &mappedUserCommandBuffer
++ ));
++
++ /* Determine the address of the header. */
++ headerAddress
++ = mappedUserCommandBuffer->address
++ - mappedUserCommandBuffer->bufferOffset;
++
++ /* Translate the logical address to the kernel space. */
++ gcmkERR_BREAK(_HardwareToKernel(
++ Command->os,
++ gcmUINT64_TO_PTR(mappedUserCommandBuffer->node),
++ headerAddress,
++ (gctPOINTER *) KernelCommandBuffer
++ ));
++ }
++ while (gcvFALSE);
++
++ /* Unmap the user command buffer. */
++ if (mappedUserCommandBuffer != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_UnmapUserPointer(
++ Command->os,
++ UserCommandBuffer,
++ gcmSIZEOF(gcsCMDBUFFER),
++ mappedUserCommandBuffer
++ ));
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_AllocateLinear(
++ IN gckVGCOMMAND Command,
++ IN gctUINT Size,
++ IN gctUINT Alignment,
++ OUT gcuVIDMEM_NODE_PTR * Node,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Logical
++ )
++{
++ gceSTATUS status, last;
++ gcuVIDMEM_NODE_PTR node = gcvNULL;
++ gctUINT32 address = (gctUINT32)~0;
++
++ do
++ {
++ gcePOOL pool;
++ gctPOINTER logical;
++
++ /* Allocate from the system pool. */
++ pool = gcvPOOL_SYSTEM;
++
++ /* Allocate memory. */
++ gcmkERR_BREAK(gckKERNEL_AllocateLinearMemory(
++ Command->kernel->kernel, &pool,
++ Size, Alignment,
++ gcvSURF_TYPE_UNKNOWN,
++ &node
++ ));
++
++ /* Do not accept virtual pools for now because we don't handle the
++ kernel pointer translation at the moment. */
++ if (pool == gcvPOOL_VIRTUAL)
++ {
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ break;
++ }
++
++ /* Lock the command buffer. */
++ gcmkERR_BREAK(gckVIDMEM_Lock(
++ Command->kernel->kernel,
++ node,
++ gcvFALSE,
++ &address
++ ));
++
++ /* Translate the logical address to the kernel space. */
++ gcmkERR_BREAK(_HardwareToKernel(
++ Command->os,
++ node,
++ address,
++ &logical
++ ));
++
++ /* Set return values. */
++ * Node = node;
++ * Address = address;
++ * Logical = logical;
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (node != gcvNULL)
++ {
++ /* Unlock the command buffer. */
++ if (address != ~0)
++ {
++ gcmkCHECK_STATUS(gckVIDMEM_Unlock(
++ Command->kernel->kernel, node, gcvSURF_TYPE_UNKNOWN, gcvNULL
++ ));
++ }
++
++ /* Free the command buffer. */
++ gcmkCHECK_STATUS(gckVIDMEM_Free(
++ Command->kernel->kernel,
++ node
++ ));
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_FreeLinear(
++ IN gckVGKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Unlock the linear buffer. */
++ gcmkERR_BREAK(gckVIDMEM_Unlock(Kernel->kernel, Node, gcvSURF_TYPE_UNKNOWN, gcvNULL));
++
++ /* Free the linear buffer. */
++ gcmkERR_BREAK(gckVIDMEM_Free(Kernel->kernel, Node));
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++gceSTATUS
++_AllocateCommandBuffer(
++ IN gckVGCOMMAND Command,
++ IN gctSIZE_T Size,
++ OUT gcsCMDBUFFER_PTR * CommandBuffer
++ )
++{
++ gceSTATUS status, last;
++ gcuVIDMEM_NODE_PTR node = gcvNULL;
++
++ do
++ {
++ gctUINT alignedHeaderSize;
++ gctUINT requestedSize;
++ gctUINT allocationSize;
++ gctUINT32 address = 0;
++ gcsCMDBUFFER_PTR commandBuffer;
++ gctUINT8_PTR endCommand;
++
++ /* Determine the aligned header size. */
++ alignedHeaderSize
++ = gcmALIGN(gcmSIZEOF(gcsCMDBUFFER), Command->info.addressAlignment);
++
++ /* Align the requested size. */
++ requestedSize
++ = gcmALIGN(Size, Command->info.commandAlignment);
++
++ /* Determine the size of the buffer to allocate. */
++ allocationSize
++ = alignedHeaderSize
++ + requestedSize
++ + Command->info.staticTailSize;
++
++ /* Allocate the command buffer. */
++ gcmkERR_BREAK(_AllocateLinear(
++ Command,
++ allocationSize,
++ Command->info.addressAlignment,
++ &node,
++ &address,
++ (gctPOINTER *) &commandBuffer
++ ));
++
++ /* Initialize the structure. */
++ commandBuffer->completion = gcvVACANT_BUFFER;
++ commandBuffer->node = gcmPTR_TO_UINT64(node);
++ commandBuffer->address = address + alignedHeaderSize;
++ commandBuffer->bufferOffset = alignedHeaderSize;
++ commandBuffer->size = requestedSize;
++ commandBuffer->offset = requestedSize;
++ commandBuffer->nextAllocated = gcvNULL;
++ commandBuffer->nextSubBuffer = gcvNULL;
++
++ /* Determine the data count. */
++ commandBuffer->dataCount
++ = (requestedSize + Command->info.staticTailSize)
++ / Command->info.commandAlignment;
++
++ /* Determine the location of the END command. */
++ endCommand
++ = (gctUINT8_PTR) commandBuffer
++ + alignedHeaderSize
++ + requestedSize;
++
++ /* Append an END command. */
++ gcmkERR_BREAK(gckVGCOMMAND_EndCommand(
++ Command,
++ endCommand,
++ Command->info.feBufferInt,
++ gcvNULL
++ ));
++
++ /* Set the return pointer. */
++ * CommandBuffer = commandBuffer;
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (node != gcvNULL)
++ {
++ /* Free the command buffer. */
++ gcmkCHECK_STATUS(_FreeLinear(Command->kernel, node));
++ }
++
++ /* Return status. */
++ return status;
++}
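++
++/* Resulting command buffer layout (all sizes aligned per Command->info):
++
++       +---------------------+--------------------+-------------------+
++       | gcsCMDBUFFER header | command data       | static tail (END) |
++       | alignedHeaderSize   | requestedSize      | staticTailSize    |
++       +---------------------+--------------------+-------------------+
++
++   commandBuffer->address points at the data area, bufferOffset records
++   the header size, and dataCount spans data plus tail so the FE fetches
++   through the END command. */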
++
++static gceSTATUS
++_FreeCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ )
++{
++ gceSTATUS status;
++
++ /* Free the buffer. */
++ status = _FreeLinear(Kernel, gcmUINT64_TO_PTR(CommandBuffer->node));
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++****************************** TS Overflow Handler *****************************
++\******************************************************************************/
++
++static gceSTATUS
++_EventHandler_TSOverflow(
++ IN gckVGKERNEL Kernel
++ )
++{
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): **** TS OVERFLOW ENCOUNTERED ****\n",
++ __FUNCTION__, __LINE__
++ );
++
++ return gcvSTATUS_OK;
++}
++
++
++/******************************************************************************\
++****************************** Bus Error Handler *******************************
++\******************************************************************************/
++
++static gceSTATUS
++_EventHandler_BusError(
++ IN gckVGKERNEL Kernel
++ )
++{
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): **** BUS ERROR ENCOUNTERED ****\n",
++ __FUNCTION__, __LINE__
++ );
++
++ return gcvSTATUS_OK;
++}
++
++/******************************************************************************\
++****************************** Power Stall Handler *******************************
++\******************************************************************************/
++
++static gceSTATUS
++_EventHandler_PowerStall(
++ IN gckVGKERNEL Kernel
++ )
++{
++ /* Signal. */
++ return gckOS_Signal(
++ Kernel->os,
++ Kernel->command->powerStallSignal,
++ gcvTRUE);
++}
++
++/******************************************************************************\
++******************************** Task Routines *********************************
++\******************************************************************************/
++
++typedef gceSTATUS (* gctTASKROUTINE) (
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskLink(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskCluster(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskIncrement(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskDecrement(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskSignal(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskLockdown(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskUnlockVideoMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskFreeVideoMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskFreeContiguousMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gceSTATUS
++_TaskUnmapUserMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ );
++
++static gctTASKROUTINE _taskRoutine[] =
++{
++ _TaskLink, /* gcvTASK_LINK */
++ _TaskCluster, /* gcvTASK_CLUSTER */
++ _TaskIncrement, /* gcvTASK_INCREMENT */
++ _TaskDecrement, /* gcvTASK_DECREMENT */
++ _TaskSignal, /* gcvTASK_SIGNAL */
++ _TaskLockdown, /* gcvTASK_LOCKDOWN */
++ _TaskUnlockVideoMemory, /* gcvTASK_UNLOCK_VIDEO_MEMORY */
++ _TaskFreeVideoMemory, /* gcvTASK_FREE_VIDEO_MEMORY */
++ _TaskFreeContiguousMemory, /* gcvTASK_FREE_CONTIGUOUS_MEMORY */
++ _TaskUnmapUserMemory, /* gcvTASK_UNMAP_USER_MEMORY */
++};
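++
++/* The table is indexed directly by gcsTASK_HEADER::id, so the entries
++   must remain in the exact order of the gcvTASK_* enumeration noted in
++   the comments above; reordering either side would silently dispatch
++   tasks to the wrong handler. */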
++
++static gceSTATUS
++_TaskLink(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ /* Cast the task pointer. */
++ gcsTASK_LINK_PTR task = (gcsTASK_LINK_PTR) TaskHeader->task;
++
++ /* Save the pointer to the container. */
++ gcsTASK_CONTAINER_PTR container = TaskHeader->container;
++
++ /* No more tasks in the list? */
++ if (task->task == gcvNULL)
++ {
++ /* Reset the entry. */
++ TaskHeader->container = gcvNULL;
++ TaskHeader->task = gcvNULL;
++ TaskHeader->link = gcvNULL;
++ }
++ else
++ {
++ /* Update the entry. */
++ TaskHeader->container = task->cotainer;
++ TaskHeader->task = task->task;
++ }
++
++    /* Every task handler has already decremented the container reference
++       count; free the container once it has dropped to zero. */
++ gcmkASSERT(container->referenceCount >= 0);
++ if (container->referenceCount == 0)
++ {
++ /* Free the container. */
++ _FreeTaskContainer(Command, container);
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_TaskCluster(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ /* Cast the task pointer. */
++ gcsTASK_CLUSTER_PTR cluster = (gcsTASK_CLUSTER_PTR) TaskHeader->task;
++
++ /* Get the number of tasks. */
++ gctUINT taskCount = cluster->taskCount;
++
++ /* Advance to the next task. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (cluster + 1);
++
++ /* Perform all tasks in the cluster. */
++ while (taskCount)
++ {
++ /* Perform the current task. */
++ gcmkERR_BREAK(_taskRoutine[TaskHeader->task->id](
++ Command,
++ TaskHeader
++ ));
++
++ /* Update the task count. */
++ taskCount -= 1;
++ }
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskIncrement(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_INCREMENT_PTR task = (gcsTASK_INCREMENT_PTR) TaskHeader->task;
++
++ /* Convert physical into logical address. */
++ gctUINT32_PTR logical;
++ gcmkERR_BREAK(gckOS_MapPhysical(
++ Command->os,
++ task->address,
++ gcmSIZEOF(gctUINT32),
++ (gctPOINTER *) &logical
++ ));
++
++ /* Increment data. */
++ (* logical) += 1;
++
++ /* Unmap the physical memory. */
++ gcmkERR_BREAK(gckOS_UnmapPhysical(
++ Command->os,
++ logical,
++ gcmSIZEOF(gctUINT32)
++ ));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskDecrement(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_DECREMENT_PTR task = (gcsTASK_DECREMENT_PTR) TaskHeader->task;
++
++ /* Convert physical into logical address. */
++ gctUINT32_PTR logical;
++ gcmkERR_BREAK(gckOS_MapPhysical(
++ Command->os,
++ task->address,
++ gcmSIZEOF(gctUINT32),
++ (gctPOINTER *) &logical
++ ));
++
++ /* Decrement data. */
++ (* logical) -= 1;
++
++ /* Unmap the physical memory. */
++ gcmkERR_BREAK(gckOS_UnmapPhysical(
++ Command->os,
++ logical,
++ gcmSIZEOF(gctUINT32)
++ ));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskSignal(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_SIGNAL_PTR task = (gcsTASK_SIGNAL_PTR) TaskHeader->task;
++
++ /* Map the signal into kernel space. */
++#ifdef __QNXNTO__
++ gcmkERR_BREAK(gckOS_UserSignal(
++ Command->os, task->signal, task->rcvid, task->coid
++ ));
++#else
++ gcmkERR_BREAK(gckOS_UserSignal(
++ Command->os, task->signal, task->process
++ ));
++#endif /* __QNXNTO__ */
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskLockdown(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++ gctUINT32_PTR userCounter = gcvNULL;
++ gctUINT32_PTR kernelCounter = gcvNULL;
++ gctSIGNAL signal = gcvNULL;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_LOCKDOWN_PTR task = (gcsTASK_LOCKDOWN_PTR) TaskHeader->task;
++
++ /* Convert physical addresses into logical. */
++ gcmkERR_BREAK(gckOS_MapPhysical(
++ Command->os,
++ task->userCounter,
++ gcmSIZEOF(gctUINT32),
++ (gctPOINTER *) &userCounter
++ ));
++
++ gcmkERR_BREAK(gckOS_MapPhysical(
++ Command->os,
++ task->kernelCounter,
++ gcmSIZEOF(gctUINT32),
++ (gctPOINTER *) &kernelCounter
++ ));
++
++ /* Update the kernel counter. */
++ (* kernelCounter) += 1;
++
++ /* Are the counters equal? */
++ if ((* userCounter) == (* kernelCounter))
++ {
++ /* Map the signal into kernel space. */
++ gcmkERR_BREAK(gckOS_MapSignal(
++ Command->os, task->signal, task->process, &signal
++ ));
++
++ if (signal == gcvNULL)
++ {
++ /* Signal. */
++ gcmkERR_BREAK(gckOS_Signal(
++ Command->os, task->signal, gcvTRUE
++ ));
++ }
++ else
++ {
++ /* Signal. */
++ gcmkERR_BREAK(gckOS_Signal(
++ Command->os, signal, gcvTRUE
++ ));
++ }
++ }
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Destroy the mapped signal. */
++ if (signal != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ Command->os, signal
++ ));
++ }
++
++ /* Unmap the physical memory. */
++ if (kernelCounter != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapPhysical(
++ Command->os,
++ kernelCounter,
++ gcmSIZEOF(gctUINT32)
++ ));
++ }
++
++ if (userCounter != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapPhysical(
++ Command->os,
++ userCounter,
++ gcmSIZEOF(gctUINT32)
++ ));
++ }
++
++ /* Return status. */
++ return status;
++}
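++
++/* LOCKDOWN is a counter rendezvous: the kernel counter is incremented
++   here and the signal fires only once it matches the user counter
++   (presumably advanced on the user side as occurrences are queued), i.e.
++   once every queued occurrence has been processed. */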
++
++static gceSTATUS
++_TaskUnlockVideoMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_UNLOCK_VIDEO_MEMORY_PTR task
++ = (gcsTASK_UNLOCK_VIDEO_MEMORY_PTR) TaskHeader->task;
++
++ /* Unlock video memory. */
++ gcmkERR_BREAK(gckVIDMEM_Unlock(
++ Command->kernel->kernel,
++ gcmUINT64_TO_PTR(task->node),
++ gcvSURF_TYPE_UNKNOWN,
++ gcvNULL));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskFreeVideoMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_FREE_VIDEO_MEMORY_PTR task
++ = (gcsTASK_FREE_VIDEO_MEMORY_PTR) TaskHeader->task;
++
++ /* Free video memory. */
++ gcmkERR_BREAK(gckVIDMEM_Free(Command->kernel->kernel, gcmUINT64_TO_PTR(task->node)));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskFreeContiguousMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_FREE_CONTIGUOUS_MEMORY_PTR task
++ = (gcsTASK_FREE_CONTIGUOUS_MEMORY_PTR) TaskHeader->task;
++
++ /* Free contiguous memory. */
++ gcmkERR_BREAK(gckOS_FreeContiguous(
++ Command->os, task->physical, task->logical, task->bytes
++ ));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_TaskUnmapUserMemory(
++ gckVGCOMMAND Command,
++ gcsBLOCK_TASK_ENTRY_PTR TaskHeader
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the task pointer. */
++ gcsTASK_UNMAP_USER_MEMORY_PTR task
++ = (gcsTASK_UNMAP_USER_MEMORY_PTR) TaskHeader->task;
++
++ /* Unmap the user memory. */
++ gcmkERR_BREAK(gckOS_UnmapUserMemory(
++ Command->os, gcvCORE_VG, task->memory, task->size, task->info, task->address
++ ));
++
++ /* Update the reference counter. */
++ TaskHeader->container->referenceCount -= 1;
++
++ /* Update the task pointer. */
++ TaskHeader->task = (gcsTASK_HEADER_PTR) (task + 1);
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++/******************************************************************************\
++************ Hardware Block Interrupt Handlers For Scheduled Events ************
++\******************************************************************************/
++
++static gceSTATUS
++_EventHandler_Block(
++ IN gckVGKERNEL Kernel,
++ IN gcsBLOCK_TASK_ENTRY_PTR TaskHeader,
++ IN gctBOOL ProcessAll
++ )
++{
++ gceSTATUS status, last;
++
++ gcmkHEADER_ARG("Kernel=0x%x TaskHeader=0x%x ProcessAll=0x%x", Kernel, TaskHeader, ProcessAll);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ do
++ {
++ gckVGCOMMAND command;
++
++ /* Get the command buffer object. */
++ command = Kernel->command;
++
++ /* Increment the interrupt usage semaphore. */
++ gcmkERR_BREAK(gckOS_IncrementSemaphore(
++ command->os, TaskHeader->interruptSemaphore
++ ));
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ command->os,
++ command->taskMutex,
++ gcvINFINITE
++ ));
++
++ /* Verify inputs. */
++ gcmkASSERT(TaskHeader != gcvNULL);
++ gcmkASSERT(TaskHeader->container != gcvNULL);
++ gcmkASSERT(TaskHeader->task != gcvNULL);
++ gcmkASSERT(TaskHeader->link != gcvNULL);
++
++ /* Process tasks. */
++ do
++ {
++ /* Process the current task. */
++ gcmkERR_BREAK(_taskRoutine[TaskHeader->task->id](
++ command,
++ TaskHeader
++ ));
++
++            /* Is the next task a LINK? */
++ if (TaskHeader->task->id == gcvTASK_LINK)
++ {
++ gcmkERR_BREAK(_taskRoutine[TaskHeader->task->id](
++ command,
++ TaskHeader
++ ));
++
++ /* Done. */
++ break;
++ }
++ }
++ while (ProcessAll);
++
++ /* Release the mutex. */
++ gcmkCHECK_STATUS(gckOS_ReleaseMutex(
++ command->os,
++ command->taskMutex
++ ));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++gcmDECLARE_INTERRUPT_HANDLER(COMMAND, 0)
++{
++ gceSTATUS status, last;
++
++ gcmkHEADER_ARG("Kernel=0x%x ", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ do
++ {
++ gckVGCOMMAND command;
++ gcsKERNEL_QUEUE_HEADER_PTR mergeQueue;
++ gcsKERNEL_QUEUE_HEADER_PTR queueTail;
++ gcsKERNEL_CMDQUEUE_PTR entry;
++ gctUINT entryCount;
++
++ /* Get the command buffer object. */
++ command = Kernel->command;
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ command->os,
++ command->queueMutex,
++ gcvINFINITE
++ ));
++
++ /* Get the current queue. */
++ queueTail = command->queueTail;
++
++ /* Get the current queue entry. */
++ entry = queueTail->currentEntry;
++
++ /* Get the number of entries in the queue. */
++ entryCount = queueTail->pending;
++
++ /* Process all entries. */
++ while (gcvTRUE)
++ {
++ /* Call post-execution function. */
++ status = entry->handler(Kernel, entry);
++
++ /* Failed? */
++ if (gcmkIS_ERROR(status))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR,
++ gcvZONE_COMMAND,
++ "[%s] line %d: post action failed.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++            /* Did the handler execute the next buffer? */
++ if (status == gcvSTATUS_EXECUTED)
++ {
++ /* Update the queue. */
++ queueTail->pending = entryCount;
++ queueTail->currentEntry = entry;
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++
++ /* Break out of the loop. */
++ break;
++ }
++
++ /* Advance to the next entry. */
++ entry += 1;
++ entryCount -= 1;
++
++ /* Last entry? */
++ if (entryCount == 0)
++ {
++ /* Reset the queue to idle. */
++ queueTail->pending = 0;
++
++ /* Get a shortcut to the queue to merge with. */
++ mergeQueue = command->mergeQueue;
++
++ /* Merge the queues if necessary. */
++ if (mergeQueue != queueTail)
++ {
++ gcmkASSERT(mergeQueue < queueTail);
++ gcmkASSERT(mergeQueue->next == queueTail);
++
++ mergeQueue->size
++ += gcmSIZEOF(gcsKERNEL_QUEUE_HEADER)
++ + queueTail->size;
++
++ mergeQueue->next = queueTail->next;
++ }
++
++ /* Advance to the next queue. */
++ queueTail = queueTail->next;
++
++ /* Did it wrap around? */
++ if (command->queue == queueTail)
++ {
++ /* Reset merge queue. */
++ command->mergeQueue = queueTail;
++ }
++
++ /* Set new queue. */
++ command->queueTail = queueTail;
++
++ /* Is the next queue scheduled? */
++ if (queueTail->pending > 0)
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* The first entry must be a command buffer. */
++ commandBuffer = queueTail->currentEntry->commandBuffer;
++
++ /* Start the command processor. */
++ status = gckVGHARDWARE_Execute(
++ command->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Failed? */
++ if (gcmkIS_ERROR(status))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR,
++ gcvZONE_COMMAND,
++ "[%s] line %d: failed to start the next queue.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++ }
++ else
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(
++ Kernel->command->hardware, gcvPOWER_IDLE_BROADCAST
++ );
++ }
++
++ /* Break out of the loop. */
++ break;
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkCHECK_STATUS(gckOS_ReleaseMutex(
++ command->os,
++ command->queueMutex
++ ));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
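++
++/* The command queue is a ring of variable-sized gcsKERNEL_QUEUE_HEADER
++   segments carved out of a single allocation. As the tail drains, fully
++   processed segments are folded back into mergeQueue so their space forms
++   one contiguous region again; when the tail wraps to the start of the
++   ring, mergeQueue is reset along with it. */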
++
++/* Define standard block interrupt handlers. */
++gcmDEFINE_INTERRUPT_HANDLER(TESSELLATOR, 0)
++gcmDEFINE_INTERRUPT_HANDLER(VG, 0)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 0)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 1)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 2)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 3)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 4)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 5)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 6)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 7)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 8)
++gcmDEFINE_INTERRUPT_HANDLER(PIXEL, 9)
++
++/* The entries in the array are arranged by event priority. */
++static gcsBLOCK_INTERRUPT_HANDLER _blockHandlers[] =
++{
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(TESSELLATOR, 0),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(VG, 0),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 0),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 1),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 2),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 3),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 4),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 5),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 6),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 7),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 8),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(PIXEL, 9),
++ gcmDEFINE_INTERRUPT_HANDLER_ENTRY(COMMAND, 0),
++};
++
++
++/******************************************************************************\
++************************* Static Command Buffer Handlers ***********************
++\******************************************************************************/
++
++static gceSTATUS
++_UpdateStaticCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d)\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_ExecuteStaticCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* Cast the command buffer header. */
++ commandBuffer = Entry->commandBuffer;
++
++ /* Set to update the command buffer next time. */
++ Entry->handler = _UpdateStaticCommandBuffer;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): executing next buffer @ 0x%08X, data count = %d\n",
++ __FUNCTION__, __LINE__,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Kernel->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++
++ /* Success. */
++ return gcvSTATUS_EXECUTED;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_UpdateLastStaticCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++#if gcvDEBUG || gcdFORCE_MESSAGES
++ /* Get the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Validate the command buffer. */
++ gcmkASSERT(commandBuffer->completion != gcvNULL);
++ gcmkASSERT(commandBuffer->completion != gcvVACANT_BUFFER);
++
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): processing all tasks scheduled for FE.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Perform scheduled tasks. */
++ return _EventHandler_Block(
++ Kernel,
++ &Kernel->command->taskTable[gcvBLOCK_COMMAND],
++ gcvTRUE
++ );
++}
++
++static gceSTATUS
++_ExecuteLastStaticCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Set to update the command buffer next time. */
++ Entry->handler = _UpdateLastStaticCommandBuffer;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): executing next buffer @ 0x%08X, data count = %d\n",
++ __FUNCTION__, __LINE__,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Kernel->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++
++ /* Success. */
++ return gcvSTATUS_EXECUTED;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++************************* Dynamic Command Buffer Handlers **********************
++\******************************************************************************/
++
++static gceSTATUS
++_UpdateDynamicCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d)\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_ExecuteDynamicCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Set to update the command buffer next time. */
++ Entry->handler = _UpdateDynamicCommandBuffer;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): executing next buffer @ 0x%08X, data count = %d\n",
++ __FUNCTION__, __LINE__,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Kernel->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++
++ /* Success. */
++ return gcvSTATUS_EXECUTED;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_UpdateLastDynamicCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++#if gcvDEBUG || gcdFORCE_MESSAGES
++ /* Get the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Validate the command buffer. */
++ gcmkASSERT(commandBuffer->completion != gcvNULL);
++ gcmkASSERT(commandBuffer->completion != gcvVACANT_BUFFER);
++
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): processing all tasks scheduled for FE.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Perform scheduled tasks. */
++ return _EventHandler_Block(
++ Kernel,
++ &Kernel->command->taskTable[gcvBLOCK_COMMAND],
++ gcvTRUE
++ );
++}
++
++static gceSTATUS
++_ExecuteLastDynamicCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Cast the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = Entry->commandBuffer;
++
++ /* Set to update the command buffer next time. */
++ Entry->handler = _UpdateLastDynamicCommandBuffer;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s(%d): executing next buffer @ 0x%08X, data count = %d\n",
++ __FUNCTION__, __LINE__,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ );
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Kernel->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++
++ /* Success. */
++ return gcvSTATUS_EXECUTED;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++********************************* Other Handlers *******************************
++\******************************************************************************/
++
++static gceSTATUS
++_FreeKernelCommandBuffer(
++ IN gckVGKERNEL Kernel,
++ IN gcsKERNEL_CMDQUEUE_PTR Entry
++ )
++{
++ gceSTATUS status;
++
++ /* Free the command buffer. */
++ status = _FreeCommandBuffer(Kernel, Entry->commandBuffer);
++
++ /* Return status. */
++ return status;
++}
++
++
++/******************************************************************************\
++******************************* Queue Management *******************************
++\******************************************************************************/
++
++#if gcvDUMP_COMMAND_BUFFER
++static void
++_DumpCommandQueue(
++ IN gckVGCOMMAND Command,
++ IN gcsKERNEL_QUEUE_HEADER_PTR QueueHeader,
++ IN gctUINT EntryCount
++ )
++{
++ gcsKERNEL_CMDQUEUE_PTR entry;
++ gctUINT queueIndex;
++
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ static gctUINT arrayCount = 0;
++#endif
++
++    /* Is dumping enabled? */
++    if (!Command->enableDumping)
++ {
++ return;
++ }
++
++#if !defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ "COMMAND QUEUE DUMP: %d entries\n", EntryCount
++ );
++#endif
++
++ /* Get the pointer to the first entry. */
++ entry = QueueHeader->currentEntry;
++
++ /* Iterate through the queue. */
++ for (queueIndex = 0; queueIndex < EntryCount; queueIndex += 1)
++ {
++ gcsCMDBUFFER_PTR buffer;
++ gctUINT bufferCount;
++ gctUINT bufferIndex;
++ gctUINT i, count;
++ gctUINT size;
++ gctUINT32_PTR data;
++
++#if gcvDUMP_COMMAND_LINES
++ gctUINT lineNumber;
++#endif
++
++#if !defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_COMMAND,
++ "ENTRY %d\n", queueIndex
++ );
++#endif
++
++ /* Reset the count. */
++ bufferCount = 0;
++
++ /* Set the initial buffer. */
++ buffer = entry->commandBuffer;
++
++ /* Loop through all subbuffers. */
++ while (buffer)
++ {
++ /* Update the count. */
++ bufferCount += 1;
++
++ /* Advance to the next subbuffer. */
++ buffer = buffer->nextSubBuffer;
++ }
++
++#if !defined(gcvCOMMAND_BUFFER_NAME)
++ if (bufferCount > 1)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ " COMMAND BUFFER SET: %d buffers.\n",
++ bufferCount
++ );
++ }
++#endif
++
++ /* Reset the buffer index. */
++ bufferIndex = 0;
++
++ /* Set the initial buffer. */
++ buffer = entry->commandBuffer;
++
++ /* Loop through all subbuffers. */
++ while (buffer)
++ {
++ /* Determine the size of the buffer. */
++ size = buffer->dataCount * Command->info.commandAlignment;
++
++#if !defined(gcvCOMMAND_BUFFER_NAME)
++ /* A single buffer? */
++ if (bufferCount == 1)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ " COMMAND BUFFER: count=%d (0x%X), size=%d bytes @ %08X.\n",
++ buffer->dataCount,
++ buffer->dataCount,
++ size,
++ buffer->address
++ );
++ }
++ else
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ " COMMAND BUFFER %d: count=%d (0x%X), size=%d bytes @ %08X\n",
++ bufferIndex,
++ buffer->dataCount,
++ buffer->dataCount,
++ size,
++ buffer->address
++ );
++ }
++#endif
++
++ /* Determine the number of double words to print. */
++ count = size / 4;
++
++ /* Determine the buffer location. */
++ data = (gctUINT32_PTR)
++ (
++ (gctUINT8_PTR) buffer + buffer->bufferOffset
++ );
++
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ "unsigned int _" gcvCOMMAND_BUFFER_NAME "_%d[] =\n",
++ arrayCount
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ "{\n"
++ );
++
++ arrayCount += 1;
++#endif
++
++#if gcvDUMP_COMMAND_LINES
++ /* Reset the line number. */
++ lineNumber = 0;
++#endif
++
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ count -= 2;
++#endif
++
++ for (i = 0; i < count; i += 1)
++ {
++ if ((i % 8) == 0)
++ {
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, "\t");
++#else
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, " ");
++#endif
++ }
++
++#if gcvDUMP_COMMAND_LINES
++ if (lineNumber == gcvDUMP_COMMAND_LINES)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, " . . . . . . . . .\n");
++ break;
++ }
++#endif
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, "0x%08X", data[i]);
++
++ if (i + 1 == count)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, "\n");
++
++#if gcvDUMP_COMMAND_LINES
++ lineNumber += 1;
++#endif
++ }
++ else
++ {
++ if (((i + 1) % 8) == 0)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, ",\n");
++
++#if gcvDUMP_COMMAND_LINES
++ lineNumber += 1;
++#endif
++ }
++ else
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_COMMAND, ", ");
++ }
++ }
++ }
++
++#if defined(gcvCOMMAND_BUFFER_NAME)
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO,
++ gcvZONE_COMMAND,
++ "};\n\n"
++ );
++#endif
++
++ /* Advance to the next subbuffer. */
++ buffer = buffer->nextSubBuffer;
++ bufferIndex += 1;
++ }
++
++ /* Advance to the next entry. */
++ entry += 1;
++ }
++}
++#endif
++
++static gceSTATUS
++_LockCurrentQueue(
++ IN gckVGCOMMAND Command,
++ OUT gcsKERNEL_CMDQUEUE_PTR * Entries,
++ OUT gctUINT_PTR EntryCount
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ gcsKERNEL_QUEUE_HEADER_PTR queueHead;
++
++ /* Get a shortcut to the head of the queue. */
++ queueHead = Command->queueHead;
++
++ /* Is the head buffer still being worked on? */
++ if (queueHead->pending)
++ {
++ /* Increment overflow count. */
++ Command->queueOverflow += 1;
++
++ /* Wait until the head becomes idle. */
++ gcmkERR_BREAK(_WaitForIdle(Command, queueHead));
++ }
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ Command->os,
++ Command->queueMutex,
++ gcvINFINITE
++ ));
++
++ /* Determine the first queue entry. */
++ queueHead->currentEntry = (gcsKERNEL_CMDQUEUE_PTR)
++ (
++ (gctUINT8_PTR) queueHead + gcmSIZEOF(gcsKERNEL_QUEUE_HEADER)
++ );
++
++ /* Set the pointer to the first entry. */
++ * Entries = queueHead->currentEntry;
++
++ /* Determine the number of available entries. */
++ * EntryCount = queueHead->size / gcmSIZEOF(gcsKERNEL_CMDQUEUE);
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
++
++static gceSTATUS
++_UnlockCurrentQueue(
++ IN gckVGCOMMAND Command,
++ IN gctUINT EntryCount
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++#if !gcdENABLE_INFINITE_SPEED_HW
++ gcsKERNEL_QUEUE_HEADER_PTR queueTail;
++ gcsKERNEL_QUEUE_HEADER_PTR queueHead;
++ gcsKERNEL_QUEUE_HEADER_PTR queueNext;
++ gctUINT queueSize;
++ gctUINT newSize;
++ gctUINT unusedSize;
++
++ /* Get shortcut to the head and to the tail of the queue. */
++ queueTail = Command->queueTail;
++ queueHead = Command->queueHead;
++
++ /* Dump the command buffer. */
++#if gcvDUMP_COMMAND_BUFFER
++ _DumpCommandQueue(Command, queueHead, EntryCount);
++#endif
++
++ /* Get a shortcut to the current queue size. */
++ queueSize = queueHead->size;
++
++ /* Determine the new queue size. */
++ newSize = EntryCount * gcmSIZEOF(gcsKERNEL_CMDQUEUE);
++ gcmkASSERT(newSize <= queueSize);
++
++ /* Determine the size of the unused area. */
++ unusedSize = queueSize - newSize;
++
++ /* Is the unused area big enough to become a buffer? */
++ if (unusedSize >= gcvMINUMUM_BUFFER)
++ {
++ gcsKERNEL_QUEUE_HEADER_PTR nextHead;
++
++ /* Place the new header. */
++ nextHead = (gcsKERNEL_QUEUE_HEADER_PTR)
++ (
++ (gctUINT8_PTR) queueHead
++ + gcmSIZEOF(gcsKERNEL_QUEUE_HEADER)
++ + newSize
++ );
++
++ /* Initialize the buffer. */
++ nextHead->size = unusedSize - gcmSIZEOF(gcsKERNEL_QUEUE_HEADER);
++ nextHead->pending = 0;
++
++ /* Link the buffer in. */
++ nextHead->next = queueHead->next;
++ queueHead->next = nextHead;
++ queueNext = nextHead;
++
++ /* Update the size of the current buffer. */
++ queueHead->size = newSize;
++ }
++
++ /* Not big enough. */
++ else
++ {
++ /* Determine the next queue. */
++ queueNext = queueHead->next;
++ }
++
++ /* Mark the buffer as busy. */
++ queueHead->pending = EntryCount;
++
++ /* Advance to the next buffer. */
++ Command->queueHead = queueNext;
++
++ /* Start the command processor if the queue was empty. */
++ if (queueTail == queueHead)
++ {
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* The first entry must be a command buffer. */
++ commandBuffer = queueTail->currentEntry->commandBuffer;
++
++ /* Start the command processor. */
++ gcmkERR_BREAK(gckVGHARDWARE_Execute(
++ Command->hardware,
++ commandBuffer->address,
++ commandBuffer->dataCount
++ ));
++ }
++
++ /* The queue was not empty. */
++ else
++ {
++ /* Advance the merge buffer if needed. */
++ if (queueHead == Command->mergeQueue)
++ {
++ Command->mergeQueue = queueNext;
++ }
++ }
++#endif
++
++ /* Release the mutex. */
++ gcmkERR_BREAK(gckOS_ReleaseMutex(
++ Command->os,
++ Command->queueMutex
++ ));
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ return status;
++}
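++
++/* Committing trims the head segment to exactly the entries written and,
++   when at least gcvMINUMUM_BUFFER bytes remain, turns the unused tail
++   into a fresh queue header linked right after it; smaller leftovers stay
++   attached to the committed segment. The command processor is kicked here
++   only if the queue was previously empty, otherwise the interrupt handler
++   chains into the next segment. */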
++
++
++/******************************************************************************\
++****************************** gckVGCOMMAND API Code *****************************
++\******************************************************************************/
++gceSTATUS
++gckVGCOMMAND_Construct(
++ IN gckVGKERNEL Kernel,
++ IN gctUINT TaskGranularity,
++ IN gctUINT QueueSize,
++ OUT gckVGCOMMAND * Command
++ )
++{
++ gceSTATUS status, last;
++ gckVGCOMMAND command = gcvNULL;
++ gcsKERNEL_QUEUE_HEADER_PTR queue;
++ gctUINT i, j;
++
++ gcmkHEADER_ARG("Kernel=0x%x TaskGranularity=0x%x QueueSize=0x%x Command=0x%x",
++ Kernel, TaskGranularity, QueueSize, Command);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(QueueSize >= gcvMINUMUM_BUFFER);
++ gcmkVERIFY_ARGUMENT(Command != gcvNULL);
++
++ do
++ {
++ /***********************************************************************
++ ** Generic object initialization.
++ */
++
++ /* Allocate the gckVGCOMMAND structure. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Kernel->os,
++ gcmSIZEOF(struct _gckVGCOMMAND),
++ (gctPOINTER *) &command
++ ));
++
++ /* Initialize the object. */
++ command->object.type = gcvOBJ_COMMAND;
++
++ /* Set the object pointers. */
++ command->kernel = Kernel;
++ command->os = Kernel->os;
++ command->hardware = Kernel->hardware;
++
++ /* Reset pointers. */
++ command->queue = gcvNULL;
++ command->queueMutex = gcvNULL;
++ command->taskMutex = gcvNULL;
++ command->commitMutex = gcvNULL;
++
++ command->powerStallBuffer = gcvNULL;
++ command->powerStallSignal = gcvNULL;
++ command->powerSemaphore = gcvNULL;
++
++ /* Reset context states. */
++ command->contextCounter = 0;
++ command->currentContext = 0;
++
++ /* Enable command buffer dumping. */
++ command->enableDumping = gcvTRUE;
++
++ /* Set features. */
++ command->fe20 = Kernel->hardware->fe20;
++ command->vg20 = Kernel->hardware->vg20;
++ command->vg21 = Kernel->hardware->vg21;
++
++        /* Reset task table. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ command->taskTable, gcmSIZEOF(command->taskTable)
++ ));
++
++ /* Query command buffer attributes. */
++ gcmkERR_BREAK(gckVGCOMMAND_InitializeInfo(command));
++
++ /* Create the control mutexes. */
++ gcmkERR_BREAK(gckOS_CreateMutex(Kernel->os, &command->queueMutex));
++ gcmkERR_BREAK(gckOS_CreateMutex(Kernel->os, &command->taskMutex));
++ gcmkERR_BREAK(gckOS_CreateMutex(Kernel->os, &command->commitMutex));
++
++ /* Create the power management semaphore. */
++ gcmkERR_BREAK(gckOS_CreateSemaphore(Kernel->os,
++ &command->powerSemaphore));
++
++ gcmkERR_BREAK(gckOS_CreateSignal(Kernel->os,
++ gcvFALSE, &command->powerStallSignal));
++
++ /***********************************************************************
++ ** Command queue initialization.
++ */
++
++ /* Allocate the command queue. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Kernel->os,
++ QueueSize,
++ (gctPOINTER *) &command->queue
++ ));
++
++ /* Initialize the command queue. */
++ queue = command->queue;
++
++ queue->size = QueueSize - gcmSIZEOF(gcsKERNEL_QUEUE_HEADER);
++ queue->pending = 0;
++ queue->next = queue;
++
++ command->queueHead =
++ command->queueTail =
++ command->mergeQueue = command->queue;
++
++ command->queueOverflow = 0;
++
++
++ /***********************************************************************
++ ** Enable TS overflow interrupt.
++ */
++
++ command->info.tsOverflowInt = 0;
++ gcmkERR_BREAK(gckVGINTERRUPT_Enable(
++ Kernel->interrupt,
++ &command->info.tsOverflowInt,
++ _EventHandler_TSOverflow
++ ));
++
++ /* Mask out the interrupt. */
++ Kernel->hardware->eventMask &= ~(1 << command->info.tsOverflowInt);
++
++
++ /***********************************************************************
++ ** Enable Bus Error interrupt.
++ */
++
++ /* Hardwired to bit 31. */
++ command->busErrorInt = 31;
++
++ /* Enable the interrupt. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Enable(
++ Kernel->interrupt,
++ &command->busErrorInt,
++ _EventHandler_BusError
++ ));
++
++
++ command->powerStallInt = 30;
++ /* Enable the interrupt. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Enable(
++ Kernel->interrupt,
++ &command->powerStallInt,
++ _EventHandler_PowerStall
++ ));
++
++ /***********************************************************************
++ ** Task management initialization.
++ */
++
++ command->taskStorage = gcvNULL;
++ command->taskStorageGranularity = TaskGranularity;
++ command->taskStorageUsable = TaskGranularity - gcmSIZEOF(gcsTASK_STORAGE);
++
++ command->taskFreeHead = gcvNULL;
++ command->taskFreeTail = gcvNULL;
++
++ /* Enable block handlers. */
++ for (i = 0; i < gcmCOUNTOF(_blockHandlers); i += 1)
++ {
++ /* Get the target hardware block. */
++ gceBLOCK block = _blockHandlers[i].block;
++
++ /* Get the interrupt array entry. */
++ gcsBLOCK_TASK_ENTRY_PTR entry = &command->taskTable[block];
++
++ /* Determine the interrupt value index. */
++ gctUINT index = entry->interruptCount;
++
++ /* Create the block semaphore. */
++ if (entry->interruptSemaphore == gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_CreateSemaphoreVG(
++ command->os, &entry->interruptSemaphore
++ ));
++ }
++
++ /* Enable auto-detection. */
++ entry->interruptArray[index] = -1;
++
++ /* Enable interrupt for the block. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Enable(
++ Kernel->interrupt,
++ &entry->interruptArray[index],
++ _blockHandlers[i].handler
++ ));
++
++ /* Update the number of registered interrupts. */
++ entry->interruptCount += 1;
++
++        /* Increment the semaphore to allow the usage of the registered
++           interrupt. */
++ gcmkERR_BREAK(gckOS_IncrementSemaphore(
++ command->os, entry->interruptSemaphore
++ ));
++
++ }
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++
++ /* Get the FE interrupt. */
++ command->info.feBufferInt
++ = command->taskTable[gcvBLOCK_COMMAND].interruptArray[0];
++
++ /* Return gckVGCOMMAND object pointer. */
++ *Command = command;
++
++ gcmkFOOTER_ARG("*Command=0x%x",*Command);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (command != gcvNULL)
++ {
++ /* Disable block handlers. */
++ for (i = 0; i < gcvBLOCK_COUNT; i += 1)
++ {
++ /* Get the task table entry. */
++ gcsBLOCK_TASK_ENTRY_PTR entry = &command->taskTable[i];
++
++ /* Destroy the semaphore. */
++ if (entry->interruptSemaphore != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DestroySemaphore(
++ command->os, entry->interruptSemaphore
++ ));
++ }
++
++ /* Disable all enabled interrupts. */
++ for (j = 0; j < entry->interruptCount; j += 1)
++ {
++ /* Must be a valid value. */
++ gcmkASSERT(entry->interruptArray[j] >= 0);
++ gcmkASSERT(entry->interruptArray[j] <= 31);
++
++ /* Disable the interrupt. */
++ gcmkCHECK_STATUS(gckVGINTERRUPT_Disable(
++ Kernel->interrupt,
++ entry->interruptArray[j]
++ ));
++ }
++ }
++
++ /* Disable the bus error interrupt. */
++ gcmkCHECK_STATUS(gckVGINTERRUPT_Disable(
++ Kernel->interrupt,
++ command->busErrorInt
++ ));
++
++ /* Disable TS overflow interrupt. */
++ if (command->info.tsOverflowInt != -1)
++ {
++ gcmkCHECK_STATUS(gckVGINTERRUPT_Disable(
++ Kernel->interrupt,
++ command->info.tsOverflowInt
++ ));
++ }
++
++ /* Delete the commit mutex. */
++ if (command->commitMutex != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DeleteMutex(
++ Kernel->os, command->commitMutex
++ ));
++ }
++
++        /* Delete the task mutex. */
++ if (command->taskMutex != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DeleteMutex(
++ Kernel->os, command->taskMutex
++ ));
++ }
++
++ /* Delete the command queue mutex. */
++ if (command->queueMutex != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DeleteMutex(
++ Kernel->os, command->queueMutex
++ ));
++ }
++
++ /* Delete the command queue. */
++ if (command->queue != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_Free(
++ Kernel->os, command->queue
++ ));
++ }
++
++ if (command->powerSemaphore != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DestroySemaphore(
++ Kernel->os, command->powerSemaphore));
++ }
++
++ if (command->powerStallSignal != gcvNULL)
++ {
++            /* Destroy the power management signal. */
++ gcmkVERIFY_OK(gckOS_DestroySignal(
++ Kernel->os,
++ command->powerStallSignal));
++ }
++
++ /* Free the gckVGCOMMAND structure. */
++ gcmkCHECK_STATUS(gckOS_Free(
++ Kernel->os, command
++ ));
++ }
++
++ gcmkFOOTER();
++ /* Return the error. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_Destroy(
++ OUT gckVGCOMMAND Command
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ gcmkHEADER_ARG("Command=0x%x", Command);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++
++ do
++ {
++ gctUINT i;
++ gcsTASK_STORAGE_PTR nextStorage;
++
++ if (Command->queueHead != gcvNULL)
++ {
++ /* Wait until the head becomes idle. */
++ gcmkERR_BREAK(_WaitForIdle(Command, Command->queueHead));
++ }
++
++ /* Disable block handlers. */
++ for (i = 0; i < gcvBLOCK_COUNT; i += 1)
++ {
++ /* Get the interrupt array entry. */
++ gcsBLOCK_TASK_ENTRY_PTR entry = &Command->taskTable[i];
++
++ /* Determine the index of the last interrupt in the array. */
++ gctINT index = entry->interruptCount - 1;
++
++ /* Destroy the semaphore. */
++ if (entry->interruptSemaphore != gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_DestroySemaphore(
++ Command->os, entry->interruptSemaphore
++ ));
++ }
++
++ /* Disable all enabled interrupts. */
++ while (index >= 0)
++ {
++ /* Must be a valid value. */
++ gcmkASSERT(entry->interruptArray[index] >= 0);
++ gcmkASSERT(entry->interruptArray[index] <= 31);
++
++ /* Disable the interrupt. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Disable(
++ Command->kernel->interrupt,
++ entry->interruptArray[index]
++ ));
++
++ /* Update to the next interrupt. */
++ index -= 1;
++ entry->interruptCount -= 1;
++ }
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++ }
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++
++ /* Disable the bus error interrupt. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Disable(
++ Command->kernel->interrupt,
++ Command->busErrorInt
++ ));
++
++ /* Disable TS overflow interrupt. */
++ if (Command->info.tsOverflowInt != -1)
++ {
++ gcmkERR_BREAK(gckVGINTERRUPT_Disable(
++ Command->kernel->interrupt,
++ Command->info.tsOverflowInt
++ ));
++
++ Command->info.tsOverflowInt = -1;
++ }
++
++ /* Delete the commit mutex. */
++ if (Command->commitMutex != gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_DeleteMutex(
++ Command->os, Command->commitMutex
++ ));
++
++ Command->commitMutex = gcvNULL;
++ }
++
++        /* Delete the task mutex. */
++ if (Command->taskMutex != gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_DeleteMutex(
++ Command->os, Command->taskMutex
++ ));
++
++ Command->taskMutex = gcvNULL;
++ }
++
++ /* Delete the command queue mutex. */
++ if (Command->queueMutex != gcvNULL)
++ {
++ gcmkERR_BREAK(gckOS_DeleteMutex(
++ Command->os, Command->queueMutex
++ ));
++
++ Command->queueMutex = gcvNULL;
++ }
++
++ if (Command->powerSemaphore != gcvNULL)
++ {
++ /* Destroy the power management semaphore. */
++ gcmkERR_BREAK(gckOS_DestroySemaphore(
++ Command->os, Command->powerSemaphore));
++ }
++
++ if (Command->powerStallSignal != gcvNULL)
++ {
++            /* Destroy the power management signal. */
++ gcmkERR_BREAK(gckOS_DestroySignal(
++ Command->os,
++ Command->powerStallSignal));
++ }
++
++ if (Command->queue != gcvNULL)
++ {
++ /* Delete the command queue. */
++ gcmkERR_BREAK(gckOS_Free(
++ Command->os, Command->queue
++ ));
++ }
++
++ /* Destroy all allocated buffers. */
++ while (Command->taskStorage)
++ {
++ /* Copy the buffer pointer. */
++ nextStorage = Command->taskStorage->next;
++
++ /* Free the current container. */
++ gcmkERR_BREAK(gckOS_Free(
++ Command->os, Command->taskStorage
++ ));
++
++ /* Advance to the next one. */
++ Command->taskStorage = nextStorage;
++ }
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++
++ /* Mark the object as unknown. */
++ Command->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckVGCOMMAND structure. */
++ gcmkERR_BREAK(gckOS_Free(Command->os, Command));
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Restore the object type if failed. */
++ Command->object.type = gcvOBJ_COMMAND;
++
++ gcmkFOOTER();
++ /* Return the error. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_QueryCommandBuffer(
++ IN gckVGCOMMAND Command,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ )
++{
++ gcmkHEADER_ARG("Command=0x%x Information=0x%x", Command, Information);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(Information != gcvNULL);
++
++ /* Copy the information. */
++ gcmkVERIFY_OK(gckOS_MemCopy(
++ Information, &Command->info, sizeof(gcsCOMMAND_BUFFER_INFO)
++ ));
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGCOMMAND_Allocate(
++ IN gckVGCOMMAND Command,
++ IN gctSIZE_T Size,
++ OUT gcsCMDBUFFER_PTR * CommandBuffer,
++ OUT gctPOINTER * Data
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x Size=0x%x CommandBuffer=0x%x Data=0x%x",
++ Command, Size, CommandBuffer, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++    gcmkVERIFY_ARGUMENT(CommandBuffer != gcvNULL);
++    gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++ do
++ {
++ /* Allocate the buffer. */
++ gcmkERR_BREAK(_AllocateCommandBuffer(Command, Size, CommandBuffer));
++
++ /* Determine the data pointer. */
++        *Data = (gctUINT8_PTR) (*CommandBuffer) + (*CommandBuffer)->bufferOffset;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_Free(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x CommandBuffer=0x%x",
++ Command, CommandBuffer);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(CommandBuffer != gcvNULL);
++
++ /* Free command buffer. */
++ status = _FreeCommandBuffer(Command->kernel, CommandBuffer);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_Execute(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Command=0x%x CommandBuffer=0x%x",
++ Command, CommandBuffer);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(CommandBuffer != gcvNULL);
++
++ do
++ {
++ gctUINT queueLength;
++ gcsKERNEL_CMDQUEUE_PTR kernelEntry;
++
++ /* Lock the current queue. */
++ gcmkERR_BREAK(_LockCurrentQueue(
++ Command, &kernelEntry, &queueLength
++ ));
++
++ /* Set the buffer. */
++ kernelEntry->commandBuffer = CommandBuffer;
++ kernelEntry->handler = _FreeKernelCommandBuffer;
++
++        /* Unlock the current queue. */
++ gcmkERR_BREAK(_UnlockCurrentQueue(
++ Command, 1
++ ));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++gceSTATUS
++gckVGCOMMAND_Commit(
++ IN gckVGCOMMAND Command,
++ IN gcsVGCONTEXT_PTR Context,
++ IN gcsVGCMDQUEUE_PTR Queue,
++ IN gctUINT EntryCount,
++ IN gcsTASK_MASTER_TABLE_PTR TaskTable
++ )
++{
++ /*
++ The first buffer is executed through a direct gckVGHARDWARE_Execute call,
++ therefore only an update is needed after the execution is over. All
++        subsequent buffers need to be executed upon the first update call from
++ the FE interrupt handler.
++ */
++
++ static gcsQUEUE_UPDATE_CONTROL _dynamicBuffer[] =
++ {
++ {
++ _UpdateDynamicCommandBuffer,
++ _UpdateDynamicCommandBuffer,
++ _UpdateLastDynamicCommandBuffer,
++ _UpdateLastDynamicCommandBuffer
++ },
++ {
++ _ExecuteDynamicCommandBuffer,
++ _UpdateDynamicCommandBuffer,
++ _ExecuteLastDynamicCommandBuffer,
++ _UpdateLastDynamicCommandBuffer
++ }
++ };
++
++ static gcsQUEUE_UPDATE_CONTROL _staticBuffer[] =
++ {
++ {
++ _UpdateStaticCommandBuffer,
++ _UpdateStaticCommandBuffer,
++ _UpdateLastStaticCommandBuffer,
++ _UpdateLastStaticCommandBuffer
++ },
++ {
++ _ExecuteStaticCommandBuffer,
++ _UpdateStaticCommandBuffer,
++ _ExecuteLastStaticCommandBuffer,
++ _UpdateLastStaticCommandBuffer
++ }
++ };
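++    /* Both tables are indexed by 'controlIndex' below: row 0 is selected for
++       the first entry processed after each queue lock, row 1 for the rest. */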
++
++ gceSTATUS status, last;
++
++ gcmkHEADER_ARG("Command=0x%x Context=0x%x Queue=0x%x EntryCount=0x%x TaskTable=0x%x",
++ Command, Context, Queue, EntryCount, TaskTable);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Command, gcvOBJ_COMMAND);
++ gcmkVERIFY_ARGUMENT(Context != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Queue != gcvNULL);
++ gcmkVERIFY_ARGUMENT(EntryCount > 1);
++
++#ifdef __QNXNTO__
++ TaskTable->coid = Context->coid;
++ TaskTable->rcvid = Context->rcvid;
++#endif /* __QNXNTO__ */
++
++ do
++ {
++ gctBOOL haveFETasks;
++ gctUINT queueSize;
++ gcsVGCMDQUEUE_PTR mappedQueue;
++ gcsVGCMDQUEUE_PTR userEntry;
++ gcsKERNEL_CMDQUEUE_PTR kernelEntry;
++ gcsQUEUE_UPDATE_CONTROL_PTR queueControl;
++ gctUINT currentLength;
++ gctUINT queueLength;
++ gctUINT entriesQueued;
++ gctUINT8_PTR previousEnd;
++ gctBOOL previousDynamic;
++ gctBOOL previousExecuted;
++ gctUINT controlIndex;
++
++ gcmkERR_BREAK(gckVGHARDWARE_SetPowerManagementState(
++ Command->hardware, gcvPOWER_ON_AUTO
++ ));
++
++ /* Acquire the power semaphore. */
++ gcmkERR_BREAK(gckOS_AcquireSemaphore(
++ Command->os, Command->powerSemaphore
++ ));
++
++ /* Acquire the mutex. */
++ status = gckOS_AcquireMutex(
++ Command->os,
++ Command->commitMutex,
++ gcvINFINITE
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(
++ Command->os, Command->powerSemaphore));
++ break;
++ }
++
++ do
++ {
++ gcmkERR_BREAK(_FlushMMU(Command));
++
++ /* Assign a context ID if not yet assigned. */
++ if (Context->id == 0)
++ {
++ /* Assign the next context number. */
++ Context->id = ++ Command->contextCounter;
++
++ /* See if we overflowed. */
++ if (Command->contextCounter == 0)
++ {
++ /* We actually did overflow, wow... */
++ status = gcvSTATUS_OUT_OF_RESOURCES;
++ break;
++ }
++ }
++
++ /* The first entry in the queue is always the context buffer.
++ Verify whether the user context is the same as the current
++ context and if that's the case, skip the first entry. */
++ if (Context->id == Command->currentContext)
++ {
++ /* Same context as before, skip the first entry. */
++ EntryCount -= 1;
++ Queue += 1;
++
++ /* Set the signal to avoid user waiting. */
++#ifdef __QNXNTO__
++ gcmkERR_BREAK(gckOS_UserSignal(
++ Command->os, Context->signal, Context->rcvid, Context->coid
++ ));
++#else
++ gcmkERR_BREAK(gckOS_UserSignal(
++ Command->os, Context->signal, Context->process
++ ));
++
++#endif /* __QNXNTO__ */
++
++ }
++ else
++ {
++ /* Different user context - keep the first entry.
++ Set the user context as the current one. */
++ Command->currentContext = Context->id;
++ }
++
++ /* Reset pointers. */
++ queueControl = gcvNULL;
++ previousEnd = gcvNULL;
++
++ /* Determine whether there are FE tasks to be performed. */
++ haveFETasks = (TaskTable->table[gcvBLOCK_COMMAND].head != gcvNULL);
++
++ /* Determine the size of the queue. */
++ queueSize = EntryCount * gcmSIZEOF(gcsVGCMDQUEUE);
++
++ /* Map the command queue into the kernel space. */
++ gcmkERR_BREAK(gckOS_MapUserPointer(
++ Command->os,
++ Queue,
++ queueSize,
++ (gctPOINTER *) &mappedQueue
++ ));
++
++ /* Set the first entry. */
++ userEntry = mappedQueue;
++
++ /* Process the command queue. */
++ while (EntryCount)
++ {
++ /* Lock the current queue. */
++ gcmkERR_BREAK(_LockCurrentQueue(
++ Command, &kernelEntry, &queueLength
++ ));
++
++ /* Determine the number of entries to process. */
++ currentLength = (queueLength < EntryCount)
++ ? queueLength
++ : EntryCount;
++
++ /* Update the number of the entries left to process. */
++ EntryCount -= currentLength;
++
++ /* Reset previous flags. */
++ previousDynamic = gcvFALSE;
++ previousExecuted = gcvFALSE;
++
++ /* Set the initial control index. */
++ controlIndex = 0;
++
++ /* Process entries. */
++ for (entriesQueued = 0; entriesQueued < currentLength; entriesQueued += 1)
++ {
++ /* Get the kernel pointer to the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer = gcvNULL;
++ gcmkERR_BREAK(_ConvertUserCommandBufferPointer(
++ Command,
++ userEntry->commandBuffer,
++ &commandBuffer
++ ));
++
++ /* Is it a dynamic command buffer? */
++ if (userEntry->dynamic)
++ {
++ /* Select dynamic buffer control functions. */
++ queueControl = &_dynamicBuffer[controlIndex];
++ }
++
++ /* No, a static command buffer. */
++ else
++ {
++ /* Select static buffer control functions. */
++ queueControl = &_staticBuffer[controlIndex];
++ }
++
++ /* Set the command buffer pointer to the entry. */
++ kernelEntry->commandBuffer = commandBuffer;
++
++ /* If the previous entry was a dynamic command buffer,
++ link it to the current. */
++ if (previousDynamic)
++ {
++ gcmkERR_BREAK(gckVGCOMMAND_FetchCommand(
++ Command,
++ previousEnd,
++ commandBuffer->address,
++ commandBuffer->dataCount,
++ gcvNULL
++ ));
++
++ /* The buffer will be auto-executed, only need to
++ update it after it has been executed. */
++ kernelEntry->handler = queueControl->update;
++
++ /* The buffer is only being updated. */
++ previousExecuted = gcvFALSE;
++ }
++ else
++ {
++ /* Set the buffer up for execution. */
++ kernelEntry->handler = queueControl->execute;
++
++                        /* The buffer is being executed. */
++ previousExecuted = gcvTRUE;
++ }
++
++ /* The current buffer's END command becomes the last END. */
++ previousEnd
++ = ((gctUINT8_PTR) commandBuffer)
++ + commandBuffer->bufferOffset
++ + commandBuffer->dataCount * Command->info.commandAlignment
++ - Command->info.staticTailSize;
++
++ /* Update the last entry info. */
++ previousDynamic = userEntry->dynamic;
++
++ /* Advance entries. */
++ userEntry += 1;
++ kernelEntry += 1;
++
++ /* Update the control index. */
++ controlIndex = 1;
++ }
++
++ /* If the previous entry was a dynamic command buffer,
++ terminate it with an END. */
++ if (previousDynamic)
++ {
++ gcmkERR_BREAK(gckVGCOMMAND_EndCommand(
++ Command,
++ previousEnd,
++ Command->info.feBufferInt,
++ gcvNULL
++ ));
++ }
++
++ /* Last buffer? */
++ if (EntryCount == 0)
++ {
++ /* Modify the last command buffer's routines to handle
++                       tasks, if any. */
++ if (haveFETasks)
++ {
++ if (previousExecuted)
++ {
++ kernelEntry[-1].handler = queueControl->lastExecute;
++ }
++ else
++ {
++ kernelEntry[-1].handler = queueControl->lastUpdate;
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkERR_BREAK(gckOS_ReleaseMutex(
++ Command->os,
++ Command->queueMutex
++ ));
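++                    /* The queue mutex is dropped around _ScheduleTasks,
++                       which presumably must not run with the queue locked. */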
++ /* Schedule tasks. */
++ gcmkERR_BREAK(_ScheduleTasks(Command, TaskTable, previousEnd));
++
++ /* Acquire the mutex. */
++ gcmkERR_BREAK(gckOS_AcquireMutex(
++ Command->os,
++ Command->queueMutex,
++ gcvINFINITE
++ ));
++ }
++
++                /* Unlock and schedule the current queue for execution. */
++ gcmkERR_BREAK(_UnlockCurrentQueue(
++ Command, currentLength
++ ));
++ }
++
++
++ /* Unmap the user command buffer. */
++ gcmkERR_BREAK(gckOS_UnmapUserPointer(
++ Command->os,
++ Queue,
++ queueSize,
++ mappedQueue
++ ));
++ }
++ while (gcvFALSE);
++
++ /* Release the mutex. */
++ gcmkCHECK_STATUS(gckOS_ReleaseMutex(
++ Command->os,
++ Command->commitMutex
++ ));
++
++ gcmkVERIFY_OK(gckOS_ReleaseSemaphore(
++ Command->os, Command->powerSemaphore));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++#endif /* gcdENABLE_VG */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_db.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_db.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_db.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_db.c 2015-07-27 23:13:06.190893891 +0200
+@@ -0,0 +1,1618 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_DATABASE
++
++/*******************************************************************************
++***** Private functions *******************************************************/
++
++#define _GetSlot(database, x) \
++ (gctUINT32)(((gcmPTR_TO_UINT64(x) >> 7) % gcmCOUNTOF(database->list)))
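++/* The hash drops the low 7 bits of the pointer (presumably the allocation
++   alignment) before folding it into the list range. */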
++
++/*******************************************************************************
++** gckKERNEL_NewDatabase
++**
++** Create a new database structure and insert it to the head of the hash list.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** ProcessID that identifies the database.
++**
++** OUTPUT:
++**
++** gcsDATABASE_PTR * Database
++** Pointer to a variable receiving the database structure pointer on
++** success.
++*/
++static gceSTATUS
++gckKERNEL_NewDatabase(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ OUT gcsDATABASE_PTR * Database
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gctBOOL acquired = gcvFALSE;
++ gctSIZE_T slot;
++ gcsDATABASE_PTR existingDatabase;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Compute the hash for the database. */
++ slot = ProcessID % gcmCOUNTOF(Kernel->db->db);
++
++ /* Walk the hash list. */
++ for (existingDatabase = Kernel->db->db[slot];
++ existingDatabase != gcvNULL;
++ existingDatabase = existingDatabase->next)
++ {
++ if (existingDatabase->processID == ProcessID)
++ {
++            /* The same process cannot be added twice. */
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++ }
++ }
++
++ if (Kernel->db->freeDatabase != gcvNULL)
++ {
++ /* Allocate a database from the free list. */
++ database = Kernel->db->freeDatabase;
++ Kernel->db->freeDatabase = database->next;
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ /* Allocate a new database from the heap. */
++ gcmkONERROR(gckOS_Allocate(Kernel->os,
++ gcmSIZEOF(gcsDATABASE),
++ &pointer));
++
++ database = pointer;
++ }
++
++ /* Insert the database into the hash. */
++ database->next = Kernel->db->db[slot];
++ Kernel->db->db[slot] = database;
++
++ /* Save the hash slot. */
++ database->slot = slot;
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Return the database. */
++ *Database = database;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Database=0x%x", *Database);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_FindDatabase
++**
++** Find a database identified by a process ID and move it to the head of the
++** hash list.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** ProcessID that identifies the database.
++**
++** gctBOOL LastProcessID
++** gcvTRUE if searching for the last known process ID. gcvFALSE if
++** we need to search for the process ID specified by the ProcessID
++** argument.
++**
++** OUTPUT:
++**
++** gcsDATABASE_PTR * Database
++** Pointer to a variable receiving the database structure pointer on
++** success.
++*/
++static gceSTATUS
++gckKERNEL_FindDatabase(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL LastProcessID,
++ OUT gcsDATABASE_PTR * Database
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database, previous;
++ gctSIZE_T slot;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d LastProcessID=%d",
++ Kernel, ProcessID, LastProcessID);
++
++ /* Compute the hash for the database. */
++ slot = ProcessID % gcmCOUNTOF(Kernel->db->db);
++
++ /* Check whether we are getting the last known database. */
++ if (LastProcessID)
++ {
++ /* Use last database. */
++ database = Kernel->db->lastDatabase;
++
++ if (database == gcvNULL)
++ {
++ /* Database not found. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++ }
++ else
++ {
++ /* Walk the hash list. */
++ for (previous = gcvNULL, database = Kernel->db->db[slot];
++ database != gcvNULL;
++ database = database->next)
++ {
++ if (database->processID == ProcessID)
++ {
++ /* Found it! */
++ break;
++ }
++
++ previous = database;
++ }
++
++ if (database == gcvNULL)
++ {
++ /* Database not found. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ if (previous != gcvNULL)
++ {
++ /* Move database to the head of the hash list. */
++ previous->next = database->next;
++ database->next = Kernel->db->db[slot];
++ Kernel->db->db[slot] = database;
++ }
++ }
++
++ /* Return the database. */
++ *Database = database;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Database=0x%x", *Database);
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_DeleteDatabase
++**
++** Remove a database from the hash list and delete its structure.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsDATABASE_PTR Database
++** Pointer to the database structure to remove.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++static gceSTATUS
++gckKERNEL_DeleteDatabase(
++ IN gckKERNEL Kernel,
++ IN gcsDATABASE_PTR Database
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++
++ gcmkHEADER_ARG("Kernel=0x%x Database=0x%x", Kernel, Database);
++
++ /* Check slot value. */
++ gcmkVERIFY_ARGUMENT(Database->slot < gcmCOUNTOF(Kernel->db->db));
++
++ if (Database->slot < gcmCOUNTOF(Kernel->db->db))
++ {
++        /* Check if the database is the head of the hash list. */
++ if (Kernel->db->db[Database->slot] == Database)
++ {
++ /* Remove the database from the hash list. */
++ Kernel->db->db[Database->slot] = Database->next;
++ }
++ else
++ {
++            /* Walk the hash list to find the database. */
++ for (database = Kernel->db->db[Database->slot];
++ database != gcvNULL;
++ database = database->next
++ )
++ {
++ /* Check if the next list entry is this database. */
++ if (database->next == Database)
++ {
++ /* Remove the database from the hash list. */
++ database->next = Database->next;
++ break;
++ }
++ }
++
++ if (database == gcvNULL)
++ {
++ /* Ouch! Something got corrupted. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++ }
++ }
++
++ if (Kernel->db->lastDatabase != gcvNULL)
++ {
++ /* Insert database to the free list. */
++ Kernel->db->lastDatabase->next = Kernel->db->freeDatabase;
++ Kernel->db->freeDatabase = Kernel->db->lastDatabase;
++ }
++
++ /* Keep database as the last database. */
++ Kernel->db->lastDatabase = Database;
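++    /* The database itself is not freed here: a LastProcessID lookup in
++       gckKERNEL_FindDatabase() can still return it. */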
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_NewRecord
++**
++** Create a new database record structure and insert it to the head of the
++** database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsDATABASE_PTR Database
++** Pointer to a database structure.
++**
++** OUTPUT:
++**
++** gcsDATABASE_RECORD_PTR * Record
++** Pointer to a variable receiving the database record structure
++** pointer on success.
++*/
++static gceSTATUS
++gckKERNEL_NewRecord(
++ IN gckKERNEL Kernel,
++ IN gcsDATABASE_PTR Database,
++ IN gctUINT32 Slot,
++ OUT gcsDATABASE_RECORD_PTR * Record
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_RECORD_PTR record = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x Database=0x%x", Kernel, Database);
++
++ if (Kernel->db->freeRecord != gcvNULL)
++ {
++ /* Allocate the record from the free list. */
++ record = Kernel->db->freeRecord;
++ Kernel->db->freeRecord = record->next;
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ /* Allocate the record from the heap. */
++ gcmkONERROR(gckOS_Allocate(Kernel->os,
++ gcmSIZEOF(gcsDATABASE_RECORD),
++ &pointer));
++
++ record = pointer;
++ }
++
++ /* Insert the record in the database. */
++ record->next = Database->list[Slot];
++ Database->list[Slot] = record;
++
++ /* Return the record. */
++ *Record = record;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Record=0x%x", *Record);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (record != gcvNULL)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, record));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_DeleteRecord
++**
++** Remove a database record from the database and delete its structure.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsDATABASE_PTR Database
++** Pointer to a database structure.
++**
++** gceDATABASE_TYPE Type
++** Type of the record to remove.
++**
++** gctPOINTER Data
++** Data of the record to remove.
++**
++** OUTPUT:
++**
++** gctSIZE_T_PTR Bytes
++** Pointer to a variable that receives the size of the record deleted.
++** Can be gcvNULL if the size is not required.
++*/
++static gceSTATUS
++gckKERNEL_DeleteRecord(
++ IN gckKERNEL Kernel,
++ IN gcsDATABASE_PTR Database,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Data,
++ OUT gctSIZE_T_PTR Bytes OPTIONAL
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_RECORD_PTR record, previous;
++ gctUINT32 slot = _GetSlot(Database, Data);
++
++ gcmkHEADER_ARG("Kernel=0x%x Database=0x%x Type=%d Data=0x%x",
++ Kernel, Database, Type, Data);
++
++ /* Scan the database for this record. */
++ for (record = Database->list[slot], previous = gcvNULL;
++ record != gcvNULL;
++ record = record->next
++ )
++ {
++ if ((record->type == Type)
++ && (record->data == Data)
++ )
++ {
++ /* Found it! */
++ break;
++ }
++
++ previous = record;
++ }
++
++ if (record == gcvNULL)
++ {
++ /* Ouch! This record is not found? */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ if (Bytes != gcvNULL)
++ {
++ /* Return size of record. */
++ *Bytes = record->bytes;
++ }
++
++ /* Remove record from database. */
++ if (previous == gcvNULL)
++ {
++ Database->list[slot] = record->next;
++ }
++ else
++ {
++ previous->next = record->next;
++ }
++
++ /* Insert record in free list. */
++ record->next = Kernel->db->freeRecord;
++ Kernel->db->freeRecord = record;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu", gcmOPT_VALUE(Bytes));
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_FindRecord
++**
++** Find a database record from the database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsDATABASE_PTR Database
++** Pointer to a database structure.
++**
++**      gceDATABASE_TYPE Type
++**          Type of the record to find.
++**
++**      gctPOINTER Data
++**          Data of the record to find.
++**
++** OUTPUT:
++**
++**      gcsDATABASE_RECORD_PTR Record
++**          Pointer to a record structure that receives a copy of the found
++**          record on success. Can be gcvNULL if the record contents are not
++**          required.
++*/
++static gceSTATUS
++gckKERNEL_FindRecord(
++ IN gckKERNEL Kernel,
++ IN gcsDATABASE_PTR Database,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Data,
++ OUT gcsDATABASE_RECORD_PTR Record
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_RECORD_PTR record;
++ gctUINT32 slot = _GetSlot(Database, Data);
++
++ gcmkHEADER_ARG("Kernel=0x%x Database=0x%x Type=%d Data=0x%x",
++ Kernel, Database, Type, Data);
++
++ /* Scan the database for this record. */
++ for (record = Database->list[slot];
++ record != gcvNULL;
++ record = record->next
++ )
++ {
++ if ((record->type == Type)
++ && (record->data == Data)
++ )
++ {
++ /* Found it! */
++ break;
++ }
++ }
++
++ if (record == gcvNULL)
++ {
++ /* Ouch! This record is not found? */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ if (Record != gcvNULL)
++ {
++ /* Return information of record. */
++ gcmkONERROR(
++ gckOS_MemCopy(Record, record, sizeof(gcsDATABASE_RECORD)));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("Record=0x%x", Record);
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++
++/*******************************************************************************
++***** Public API **************************************************************/
++
++/*******************************************************************************
++** gckKERNEL_CreateProcessDB
++**
++** Create a new process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_CreateProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database = gcvNULL;
++ gctUINT32 i;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Create a new database. */
++ gcmkONERROR(gckKERNEL_NewDatabase(Kernel, ProcessID, &database));
++
++ /* Initialize the database. */
++ database->processID = ProcessID;
++ database->vidMem.bytes = 0;
++ database->vidMem.maxBytes = 0;
++ database->vidMem.totalBytes = 0;
++ database->nonPaged.bytes = 0;
++ database->nonPaged.maxBytes = 0;
++ database->nonPaged.totalBytes = 0;
++ database->contiguous.bytes = 0;
++ database->contiguous.maxBytes = 0;
++ database->contiguous.totalBytes = 0;
++ database->mapMemory.bytes = 0;
++ database->mapMemory.maxBytes = 0;
++ database->mapMemory.totalBytes = 0;
++ database->mapUserMemory.bytes = 0;
++ database->mapUserMemory.maxBytes = 0;
++ database->mapUserMemory.totalBytes = 0;
++ database->vidMemResv.bytes = 0;
++ database->vidMemResv.maxBytes = 0;
++ database->vidMemResv.totalBytes = 0;
++ database->vidMemCont.bytes = 0;
++ database->vidMemCont.maxBytes = 0;
++ database->vidMemCont.totalBytes = 0;
++ database->vidMemVirt.bytes = 0;
++ database->vidMemVirt.maxBytes = 0;
++ database->vidMemVirt.totalBytes = 0;
++
++ for (i = 0; i < gcmCOUNTOF(database->list); i++)
++ {
++ database->list[i] = gcvNULL;
++ }
++
++#if gcdSECURE_USER
++ {
++ gctINT slot;
++ gcskSECURE_CACHE * cache = &database->cache;
++
++ /* Setup the linked list of cache nodes. */
++ for (slot = 1; slot <= gcdSECURE_CACHE_SLOTS; ++slot)
++ {
++ cache->cache[slot].logical = gcvNULL;
++
++#if gcdSECURE_CACHE_METHOD != gcdSECURE_CACHE_TABLE
++ cache->cache[slot].prev = &cache->cache[slot - 1];
++ cache->cache[slot].next = &cache->cache[slot + 1];
++# endif
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ cache->cache[slot].nextHash = gcvNULL;
++ cache->cache[slot].prevHash = gcvNULL;
++# endif
++ }
++
++#if gcdSECURE_CACHE_METHOD != gcdSECURE_CACHE_TABLE
++ /* Setup the head and tail of the cache. */
++ cache->cache[0].next = &cache->cache[1];
++ cache->cache[0].prev = &cache->cache[gcdSECURE_CACHE_SLOTS];
++ cache->cache[0].logical = gcvNULL;
++
++ /* Fix up the head and tail pointers. */
++ cache->cache[0].next->prev = &cache->cache[0];
++ cache->cache[0].prev->next = &cache->cache[0];
++# endif
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ /* Zero out the hash table. */
++ for (slot = 0; slot < gcmCOUNTOF(cache->hash); ++slot)
++ {
++ cache->hash[slot].logical = gcvNULL;
++ cache->hash[slot].nextHash = gcvNULL;
++ }
++# endif
++
++ /* Initialize cache index. */
++ cache->cacheIndex = gcvNULL;
++ cache->cacheFree = 1;
++ cache->cacheStamp = 0;
++ }
++#endif
++
++ /* Reset idle timer. */
++ Kernel->db->lastIdle = 0;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_AddProcessDB
++**
++** Add a record to a process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** gceDATABASE_TYPE TYPE
++** Type of the record to add.
++**
++** gctPOINTER Pointer
++** Data of the record to add.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the record to add.
++**
++** gctSIZE_T Size
++** Size of the record to add.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_AddProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Size
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gcsDATABASE_RECORD_PTR record = gcvNULL;
++ gcsDATABASE_COUNTERS * count;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Pointer=0x%x "
++ "Physical=0x%x Size=%lu",
++ Kernel, ProcessID, Type, Pointer, Physical, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Special case the idle record. */
++ if (Type == gcvDB_IDLE)
++ {
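++        /* For idle records, ProcessID acts as a flag rather than a real PID:
++           0 marks the end of an idle period, 1 marks its start. */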
++ gctUINT64 time;
++
++ /* Get the current profile time. */
++ gcmkONERROR(gckOS_GetProfileTick(&time));
++
++ if ((ProcessID == 0) && (Kernel->db->lastIdle != 0))
++ {
++ /* Out of idle, adjust time it was idle. */
++ Kernel->db->idleTime += time - Kernel->db->lastIdle;
++ Kernel->db->lastIdle = 0;
++ }
++ else if (ProcessID == 1)
++ {
++ /* Save current idle time. */
++ Kernel->db->lastIdle = time;
++ }
++
++#if gcdDYNAMIC_SPEED
++ {
++ /* Test for first call. */
++ if (Kernel->db->lastSlowdown == 0)
++ {
++ /* Save milliseconds. */
++ Kernel->db->lastSlowdown = time;
++ Kernel->db->lastSlowdownIdle = Kernel->db->idleTime;
++ }
++ else
++ {
++            /* Compute the elapsed time in milliseconds. */
++ gctUINT delta = gckOS_ProfileToMS(time - Kernel->db->lastSlowdown);
++
++ /* Test for end of period. */
++ if (delta >= gcdDYNAMIC_SPEED)
++ {
++ /* Compute number of idle milliseconds. */
++ gctUINT idle = gckOS_ProfileToMS(
++ Kernel->db->idleTime - Kernel->db->lastSlowdownIdle);
++
++ /* Broadcast to slow down the GPU. */
++ gcmkONERROR(gckOS_BroadcastCalibrateSpeed(Kernel->os,
++ Kernel->hardware,
++ idle,
++ delta));
++
++ /* Save current time. */
++ Kernel->db->lastSlowdown = time;
++ Kernel->db->lastSlowdownIdle = Kernel->db->idleTime;
++ }
++ }
++ }
++#endif
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ /* Create a new record in the database. */
++ gcmkONERROR(gckKERNEL_NewRecord(Kernel, database, _GetSlot(database, Pointer), &record));
++
++ /* Initialize the record. */
++ record->kernel = Kernel;
++ record->type = Type;
++ record->data = Pointer;
++ record->physical = Physical;
++ record->bytes = Size;
++
++ /* Get pointer to counters. */
++ switch (Type)
++ {
++ case gcvDB_VIDEO_MEMORY:
++ count = &database->vidMem;
++ break;
++
++ case gcvDB_NON_PAGED:
++ count = &database->nonPaged;
++ break;
++
++ case gcvDB_CONTIGUOUS:
++ count = &database->contiguous;
++ break;
++
++ case gcvDB_MAP_MEMORY:
++ count = &database->mapMemory;
++ break;
++
++ case gcvDB_MAP_USER_MEMORY:
++ count = &database->mapUserMemory;
++ break;
++
++ case gcvDB_VIDEO_MEMORY_RESERVED:
++ count = &database->vidMemResv;
++ break;
++
++ case gcvDB_VIDEO_MEMORY_CONTIGUOUS:
++ count = &database->vidMemCont;
++ break;
++
++ case gcvDB_VIDEO_MEMORY_VIRTUAL:
++ count = &database->vidMemVirt;
++ break;
++
++ default:
++ count = gcvNULL;
++ break;
++ }
++
++ if (count != gcvNULL)
++ {
++ /* Adjust counters. */
++ count->totalBytes += Size;
++ count->bytes += Size;
++
++ if (count->bytes > count->maxBytes)
++ {
++ count->maxBytes = count->bytes;
++ }
++ }
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_RemoveProcessDB
++**
++** Remove a record from a process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** gceDATABASE_TYPE TYPE
++** Type of the record to remove.
++**
++** gctPOINTER Pointer
++** Data of the record to remove.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_RemoveProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gctSIZE_T bytes = 0;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Pointer=0x%x",
++ Kernel, ProcessID, Type, Pointer);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ /* Delete the record. */
++ gcmkONERROR(
++ gckKERNEL_DeleteRecord(Kernel, database, Type, Pointer, &bytes));
++
++ /* Update counters. */
++ switch (Type)
++ {
++ case gcvDB_VIDEO_MEMORY:
++ database->vidMem.bytes -= bytes;
++ break;
++
++ case gcvDB_NON_PAGED:
++ database->nonPaged.bytes -= bytes;
++ break;
++
++ case gcvDB_CONTIGUOUS:
++ database->contiguous.bytes -= bytes;
++ break;
++
++ case gcvDB_MAP_MEMORY:
++ database->mapMemory.bytes -= bytes;
++ break;
++
++ case gcvDB_MAP_USER_MEMORY:
++ database->mapUserMemory.bytes -= bytes;
++ break;
++
++ case gcvDB_VIDEO_MEMORY_RESERVED:
++ database->vidMemResv.bytes -= bytes;
++ break;
++
++ case gcvDB_VIDEO_MEMORY_CONTIGUOUS:
++ database->vidMemCont.bytes -= bytes;
++ break;
++
++ case gcvDB_VIDEO_MEMORY_VIRTUAL:
++ database->vidMemVirt.bytes -= bytes;
++ break;
++
++ default:
++ break;
++ }
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_FindProcessDB
++**
++** Find a record from a process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++**      gctUINT32 ThreadID
++**          Thread ID of the calling thread.
++**
++**      gceDATABASE_TYPE Type
++**          Type of the record to find.
++**
++**      gctPOINTER Pointer
++**          Data of the record to find.
++**
++** OUTPUT:
++**
++** gcsDATABASE_RECORD_PTR Record
++** Copy of record.
++*/
++gceSTATUS
++gckKERNEL_FindProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 ThreadID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer,
++ OUT gcsDATABASE_RECORD_PTR Record
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gctBOOL acquired = gcvFALSE;
++
++    gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d ThreadID=%d Type=%d Pointer=0x%x",
++                   Kernel, ProcessID, ThreadID, Type, Pointer);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ /* Find the record. */
++ gcmkONERROR(
++ gckKERNEL_FindRecord(Kernel, database, Type, Pointer, Record));
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_DestroyProcessDB
++**
++** Destroy a process database. If the database contains any records, the data
++** inside those records will be deleted as well. This aids in the cleanup if
++** a process has died unexpectedly or has memory leaks.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_DestroyProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gcsDATABASE_RECORD_PTR record, next;
++ gctBOOL asynchronous;
++ gctPHYS_ADDR physical;
++ gcuVIDMEM_NODE_PTR node;
++ gckKERNEL kernel = Kernel;
++ gctUINT32 i;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): VidMem: total=%lu max=%lu",
++ ProcessID, database->vidMem.totalBytes,
++ database->vidMem.maxBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): NonPaged: total=%lu max=%lu",
++ ProcessID, database->nonPaged.totalBytes,
++ database->nonPaged.maxBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): Contiguous: total=%lu max=%lu",
++ ProcessID, database->contiguous.totalBytes,
++ database->contiguous.maxBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): Idle time=%llu",
++ ProcessID, Kernel->db->idleTime);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): Map: total=%lu max=%lu",
++ ProcessID, database->mapMemory.totalBytes,
++ database->mapMemory.maxBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DATABASE,
++ "DB(%d): Map: total=%lu max=%lu",
++ ProcessID, database->mapUserMemory.totalBytes,
++ database->mapUserMemory.maxBytes);
++
++ if (database->list != gcvNULL)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "Process %d has entries in its database:",
++ ProcessID);
++ }
++
++    for (i = 0; i < gcmCOUNTOF(database->list); i++)
++ {
++
++ /* Walk all records. */
++ for (record = database->list[i]; record != gcvNULL; record = next)
++ {
++            /* Save the next record. */
++ next = record->next;
++
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ acquired = gcvFALSE;
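++            /* The database mutex is dropped while the record is destroyed;
++               the per-type cleanup below presumably re-enters code that
++               takes it (it is re-acquired after the switch). */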
++
++ /* Dispatch on record type. */
++ switch (record->type)
++ {
++ case gcvDB_VIDEO_MEMORY:
++ /* Free the video memory. */
++ status = gckVIDMEM_Free(Kernel, gcmUINT64_TO_PTR(record->data));
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: VIDEO_MEMORY 0x%x (status=%d)",
++ record->data, status);
++ break;
++
++ case gcvDB_NON_PAGED:
++ physical = gcmNAME_TO_PTR(record->physical);
++ /* Unmap user logical memory first. */
++ status = gckOS_UnmapUserLogical(Kernel->os,
++ physical,
++ record->bytes,
++ record->data);
++
++ /* Free the non paged memory. */
++ status = gckOS_FreeNonPagedMemory(Kernel->os,
++ record->bytes,
++ physical,
++ record->data);
++ gcmRELEASE_NAME(record->physical);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: NON_PAGED 0x%x, bytes=%lu (status=%d)",
++ record->data, record->bytes, status);
++ break;
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ case gcvDB_COMMAND_BUFFER:
++ /* Free the command buffer. */
++ status = gckEVENT_DestroyVirtualCommandBuffer(record->kernel->eventObj,
++ record->bytes,
++ gcmNAME_TO_PTR(record->physical),
++ record->data,
++ gcvKERNEL_PIXEL);
++ gcmRELEASE_NAME(record->physical);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: COMMAND_BUFFER 0x%x, bytes=%lu (status=%d)",
++ record->data, record->bytes, status);
++ break;
++#endif
++
++ case gcvDB_CONTIGUOUS:
++ physical = gcmNAME_TO_PTR(record->physical);
++ /* Unmap user logical memory first. */
++ status = gckOS_UnmapUserLogical(Kernel->os,
++ physical,
++ record->bytes,
++ record->data);
++
++ /* Free the contiguous memory. */
++ status = gckEVENT_FreeContiguousMemory(Kernel->eventObj,
++ record->bytes,
++ physical,
++ record->data,
++ gcvKERNEL_PIXEL);
++ gcmRELEASE_NAME(record->physical);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: CONTIGUOUS 0x%x bytes=%lu (status=%d)",
++ record->data, record->bytes, status);
++ break;
++
++ case gcvDB_SIGNAL:
++#if USE_NEW_LINUX_SIGNAL
++ status = gcvSTATUS_NOT_SUPPORTED;
++#else
++ /* Free the user signal. */
++ status = gckOS_DestroyUserSignal(Kernel->os,
++ gcmPTR2INT(record->data));
++#endif /* USE_NEW_LINUX_SIGNAL */
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: SIGNAL %d (status=%d)",
++ (gctINT)(gctUINTPTR_T)record->data, status);
++ break;
++
++ case gcvDB_VIDEO_MEMORY_LOCKED:
++ node = gcmUINT64_TO_PTR(record->data);
++ /* Unlock what we still locked */
++ status = gckVIDMEM_Unlock(record->kernel,
++ node,
++ gcvSURF_TYPE_UNKNOWN,
++ &asynchronous);
++
++ if (gcmIS_SUCCESS(status) && (gcvTRUE == asynchronous))
++ {
++                    /* TODO: we may need to schedule an event here. */
++ status = gckVIDMEM_Unlock(record->kernel,
++ node,
++ gcvSURF_TYPE_UNKNOWN,
++ gcvNULL);
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: VIDEO_MEMORY_LOCKED 0x%x (status=%d)",
++ node, status);
++ break;
++
++ case gcvDB_CONTEXT:
++ /* TODO: Free the context */
++ status = gckCOMMAND_Detach(Kernel->command, gcmNAME_TO_PTR(record->data));
++ gcmRELEASE_NAME(record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: CONTEXT 0x%x (status=%d)",
++ record->data, status);
++ break;
++
++ case gcvDB_MAP_MEMORY:
++ /* Unmap memory. */
++ status = gckKERNEL_UnmapMemory(Kernel,
++ record->physical,
++ record->bytes,
++ record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: MAP MEMORY %d (status=%d)",
++ gcmPTR2INT(record->data), status);
++ break;
++
++ case gcvDB_MAP_USER_MEMORY:
++ /* TODO: Unmap user memory. */
++ status = gckOS_UnmapUserMemory(Kernel->os,
++ Kernel->core,
++ record->physical,
++ record->bytes,
++ gcmNAME_TO_PTR(record->data),
++ 0);
++ gcmRELEASE_NAME(record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: MAP USER MEMORY %d (status=%d)",
++ gcmPTR2INT(record->data), status);
++ break;
++
++ case gcvDB_SHARED_INFO:
++ status = gckOS_FreeMemory(Kernel->os, record->physical);
++ break;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ case gcvDB_SYNC_POINT:
++ /* Free the user signal. */
++ status = gckOS_DestroySyncPoint(Kernel->os,
++ (gctSYNC_POINT) record->data);
++
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_DATABASE,
++ "DB: SYNC POINT %d (status=%d)",
++ (gctINT)(gctUINTPTR_T)record->data, status);
++ break;
++#endif
++
++ case gcvDB_VIDEO_MEMORY_RESERVED:
++ case gcvDB_VIDEO_MEMORY_CONTIGUOUS:
++ case gcvDB_VIDEO_MEMORY_VIRTUAL:
++            break; /* Nothing to do. */
++
++ default:
++ gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DATABASE,
++                           "DB: Corrupted record=0x%08x type=%d",
++ record, record->type);
++ break;
++ }
++
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Delete the record. */
++ gcmkONERROR(gckKERNEL_DeleteRecord(Kernel,
++ database,
++ record->type,
++ record->data,
++ gcvNULL));
++ }
++
++ }
++
++ /* Delete the database. */
++ gcmkONERROR(gckKERNEL_DeleteDatabase(Kernel, database));
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckKERNEL_QueryProcessDB
++**
++** Query a process database for the current usage of a particular record type.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** gctBOOL LastProcessID
++** gcvTRUE if searching for the last known process ID. gcvFALSE if
++** we need to search for the process ID specified by the ProcessID
++** argument.
++**
++** gceDATABASE_TYPE Type
++** Type of the record to query.
++**
++** OUTPUT:
++**
++** gcuDATABASE_INFO * Info
++** Pointer to a variable that receives the requested information.
++*/
++gceSTATUS
++gckKERNEL_QueryProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL LastProcessID,
++ IN gceDATABASE_TYPE Type,
++ OUT gcuDATABASE_INFO * Info
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d Type=%d Info=0x%x",
++ Kernel, ProcessID, Type, Info);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Info != gcvNULL);
++
++ /* Acquire the database mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Find the database. */
++ gcmkONERROR(
++ gckKERNEL_FindDatabase(Kernel, ProcessID, LastProcessID, &database));
++
++ /* Get pointer to counters. */
++ switch (Type)
++ {
++ case gcvDB_VIDEO_MEMORY:
++ gckOS_MemCopy(&Info->counters,
++ &database->vidMem,
++ gcmSIZEOF(database->vidMem));
++ break;
++
++ case gcvDB_NON_PAGED:
++ gckOS_MemCopy(&Info->counters,
++ &database->nonPaged,
++                      gcmSIZEOF(database->nonPaged));
++ break;
++
++ case gcvDB_CONTIGUOUS:
++ gckOS_MemCopy(&Info->counters,
++ &database->contiguous,
++                      gcmSIZEOF(database->contiguous));
++ break;
++
++ case gcvDB_IDLE:
++ Info->time = Kernel->db->idleTime;
++ Kernel->db->idleTime = 0;
++ break;
++
++ case gcvDB_MAP_MEMORY:
++ gckOS_MemCopy(&Info->counters,
++ &database->mapMemory,
++ gcmSIZEOF(database->mapMemory));
++ break;
++
++ case gcvDB_MAP_USER_MEMORY:
++ gckOS_MemCopy(&Info->counters,
++ &database->mapUserMemory,
++ gcmSIZEOF(database->mapUserMemory));
++ break;
++
++ case gcvDB_VIDEO_MEMORY_RESERVED:
++ gckOS_MemCopy(&Info->counters,
++ &database->vidMemResv,
++ gcmSIZEOF(database->vidMemResv));
++ break;
++
++ case gcvDB_VIDEO_MEMORY_CONTIGUOUS:
++ gckOS_MemCopy(&Info->counters,
++ &database->vidMemCont,
++ gcmSIZEOF(database->vidMemCont));
++ break;
++
++ case gcvDB_VIDEO_MEMORY_VIRTUAL:
++ gckOS_MemCopy(&Info->counters,
++ &database->vidMemVirt,
++ gcmSIZEOF(database->vidMemVirt));
++ break;
++
++ default:
++ break;
++ }
++
++ /* Release the database mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdSECURE_USER
++/*******************************************************************************
++** gckKERNEL_GetProcessDBCache
++**
++** Get the secure cache from a process database.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctUINT32 ProcessID
++** Process ID used to identify the database.
++**
++** OUTPUT:
++**
++** gcskSECURE_CACHE_PTR * Cache
++** Pointer to a variable that receives the secure cache pointer.
++*/
++gceSTATUS
++gckKERNEL_GetProcessDBCache(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ OUT gcskSECURE_CACHE_PTR * Cache
++ )
++{
++ gceSTATUS status;
++ gcsDATABASE_PTR database;
++
++ gcmkHEADER_ARG("Kernel=0x%x ProcessID=%d", Kernel, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Cache != gcvNULL);
++
++ /* Find the database. */
++ gcmkONERROR(gckKERNEL_FindDatabase(Kernel, ProcessID, gcvFALSE, &database));
++
++ /* Return the pointer to the cache. */
++ *Cache = &database->cache;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Cache=0x%x", *Cache);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++gceSTATUS
++gckKERNEL_DumpProcessDB(
++ IN gckKERNEL Kernel
++ )
++{
++ gcsDATABASE_PTR database;
++ gctINT i, pid;
++ gctUINT8 name[24];
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Acquire the database mutex. */
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(Kernel->os, Kernel->db->dbMutex, gcvINFINITE));
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("*** PROCESS DB DUMP ***\n");
++ gcmkPRINT("**************************\n");
++
++ gcmkPRINT_N(8, "%-8s%s\n", "PID", "NAME");
++ /* Walk the databases. */
++ for (i = 0; i < gcmCOUNTOF(Kernel->db->db); ++i)
++ {
++ for (database = Kernel->db->db[i];
++ database != gcvNULL;
++ database = database->next)
++ {
++ pid = database->processID;
++
++ gcmkVERIFY_OK(gckOS_ZeroMemory(name, gcmSIZEOF(name)));
++
++ gcmkVERIFY_OK(gckOS_GetProcessNameByPid(pid, gcmSIZEOF(name), name));
++
++ gcmkPRINT_N(8, "%-8d%s\n", pid, name);
++ }
++ }
++
++ /* Release the database mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->db->dbMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
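++
++/* Illustrative console output of the dump above (values are examples):
++
++       **************************
++       ***  PROCESS DB DUMP   ***
++       **************************
++       PID     NAME
++       1234    sample-app
++*/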
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_debug.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_debug.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_debug.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_debug.c 2015-07-27 23:13:06.190893891 +0200
+@@ -0,0 +1,2559 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the License, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++#include <gc_hal_kernel_debug.h>
++
++/******************************************************************************\
++******************************** Debug Variables *******************************
++\******************************************************************************/
++
++static gceSTATUS _lastError = gcvSTATUS_OK;
++static gctUINT32 _debugLevel = gcvLEVEL_ERROR;
++/*
++    _debugZones configuration value.
++    See the gcvZONE_* definitions in gc_hal_base.h.
++*/
++static gctUINT32 _debugZones = gcvZONE_NONE;
++
++/******************************************************************************\
++********************************* Debug Switches *******************************
++\******************************************************************************/
++
++/*
++ gcdBUFFERED_OUTPUT
++
++ When set to non-zero, all output is collected into a buffer with the
++ specified size. Once the buffer gets full, the debug buffer will be
++ printed to the console. gcdBUFFERED_SIZE determines the size of the buffer.
++*/
++#define gcdBUFFERED_OUTPUT 0
++
++/*
++ gcdBUFFERED_SIZE
++
++    Size in bytes of the collection buffer used when gcdBUFFERED_OUTPUT
++    is enabled. Once the buffer gets full, it is either flushed to the
++    console or allowed to overflow, depending on gcdENABLE_OVERFLOW.
++*/
++#define gcdBUFFERED_SIZE (1024 * 1024 * 2)
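++
++/* Configuration sketch (an assumption, not the shipped defaults): to keep
++   the last 2 MB of messages instead of printing them immediately, one
++   would set
++
++       #define gcdBUFFERED_OUTPUT 1
++       #define gcdBUFFERED_SIZE   (1024 * 1024 * 2)
++
++   and emit the collected messages later through
++   gckOS_DebugFlush(__FUNCTION__, __LINE__, ~0U). */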
++
++/*
++ gcdDMA_BUFFER_COUNT
++
++    If greater than zero, the debugger will attempt to find the command buffer
++ where DMA is currently executing and then print this buffer and
++ (gcdDMA_BUFFER_COUNT - 1) buffers before the current one. If set to zero
++ or the current buffer is not found, all buffers are printed.
++*/
++#define gcdDMA_BUFFER_COUNT 0
++
++/*
++ gcdTHREAD_BUFFERS
++
++    When greater than one, messages from the specified number of threads
++    are accumulated in separate output buffers.
++*/
++#define gcdTHREAD_BUFFERS 1
++
++/*
++ gcdENABLE_OVERFLOW
++
++    When set to non-zero and the output buffer gets full, the buffer is
++    allowed to overflow, discarding the oldest messages, instead of being
++    printed.
++#define gcdENABLE_OVERFLOW 1
++
++/*
++ gcdSHOW_LINE_NUMBER
++
++    When enabled, each print statement is preceded by the current
++    line number.
++*/
++#define gcdSHOW_LINE_NUMBER 0
++
++/*
++ gcdSHOW_PROCESS_ID
++
++    When enabled, each print statement is preceded by the current
++    process ID.
++*/
++#define gcdSHOW_PROCESS_ID 0
++
++/*
++ gcdSHOW_THREAD_ID
++
++    When enabled, each print statement is preceded by the current
++    thread ID.
++*/
++#define gcdSHOW_THREAD_ID 0
++
++/*
++ gcdSHOW_TIME
++
++    When enabled, each print statement is preceded by the current
++    high-resolution time.
++*/
++#define gcdSHOW_TIME 0
++
++
++/******************************************************************************\
++****************************** Miscellaneous Macros ****************************
++\******************************************************************************/
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++# define gcmDBGASSERT(Expression, Format, Value) \
++ if (!(Expression)) \
++ { \
++ _DirectPrint( \
++ "*** gcmDBGASSERT ***************************\n" \
++ " function : %s\n" \
++ " line : %d\n" \
++ " expression : " #Expression "\n" \
++ " actual value : " Format "\n", \
++ __FUNCTION__, __LINE__, Value \
++ ); \
++ }
++#else
++# define gcmDBGASSERT(Expression, Format, Value)
++#endif
++
++#define gcmPTRALIGNMENT(Pointer, Alignment) \
++( \
++    gcmALIGN(gcmPTR2INT(Pointer), Alignment) - gcmPTR2INT(Pointer) \
++)
++
++#if gcdALIGNBYSIZE
++# define gcmISALIGNED(Offset, Alignment) \
++ (((Offset) & ((Alignment) - 1)) == 0)
++
++# define gcmkALIGNPTR(Type, Pointer, Alignment) \
++ Pointer = (Type) gcmINT2PTR(gcmALIGN(gcmPTR2INT(Pointer), Alignment))
++#else
++# define gcmISALIGNED(Offset, Alignment) \
++ gcvTRUE
++
++# define gcmkALIGNPTR(Type, Pointer, Alignment)
++#endif
++
++#define gcmALIGNSIZE(Offset, Size) \
++ ((Size - Offset) + Size)
++
++#define gcdHAVEPREFIX \
++( \
++ gcdSHOW_TIME \
++ || gcdSHOW_LINE_NUMBER \
++ || gcdSHOW_PROCESS_ID \
++ || gcdSHOW_THREAD_ID \
++)
++
++#if gcdHAVEPREFIX
++
++# define gcdOFFSET 0
++
++#if gcdSHOW_TIME
++#if gcmISALIGNED(gcdOFFSET, 8)
++# define gcdTIMESIZE gcmSIZEOF(gctUINT64)
++# elif gcdOFFSET == 4
++# define gcdTIMESIZE gcmALIGNSIZE(4, gcmSIZEOF(gctUINT64))
++# else
++# error "Unexpected offset value."
++# endif
++# undef gcdOFFSET
++# define gcdOFFSET 8
++#if !defined(gcdPREFIX_LEADER)
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT64)
++# define gcdTIMEFORMAT "0x%016llX"
++# else
++# define gcdTIMEFORMAT ", 0x%016llX"
++# endif
++# else
++# define gcdTIMESIZE 0
++# define gcdTIMEFORMAT
++# endif
++
++#if gcdSHOW_LINE_NUMBER
++#if gcmISALIGNED(gcdOFFSET, 8)
++# define gcdNUMSIZE gcmSIZEOF(gctUINT64)
++# elif gcdOFFSET == 4
++# define gcdNUMSIZE gcmALIGNSIZE(4, gcmSIZEOF(gctUINT64))
++# else
++# error "Unexpected offset value."
++# endif
++# undef gcdOFFSET
++# define gcdOFFSET 8
++#if !defined(gcdPREFIX_LEADER)
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT64)
++# define gcdNUMFORMAT "%8llu"
++# else
++# define gcdNUMFORMAT ", %8llu"
++# endif
++# else
++# define gcdNUMSIZE 0
++# define gcdNUMFORMAT
++# endif
++
++#if gcdSHOW_PROCESS_ID
++#if gcmISALIGNED(gcdOFFSET, 4)
++# define gcdPIDSIZE gcmSIZEOF(gctUINT32)
++# else
++# error "Unexpected offset value."
++# endif
++# undef gcdOFFSET
++# define gcdOFFSET 4
++#if !defined(gcdPREFIX_LEADER)
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT32)
++# define gcdPIDFORMAT "pid=%5d"
++# else
++# define gcdPIDFORMAT ", pid=%5d"
++# endif
++# else
++# define gcdPIDSIZE 0
++# define gcdPIDFORMAT
++# endif
++
++#if gcdSHOW_THREAD_ID
++#if gcmISALIGNED(gcdOFFSET, 4)
++# define gcdTIDSIZE gcmSIZEOF(gctUINT32)
++# else
++# error "Unexpected offset value."
++# endif
++# undef gcdOFFSET
++# define gcdOFFSET 4
++#if !defined(gcdPREFIX_LEADER)
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT32)
++# define gcdTIDFORMAT "tid=%5d"
++# else
++# define gcdTIDFORMAT ", tid=%5d"
++# endif
++# else
++# define gcdTIDSIZE 0
++# define gcdTIDFORMAT
++# endif
++
++# define gcdPREFIX_SIZE \
++ ( \
++ gcdTIMESIZE \
++ + gcdNUMSIZE \
++ + gcdPIDSIZE \
++ + gcdTIDSIZE \
++ )
++
++ static const char * _prefixFormat =
++ "["
++ gcdTIMEFORMAT
++ gcdNUMFORMAT
++ gcdPIDFORMAT
++ gcdTIDFORMAT
++ "] ";
++
++#else
++
++# define gcdPREFIX_LEADER gcmSIZEOF(gctUINT32)
++# define gcdPREFIX_SIZE 0
++
++#endif
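++
++/* For example, with only gcdSHOW_PROCESS_ID and gcdSHOW_THREAD_ID enabled,
++   _prefixFormat expands to "[pid=%5d, tid=%5d] ", so each line is prefixed
++   like "[pid= 1234, tid= 5678] " (illustrative values). */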
++
++/* Assumed largest variable argument leader size. */
++#define gcdVARARG_LEADER gcmSIZEOF(gctUINT64)
++
++/* Alignments. */
++#if gcdALIGNBYSIZE
++# define gcdPREFIX_ALIGNMENT gcdPREFIX_LEADER
++# define gcdVARARG_ALIGNMENT gcdVARARG_LEADER
++#else
++# define gcdPREFIX_ALIGNMENT 0
++# define gcdVARARG_ALIGNMENT 0
++#endif
++
++#if gcdBUFFERED_OUTPUT
++# define gcdOUTPUTPREFIX _AppendPrefix
++# define gcdOUTPUTSTRING _AppendString
++# define gcdOUTPUTCOPY _AppendCopy
++# define gcdOUTPUTBUFFER _AppendBuffer
++#else
++# define gcdOUTPUTPREFIX _PrintPrefix
++# define gcdOUTPUTSTRING _PrintString
++# define gcdOUTPUTCOPY _PrintString
++# define gcdOUTPUTBUFFER _PrintBuffer
++#endif
++
++/******************************************************************************\
++****************************** Private Structures ******************************
++\******************************************************************************/
++
++typedef enum _gceBUFITEM
++{
++ gceBUFITEM_NONE,
++ gcvBUFITEM_PREFIX,
++ gcvBUFITEM_STRING,
++ gcvBUFITEM_COPY,
++ gcvBUFITEM_BUFFER
++}
++gceBUFITEM;
++
++/* Common item head/buffer terminator. */
++typedef struct _gcsBUFITEM_HEAD * gcsBUFITEM_HEAD_PTR;
++typedef struct _gcsBUFITEM_HEAD
++{
++ gceBUFITEM type;
++}
++gcsBUFITEM_HEAD;
++
++/* String prefix (for example: [ 1,tid=0x019A]). */
++typedef struct _gcsBUFITEM_PREFIX * gcsBUFITEM_PREFIX_PTR;
++typedef struct _gcsBUFITEM_PREFIX
++{
++ gceBUFITEM type;
++#if gcdHAVEPREFIX
++ gctPOINTER prefixData;
++#endif
++}
++gcsBUFITEM_PREFIX;
++
++/* Buffered string. */
++typedef struct _gcsBUFITEM_STRING * gcsBUFITEM_STRING_PTR;
++typedef struct _gcsBUFITEM_STRING
++{
++ gceBUFITEM type;
++ gctINT indent;
++ gctCONST_STRING message;
++ gctPOINTER messageData;
++ gctUINT messageDataSize;
++}
++gcsBUFITEM_STRING;
++
++/* Buffered string (copy of the string is included with the record). */
++typedef struct _gcsBUFITEM_COPY * gcsBUFITEM_COPY_PTR;
++typedef struct _gcsBUFITEM_COPY
++{
++ gceBUFITEM type;
++ gctINT indent;
++ gctPOINTER messageData;
++ gctUINT messageDataSize;
++}
++gcsBUFITEM_COPY;
++
++/* Memory buffer. */
++typedef struct _gcsBUFITEM_BUFFER * gcsBUFITEM_BUFFER_PTR;
++typedef struct _gcsBUFITEM_BUFFER
++{
++ gceBUFITEM type;
++ gctINT indent;
++ gceDUMP_BUFFER bufferType;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ gctUINT32 dmaAddress;
++#endif
++
++ gctUINT dataSize;
++ gctUINT32 address;
++#if gcdHAVEPREFIX
++ gctPOINTER prefixData;
++#endif
++}
++gcsBUFITEM_BUFFER;
++
++typedef struct _gcsBUFFERED_OUTPUT * gcsBUFFERED_OUTPUT_PTR;
++typedef struct _gcsBUFFERED_OUTPUT
++{
++#if gcdTHREAD_BUFFERS > 1
++ gctUINT32 threadID;
++#endif
++
++#if gcdSHOW_LINE_NUMBER
++ gctUINT64 lineNumber;
++#endif
++
++ gctINT indent;
++
++#if gcdBUFFERED_OUTPUT
++ gctINT start;
++ gctINT index;
++ gctINT count;
++ gctUINT8 buffer[gcdBUFFERED_SIZE];
++#endif
++
++ gcsBUFFERED_OUTPUT_PTR prev;
++ gcsBUFFERED_OUTPUT_PTR next;
++}
++gcsBUFFERED_OUTPUT;
++
++typedef gctUINT (* gcfPRINTSTRING) (
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ );
++
++typedef gctINT (* gcfGETITEMSIZE) (
++ IN gcsBUFITEM_HEAD_PTR Item
++ );
++
++/******************************************************************************\
++******************************* Private Variables ******************************
++\******************************************************************************/
++
++static gcsBUFFERED_OUTPUT _outputBuffer[gcdTHREAD_BUFFERS];
++static gcsBUFFERED_OUTPUT_PTR _outputBufferHead = gcvNULL;
++static gcsBUFFERED_OUTPUT_PTR _outputBufferTail = gcvNULL;
++
++/******************************************************************************\
++****************************** Item Size Functions *****************************
++\******************************************************************************/
++
++#if gcdBUFFERED_OUTPUT
++static gctINT
++_GetTerminatorItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ return gcmSIZEOF(gcsBUFITEM_HEAD);
++}
++
++static gctINT
++_GetPrefixItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++#if gcdHAVEPREFIX
++ gcsBUFITEM_PREFIX_PTR item = (gcsBUFITEM_PREFIX_PTR) Item;
++ gctUINT vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item);
++ return vlen + gcdPREFIX_SIZE;
++#else
++ return gcmSIZEOF(gcsBUFITEM_PREFIX);
++#endif
++}
++
++static gctINT
++_GetStringItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ gcsBUFITEM_STRING_PTR item = (gcsBUFITEM_STRING_PTR) Item;
++ gctUINT vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item);
++ return vlen + item->messageDataSize;
++}
++
++static gctINT
++_GetCopyItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ gcsBUFITEM_COPY_PTR item = (gcsBUFITEM_COPY_PTR) Item;
++ gctUINT vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item);
++ return vlen + item->messageDataSize;
++}
++
++static gctINT
++_GetBufferItemSize(
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++#if gcdHAVEPREFIX
++ gcsBUFITEM_BUFFER_PTR item = (gcsBUFITEM_BUFFER_PTR) Item;
++ gctUINT vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item);
++ return vlen + gcdPREFIX_SIZE + item->dataSize;
++#else
++ gcsBUFITEM_BUFFER_PTR item = (gcsBUFITEM_BUFFER_PTR) Item;
++ return gcmSIZEOF(gcsBUFITEM_BUFFER) + item->dataSize;
++#endif
++}
++
++static gcfGETITEMSIZE _itemSize[] =
++{
++ _GetTerminatorItemSize,
++ _GetPrefixItemSize,
++ _GetStringItemSize,
++ _GetCopyItemSize,
++ _GetBufferItemSize
++};
++#endif
++
++/******************************************************************************\
++******************************* Printing Functions *****************************
++\******************************************************************************/
++
++#if gcdDEBUG || gcdBUFFERED_OUTPUT
++static void
++_DirectPrint(
++ gctCONST_STRING Message,
++ ...
++ )
++{
++ gctINT len;
++ char buffer[768];
++ gctARGUMENTS arguments;
++
++ gcmkARGUMENTS_START(arguments, Message);
++ len = gcmkVSPRINTF(buffer, gcmSIZEOF(buffer), Message, arguments);
++ gcmkARGUMENTS_END(arguments);
++
++ buffer[len] = '\0';
++ gcmkOUTPUT_STRING(buffer);
++}
++#endif
++
++static int
++_AppendIndent(
++ IN gctINT Indent,
++ IN char * Buffer,
++ IN int BufferSize
++ )
++{
++ gctINT i;
++
++ gctINT len = 0;
++ gctINT indent = Indent % 40;
++
++ for (i = 0; i < indent; i += 1)
++ {
++ Buffer[len++] = ' ';
++ }
++
++ if (indent != Indent)
++ {
++ len += gcmkSPRINTF(
++ Buffer + len, BufferSize - len, " <%d> ", Indent
++ );
++
++ Buffer[len] = '\0';
++ }
++
++ return len;
++}
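++
++/* The indentation wraps every 40 columns: for example, an Indent of 45
++   produces five leading spaces followed by the literal marker " <45> ". */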
++
++#if gcdHAVEPREFIX
++static void
++_PrintPrefix(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctPOINTER Data
++ )
++{
++ char buffer[768];
++ gctINT len;
++
++ /* Format the string. */
++ len = gcmkVSPRINTF(buffer, gcmSIZEOF(buffer), _prefixFormat, Data);
++ buffer[len] = '\0';
++
++ /* Print the string. */
++ gcmkOUTPUT_STRING(buffer);
++}
++#endif
++
++static void
++_PrintString(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctCONST_STRING Message,
++ IN gctUINT ArgumentSize,
++ IN gctPOINTER Data
++ )
++{
++ char buffer[768];
++ gctINT len;
++
++ /* Append the indent string. */
++ len = _AppendIndent(Indent, buffer, gcmSIZEOF(buffer));
++
++ /* Format the string. */
++ len += gcmkVSPRINTF(buffer + len, gcmSIZEOF(buffer) - len, Message, Data);
++ buffer[len] = '\0';
++
++ /* Add end-of-line if missing. */
++ if (buffer[len - 1] != '\n')
++ {
++ buffer[len++] = '\n';
++ buffer[len] = '\0';
++ }
++
++ /* Print the string. */
++ gcmkOUTPUT_STRING(buffer);
++}
++
++static void
++_PrintBuffer(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctPOINTER PrefixData,
++ IN gctPOINTER Data,
++ IN gctUINT Address,
++ IN gctUINT DataSize,
++ IN gceDUMP_BUFFER Type,
++ IN gctUINT32 DmaAddress
++ )
++{
++ static gctCONST_STRING _titleString[] =
++ {
++ "CONTEXT BUFFER",
++ "USER COMMAND BUFFER",
++ "KERNEL COMMAND BUFFER",
++ "LINK BUFFER",
++ "WAIT LINK BUFFER",
++ ""
++ };
++
++ static const gctINT COLUMN_COUNT = 8;
++
++ gctUINT i, count, column, address;
++ gctUINT32_PTR data;
++ gctCHAR buffer[768];
++ gctUINT indent, len;
++ gctBOOL command;
++
++ /* Append space for the prefix. */
++#if gcdHAVEPREFIX
++ indent = gcmkVSPRINTF(buffer, gcmSIZEOF(buffer), _prefixFormat, PrefixData);
++ buffer[indent] = '\0';
++#else
++ indent = 0;
++#endif
++
++ /* Append the indent string. */
++ indent += _AppendIndent(
++ Indent, buffer + indent, gcmSIZEOF(buffer) - indent
++ );
++
++ switch (Type)
++ {
++ case gceDUMP_BUFFER_CONTEXT:
++ case gceDUMP_BUFFER_USER:
++ case gceDUMP_BUFFER_KERNEL:
++ case gceDUMP_BUFFER_LINK:
++ case gceDUMP_BUFFER_WAITLINK:
++ /* Form and print the title string. */
++ gcmkSPRINTF2(
++ buffer + indent, gcmSIZEOF(buffer) - indent,
++ "%s%s\n", _titleString[Type],
++ ((DmaAddress >= Address) && (DmaAddress < Address + DataSize))
++ ? " (CURRENT)" : ""
++ );
++
++ gcmkOUTPUT_STRING(buffer);
++
++ /* Terminate the string. */
++ buffer[indent] = '\0';
++
++ /* This is a command buffer. */
++ command = gcvTRUE;
++ break;
++
++ case gceDUMP_BUFFER_FROM_USER:
++ /* This is not a command buffer. */
++ command = gcvFALSE;
++
++ /* No title. */
++ break;
++
++ default:
++ gcmDBGASSERT(gcvFALSE, "%s", "invalid buffer type");
++
++ /* This is not a command buffer. */
++ command = gcvFALSE;
++ }
++
++ /* Overwrite the prefix with spaces. */
++ for (i = 0; i < indent; i += 1)
++ {
++ buffer[i] = ' ';
++ }
++
++ /* Form and print the opening string. */
++ if (command)
++ {
++ gcmkSPRINTF2(
++ buffer + indent, gcmSIZEOF(buffer) - indent,
++ "@[kernel.command %08X %08X\n", Address, DataSize
++ );
++
++ gcmkOUTPUT_STRING(buffer);
++
++ /* Terminate the string. */
++ buffer[indent] = '\0';
++ }
++
++ /* Get initial address. */
++ address = Address;
++
++ /* Cast the data pointer. */
++ data = (gctUINT32_PTR) Data;
++
++ /* Compute the number of double words. */
++ count = DataSize / gcmSIZEOF(gctUINT32);
++
++ /* Print the buffer. */
++ for (i = 0, len = indent, column = 0; i < count; i += 1)
++ {
++ /* Append the address. */
++ if (column == 0)
++ {
++ len += gcmkSPRINTF(
++ buffer + len, gcmSIZEOF(buffer) - len, "0x%08X:", address
++ );
++ }
++
++ /* Append the data value. */
++ len += gcmkSPRINTF2(
++ buffer + len, gcmSIZEOF(buffer) - len, "%c%08X",
++ (address == DmaAddress)? '>' : ' ', data[i]
++ );
++
++ buffer[len] = '\0';
++
++ /* Update the address. */
++ address += gcmSIZEOF(gctUINT32);
++
++ /* Advance column count. */
++ column += 1;
++
++ /* End of line? */
++ if ((column % COLUMN_COUNT) == 0)
++ {
++ /* Append EOL. */
++ gcmkSTRCAT(buffer + len, gcmSIZEOF(buffer) - len, "\n");
++
++ /* Print the string. */
++ gcmkOUTPUT_STRING(buffer);
++
++ /* Reset. */
++ len = indent;
++ column = 0;
++ }
++ }
++
++ /* Print the last partial string. */
++ if (column != 0)
++ {
++ /* Append EOL. */
++ gcmkSTRCAT(buffer + len, gcmSIZEOF(buffer) - len, "\n");
++
++ /* Print the string. */
++ gcmkOUTPUT_STRING(buffer);
++ }
++
++    /* Form and print the closing string. */
++ if (command)
++ {
++ buffer[indent] = '\0';
++ gcmkSTRCAT(buffer, gcmSIZEOF(buffer), "] -- command\n");
++ gcmkOUTPUT_STRING(buffer);
++ }
++}
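++
++/* Illustrative row layout of the dump above (eight words per line, with
++   '>' marking the word at the current DMA address):
++
++       0x00001000: 08010E00 00000000>300E0002 00000008 ...
++*/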
++
++#if gcdBUFFERED_OUTPUT
++static gctUINT
++_PrintNone(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ /* Return the size of the node. */
++ return gcmSIZEOF(gcsBUFITEM_HEAD);
++}
++
++static gctUINT
++_PrintPrefixWrapper(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++#if gcdHAVEPREFIX
++ gcsBUFITEM_PREFIX_PTR item;
++ gctUINT vlen;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_PREFIX_PTR) Item;
++
++ /* Print the message. */
++ _PrintPrefix(OutputBuffer, item->prefixData);
++
++ /* Compute the size of the variable portion of the structure. */
++ vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item);
++
++ /* Return the size of the node. */
++ return vlen + gcdPREFIX_SIZE;
++#else
++ return gcmSIZEOF(gcsBUFITEM_PREFIX);
++#endif
++}
++
++static gctUINT
++_PrintStringWrapper(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ gcsBUFITEM_STRING_PTR item;
++ gctUINT vlen;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_STRING_PTR) Item;
++
++ /* Print the message. */
++ _PrintString(
++ OutputBuffer,
++ item->indent, item->message, item->messageDataSize, item->messageData
++ );
++
++ /* Compute the size of the variable portion of the structure. */
++ vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item);
++
++ /* Return the size of the node. */
++ return vlen + item->messageDataSize;
++}
++
++static gctUINT
++_PrintCopyWrapper(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++ gcsBUFITEM_COPY_PTR item;
++ gctCONST_STRING message;
++ gctUINT vlen;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_COPY_PTR) Item;
++
++ /* Determine the string pointer. */
++ message = (gctCONST_STRING) (item + 1);
++
++ /* Print the message. */
++ _PrintString(
++ OutputBuffer,
++ item->indent, message, item->messageDataSize, item->messageData
++ );
++
++ /* Compute the size of the variable portion of the structure. */
++ vlen = ((gctUINT8_PTR) item->messageData) - ((gctUINT8_PTR) item);
++
++ /* Return the size of the node. */
++ return vlen + item->messageDataSize;
++}
++
++static gctUINT
++_PrintBufferWrapper(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gcsBUFITEM_HEAD_PTR Item
++ )
++{
++#if gcdHAVEPREFIX
++ gctUINT32 dmaAddress;
++ gcsBUFITEM_BUFFER_PTR item;
++ gctPOINTER data;
++ gctUINT vlen;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_BUFFER_PTR) Item;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ dmaAddress = item->dmaAddress;
++#else
++ dmaAddress = 0xFFFFFFFF;
++#endif
++
++ if (dmaAddress != 0)
++ {
++ /* Compute the data address. */
++ data = ((gctUINT8_PTR) item->prefixData) + gcdPREFIX_SIZE;
++
++ /* Print buffer. */
++ _PrintBuffer(
++ OutputBuffer,
++ item->indent, item->prefixData,
++ data, item->address, item->dataSize,
++ item->bufferType, dmaAddress
++ );
++ }
++
++ /* Compute the size of the variable portion of the structure. */
++ vlen = ((gctUINT8_PTR) item->prefixData) - ((gctUINT8_PTR) item);
++
++ /* Return the size of the node. */
++ return vlen + gcdPREFIX_SIZE + item->dataSize;
++#else
++ gctUINT32 dmaAddress;
++ gcsBUFITEM_BUFFER_PTR item;
++
++ /* Get access to the data. */
++ item = (gcsBUFITEM_BUFFER_PTR) Item;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ dmaAddress = item->dmaAddress;
++#else
++ dmaAddress = 0xFFFFFFFF;
++#endif
++
++ if (dmaAddress != 0)
++ {
++ /* Print buffer. */
++ _PrintBuffer(
++ OutputBuffer,
++ item->indent, gcvNULL,
++ item + 1, item->address, item->dataSize,
++ item->bufferType, dmaAddress
++ );
++ }
++
++ /* Return the size of the node. */
++ return gcmSIZEOF(gcsBUFITEM_BUFFER) + item->dataSize;
++#endif
++}
++
++static gcfPRINTSTRING _printArray[] =
++{
++ _PrintNone,
++ _PrintPrefixWrapper,
++ _PrintStringWrapper,
++ _PrintCopyWrapper,
++ _PrintBufferWrapper
++};
++#endif
++
++/******************************************************************************\
++******************************* Private Functions ******************************
++\******************************************************************************/
++
++#if gcdBUFFERED_OUTPUT
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++static gcsBUFITEM_BUFFER_PTR
++_FindCurrentDMABuffer(
++ gctUINT32 DmaAddress
++ )
++{
++ gctINT i, skip;
++ gcsBUFITEM_HEAD_PTR item;
++ gcsBUFITEM_BUFFER_PTR dmaCurrent;
++
++ /* Reset the current buffer. */
++ dmaCurrent = gcvNULL;
++
++ /* Get the first stored item. */
++ item = (gcsBUFITEM_HEAD_PTR) &_outputBufferHead->buffer[_outputBufferHead->start];
++
++ /* Run through all items. */
++ for (i = 0; i < _outputBufferHead->count; i += 1)
++ {
++ /* Buffer item? */
++ if (item->type == gcvBUFITEM_BUFFER)
++ {
++ gcsBUFITEM_BUFFER_PTR buffer = (gcsBUFITEM_BUFFER_PTR) item;
++
++ if ((DmaAddress >= buffer->address) &&
++ (DmaAddress < buffer->address + buffer->dataSize))
++ {
++ dmaCurrent = buffer;
++ }
++ }
++
++ /* Get the item size and skip it. */
++ skip = (* _itemSize[item->type]) (item);
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ /* End of the buffer? Wrap around. */
++ if (item->type == gceBUFITEM_NONE)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) _outputBufferHead->buffer;
++ }
++ }
++
++ /* Return result. */
++ return dmaCurrent;
++}
++
++static void
++_EnableAllDMABuffers(
++ void
++ )
++{
++ gctINT i, skip;
++ gcsBUFITEM_HEAD_PTR item;
++
++ /* Get the first stored item. */
++ item = (gcsBUFITEM_HEAD_PTR) &_outputBufferHead->buffer[_outputBufferHead->start];
++
++ /* Run through all items. */
++ for (i = 0; i < _outputBufferHead->count; i += 1)
++ {
++ /* Buffer item? */
++ if (item->type == gcvBUFITEM_BUFFER)
++ {
++ gcsBUFITEM_BUFFER_PTR buffer = (gcsBUFITEM_BUFFER_PTR) item;
++
++ /* Enable the buffer. */
++ buffer->dmaAddress = ~0U;
++ }
++
++ /* Get the item size and skip it. */
++ skip = (* _itemSize[item->type]) (item);
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ /* End of the buffer? Wrap around. */
++ if (item->type == gceBUFITEM_NONE)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) _outputBufferHead->buffer;
++ }
++ }
++}
++
++static void
++_EnableDMABuffers(
++ gctUINT32 DmaAddress,
++ gcsBUFITEM_BUFFER_PTR CurrentDMABuffer
++ )
++{
++ gctINT i, skip, index;
++ gcsBUFITEM_HEAD_PTR item;
++ gcsBUFITEM_BUFFER_PTR buffers[gcdDMA_BUFFER_COUNT];
++
++ /* Reset buffer pointers. */
++ gckOS_ZeroMemory(buffers, gcmSIZEOF(buffers));
++
++ /* Set the current buffer index. */
++ index = -1;
++
++ /* Get the first stored item. */
++ item = (gcsBUFITEM_HEAD_PTR) &_outputBufferHead->buffer[_outputBufferHead->start];
++
++ /* Run through all items until the current DMA buffer is found. */
++ for (i = 0; i < _outputBufferHead->count; i += 1)
++ {
++ /* Buffer item? */
++ if (item->type == gcvBUFITEM_BUFFER)
++ {
++ /* Advance the index. */
++ index = (index + 1) % gcdDMA_BUFFER_COUNT;
++
++ /* Add to the buffer array. */
++ buffers[index] = (gcsBUFITEM_BUFFER_PTR) item;
++
++ /* Stop if this is the current DMA buffer. */
++ if ((gcsBUFITEM_BUFFER_PTR) item == CurrentDMABuffer)
++ {
++ break;
++ }
++ }
++
++ /* Get the item size and skip it. */
++ skip = (* _itemSize[item->type]) (item);
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ /* End of the buffer? Wrap around. */
++ if (item->type == gceBUFITEM_NONE)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) _outputBufferHead->buffer;
++ }
++ }
++
++ /* Enable the found buffers. */
++ gcmDBGASSERT(index != -1, "%d", index);
++
++ for (i = 0; i < gcdDMA_BUFFER_COUNT; i += 1)
++ {
++ if (buffers[index] == gcvNULL)
++ {
++ break;
++ }
++
++ buffers[index]->dmaAddress = DmaAddress;
++
++ index -= 1;
++
++ if (index == -1)
++ {
++ index = gcdDMA_BUFFER_COUNT - 1;
++ }
++ }
++}
++#endif
++
++static void
++_Flush(
++ gctUINT32 DmaAddress
++ )
++{
++ gctINT i, skip;
++ gcsBUFITEM_HEAD_PTR item;
++
++ gcsBUFFERED_OUTPUT_PTR outputBuffer = _outputBufferHead;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ if ((outputBuffer != gcvNULL) && (outputBuffer->count != 0))
++ {
++ /* Find the current DMA buffer. */
++ gcsBUFITEM_BUFFER_PTR dmaCurrent = _FindCurrentDMABuffer(DmaAddress);
++
++ /* Was the current buffer found? */
++ if (dmaCurrent == gcvNULL)
++ {
++ /* No, print all buffers. */
++ _EnableAllDMABuffers();
++ }
++ else
++ {
++ /* Yes, enable only specified number of buffers. */
++ _EnableDMABuffers(DmaAddress, dmaCurrent);
++ }
++ }
++#endif
++
++ while (outputBuffer != gcvNULL)
++ {
++ if (outputBuffer->count != 0)
++ {
++ _DirectPrint("********************************************************************************\n");
++ _DirectPrint("FLUSHING DEBUG OUTPUT BUFFER (%d elements).\n", outputBuffer->count);
++ _DirectPrint("********************************************************************************\n");
++
++ item = (gcsBUFITEM_HEAD_PTR) &outputBuffer->buffer[outputBuffer->start];
++
++ for (i = 0; i < outputBuffer->count; i += 1)
++ {
++ skip = (* _printArray[item->type]) (outputBuffer, item);
++
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ if (item->type == gceBUFITEM_NONE)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) outputBuffer->buffer;
++ }
++ }
++
++ outputBuffer->start = 0;
++ outputBuffer->index = 0;
++ outputBuffer->count = 0;
++ }
++
++ outputBuffer = outputBuffer->next;
++ }
++}
++
++static gcsBUFITEM_HEAD_PTR
++_AllocateItem(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Size
++ )
++{
++ gctINT skip;
++ gcsBUFITEM_HEAD_PTR item, next;
++
++#if gcdENABLE_OVERFLOW
++ if (
++ (OutputBuffer->index + Size >= gcdBUFFERED_SIZE - gcmSIZEOF(gcsBUFITEM_HEAD))
++ ||
++ (
++ (OutputBuffer->index < OutputBuffer->start) &&
++ (OutputBuffer->index + Size >= OutputBuffer->start)
++ )
++ )
++ {
++ if (OutputBuffer->index + Size >= gcdBUFFERED_SIZE - gcmSIZEOF(gcsBUFITEM_HEAD))
++ {
++ if (OutputBuffer->index < OutputBuffer->start)
++ {
++ item = (gcsBUFITEM_HEAD_PTR) &OutputBuffer->buffer[OutputBuffer->start];
++
++ while (item->type != gceBUFITEM_NONE)
++ {
++ skip = (* _itemSize[item->type]) (item);
++
++ OutputBuffer->start += skip;
++ OutputBuffer->count -= 1;
++
++ item->type = gceBUFITEM_NONE;
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++ }
++
++ OutputBuffer->start = 0;
++ }
++
++ OutputBuffer->index = 0;
++ }
++
++ item = (gcsBUFITEM_HEAD_PTR) &OutputBuffer->buffer[OutputBuffer->start];
++
++ while (OutputBuffer->start - OutputBuffer->index <= Size)
++ {
++ skip = (* _itemSize[item->type]) (item);
++
++ OutputBuffer->start += skip;
++ OutputBuffer->count -= 1;
++
++ item->type = gceBUFITEM_NONE;
++ item = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + skip);
++
++ if (item->type == gceBUFITEM_NONE)
++ {
++ OutputBuffer->start = 0;
++ break;
++ }
++ }
++ }
++#else
++ if (OutputBuffer->index + Size > gcdBUFFERED_SIZE - gcmSIZEOF(gcsBUFITEM_HEAD))
++ {
++ _DirectPrint("\nMessage buffer full; forcing message flush.\n\n");
++ _Flush(~0U);
++ }
++#endif
++
++ item = (gcsBUFITEM_HEAD_PTR) &OutputBuffer->buffer[OutputBuffer->index];
++
++ OutputBuffer->index += Size;
++ OutputBuffer->count += 1;
++
++ next = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) item + Size);
++ next->type = gceBUFITEM_NONE;
++
++ return item;
++}
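++
++/* With gcdENABLE_OVERFLOW, _AllocateItem treats OutputBuffer->buffer as a
++   ring: when the write index would run past the end of the buffer or into
++   still-queued items, the oldest records are retired first, so the buffer
++   always holds the most recent messages; otherwise a full buffer forces a
++   flush. */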
++
++#if gcdALIGNBYSIZE
++static void
++_FreeExtraSpace(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctPOINTER Item,
++ IN gctINT ItemSize,
++ IN gctINT FreeSize
++ )
++{
++ gcsBUFITEM_HEAD_PTR next;
++
++ OutputBuffer->index -= FreeSize;
++
++ next = (gcsBUFITEM_HEAD_PTR) ((gctUINT8_PTR) Item + ItemSize);
++ next->type = gceBUFITEM_NONE;
++}
++#endif
++
++#if gcdHAVEPREFIX
++static void
++_AppendPrefix(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctPOINTER Data
++ )
++{
++ gctUINT8_PTR prefixData;
++ gcsBUFITEM_PREFIX_PTR item;
++ gctINT allocSize;
++
++#if gcdALIGNBYSIZE
++ gctUINT alignment;
++ gctINT size, freeSize;
++#endif
++
++ gcmDBGASSERT(Data != gcvNULL, "%p", Data);
++
++ /* Determine the maximum item size. */
++ allocSize
++ = gcmSIZEOF(gcsBUFITEM_PREFIX)
++ + gcdPREFIX_SIZE
++ + gcdPREFIX_ALIGNMENT;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_PREFIX_PTR) _AllocateItem(OutputBuffer, allocSize);
++
++ /* Compute the initial prefix data pointer. */
++ prefixData = (gctUINT8_PTR) (item + 1);
++
++ /* Align the data pointer as necessary. */
++#if gcdALIGNBYSIZE
++ alignment = gcmPTRALIGNMENT(prefixData, gcdPREFIX_ALIGNMENT);
++ prefixData += alignment;
++#endif
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_PREFIX;
++ item->prefixData = prefixData;
++
++ /* Copy argument value. */
++ memcpy(prefixData, Data, gcdPREFIX_SIZE);
++
++#if gcdALIGNBYSIZE
++ /* Compute the actual node size. */
++ size = gcmSIZEOF(gcsBUFITEM_PREFIX) + gcdPREFIX_SIZE + alignment;
++
++ /* Free extra memory if any. */
++ freeSize = allocSize - size;
++ if (freeSize != 0)
++ {
++ _FreeExtraSpace(OutputBuffer, item, size, freeSize);
++ }
++#endif
++}
++#endif
++
++static void
++_AppendString(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctCONST_STRING Message,
++ IN gctUINT ArgumentSize,
++ IN gctPOINTER Data
++ )
++{
++ gctUINT8_PTR messageData;
++ gcsBUFITEM_STRING_PTR item;
++ gctINT allocSize;
++
++#if gcdALIGNBYSIZE
++ gctUINT alignment;
++ gctINT size, freeSize;
++#endif
++
++ /* Determine the maximum item size. */
++ allocSize
++ = gcmSIZEOF(gcsBUFITEM_STRING)
++ + ArgumentSize
++ + gcdVARARG_ALIGNMENT;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_STRING_PTR) _AllocateItem(OutputBuffer, allocSize);
++
++ /* Compute the initial message data pointer. */
++ messageData = (gctUINT8_PTR) (item + 1);
++
++ /* Align the data pointer as necessary. */
++#if gcdALIGNBYSIZE
++ alignment = gcmPTRALIGNMENT(messageData, gcdVARARG_ALIGNMENT);
++ messageData += alignment;
++#endif
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_STRING;
++ item->indent = Indent;
++ item->message = Message;
++ item->messageData = messageData;
++ item->messageDataSize = ArgumentSize;
++
++ /* Copy argument value. */
++ if (ArgumentSize != 0)
++ {
++ memcpy(messageData, Data, ArgumentSize);
++ }
++
++#if gcdALIGNBYSIZE
++ /* Compute the actual node size. */
++ size = gcmSIZEOF(gcsBUFITEM_STRING) + ArgumentSize + alignment;
++
++ /* Free extra memory if any. */
++ freeSize = allocSize - size;
++ if (freeSize != 0)
++ {
++ _FreeExtraSpace(OutputBuffer, item, size, freeSize);
++ }
++#endif
++}
++
++static void
++_AppendCopy(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctCONST_STRING Message,
++ IN gctUINT ArgumentSize,
++ IN gctPOINTER Data
++ )
++{
++ gctUINT8_PTR messageData;
++ gcsBUFITEM_COPY_PTR item;
++ gctINT allocSize;
++ gctINT messageLength;
++ gctCONST_STRING message;
++
++#if gcdALIGNBYSIZE
++ gctUINT alignment;
++ gctINT size, freeSize;
++#endif
++
++ /* Get the length of the string. */
++ messageLength = strlen(Message) + 1;
++
++ /* Determine the maximum item size. */
++ allocSize
++ = gcmSIZEOF(gcsBUFITEM_COPY)
++ + messageLength
++ + ArgumentSize
++ + gcdVARARG_ALIGNMENT;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_COPY_PTR) _AllocateItem(OutputBuffer, allocSize);
++
++ /* Determine the message placement. */
++ message = (gctCONST_STRING) (item + 1);
++
++ /* Compute the initial message data pointer. */
++ messageData = (gctUINT8_PTR) message + messageLength;
++
++ /* Align the data pointer as necessary. */
++#if gcdALIGNBYSIZE
++ if (ArgumentSize == 0)
++ {
++ alignment = 0;
++ }
++ else
++ {
++ alignment = gcmPTRALIGNMENT(messageData, gcdVARARG_ALIGNMENT);
++ messageData += alignment;
++ }
++#endif
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_COPY;
++ item->indent = Indent;
++ item->messageData = messageData;
++ item->messageDataSize = ArgumentSize;
++
++ /* Copy the message. */
++ memcpy((gctPOINTER) message, Message, messageLength);
++
++ /* Copy argument value. */
++ if (ArgumentSize != 0)
++ {
++ memcpy(messageData, Data, ArgumentSize);
++ }
++
++#if gcdALIGNBYSIZE
++ /* Compute the actual node size. */
++ size
++ = gcmSIZEOF(gcsBUFITEM_COPY)
++ + messageLength
++ + ArgumentSize
++ + alignment;
++
++ /* Free extra memory if any. */
++ freeSize = allocSize - size;
++ if (freeSize != 0)
++ {
++ _FreeExtraSpace(OutputBuffer, item, size, freeSize);
++ }
++#endif
++}
++
++static void
++_AppendBuffer(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctINT Indent,
++ IN gctPOINTER PrefixData,
++ IN gctPOINTER Data,
++ IN gctUINT Address,
++ IN gctUINT DataSize,
++ IN gceDUMP_BUFFER Type,
++ IN gctUINT32 DmaAddress
++ )
++{
++#if gcdHAVEPREFIX
++ gctUINT8_PTR prefixData;
++ gcsBUFITEM_BUFFER_PTR item;
++ gctINT allocSize;
++ gctPOINTER data;
++
++#if gcdALIGNBYSIZE
++ gctUINT alignment;
++ gctINT size, freeSize;
++#endif
++
++ gcmDBGASSERT(DataSize != 0, "%d", DataSize);
++ gcmDBGASSERT(Data != gcvNULL, "%p", Data);
++
++ /* Determine the maximum item size. */
++ allocSize
++ = gcmSIZEOF(gcsBUFITEM_BUFFER)
++ + gcdPREFIX_SIZE
++ + gcdPREFIX_ALIGNMENT
++ + DataSize;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_BUFFER_PTR) _AllocateItem(OutputBuffer, allocSize);
++
++ /* Compute the initial prefix data pointer. */
++ prefixData = (gctUINT8_PTR) (item + 1);
++
++#if gcdALIGNBYSIZE
++ /* Align the data pointer as necessary. */
++ alignment = gcmPTRALIGNMENT(prefixData, gcdPREFIX_ALIGNMENT);
++ prefixData += alignment;
++#endif
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_BUFFER;
++ item->indent = Indent;
++ item->bufferType = Type;
++ item->dataSize = DataSize;
++ item->address = Address;
++ item->prefixData = prefixData;
++
++#if gcdDMA_BUFFER_COUNT && (gcdTHREAD_BUFFERS == 1)
++ item->dmaAddress = DmaAddress;
++#endif
++
++ /* Copy prefix data. */
++ memcpy(prefixData, PrefixData, gcdPREFIX_SIZE);
++
++ /* Compute the data pointer. */
++ data = prefixData + gcdPREFIX_SIZE;
++
++ /* Copy argument value. */
++ memcpy(data, Data, DataSize);
++
++#if gcdALIGNBYSIZE
++ /* Compute the actual node size. */
++ size
++ = gcmSIZEOF(gcsBUFITEM_BUFFER)
++ + gcdPREFIX_SIZE
++ + alignment
++ + DataSize;
++
++ /* Free extra memory if any. */
++ freeSize = allocSize - size;
++ if (freeSize != 0)
++ {
++ _FreeExtraSpace(OutputBuffer, item, size, freeSize);
++ }
++#endif
++#else
++ gcsBUFITEM_BUFFER_PTR item;
++ gctINT size;
++
++ gcmDBGASSERT(DataSize != 0, "%d", DataSize);
++ gcmDBGASSERT(Data != gcvNULL, "%p", Data);
++
++ /* Determine the maximum item size. */
++ size = gcmSIZEOF(gcsBUFITEM_BUFFER) + DataSize;
++
++ /* Allocate prefix item. */
++ item = (gcsBUFITEM_BUFFER_PTR) _AllocateItem(OutputBuffer, size);
++
++ /* Set item data. */
++ item->type = gcvBUFITEM_BUFFER;
++ item->indent = Indent;
++ item->dataSize = DataSize;
++ item->address = Address;
++
++ /* Copy argument value. */
++ memcpy(item + 1, Data, DataSize);
++#endif
++}
++#endif
++
++static gcmINLINE void
++_InitBuffers(
++ void
++ )
++{
++ int i;
++
++ if (_outputBufferHead == gcvNULL)
++ {
++ for (i = 0; i < gcdTHREAD_BUFFERS; i += 1)
++ {
++ if (_outputBufferTail == gcvNULL)
++ {
++ _outputBufferHead = &_outputBuffer[i];
++ }
++ else
++ {
++ _outputBufferTail->next = &_outputBuffer[i];
++ }
++
++#if gcdTHREAD_BUFFERS > 1
++ _outputBuffer[i].threadID = ~0U;
++#endif
++
++ _outputBuffer[i].prev = _outputBufferTail;
++ _outputBuffer[i].next = gcvNULL;
++
++ _outputBufferTail = &_outputBuffer[i];
++ }
++ }
++}
++
++static gcmINLINE gcsBUFFERED_OUTPUT_PTR
++_GetOutputBuffer(
++ void
++ )
++{
++ gcsBUFFERED_OUTPUT_PTR outputBuffer;
++
++#if gcdTHREAD_BUFFERS > 1
++ /* Get the current thread ID. */
++ gctUINT32 ThreadID = gcmkGETTHREADID();
++
++ /* Locate the output buffer for the thread. */
++ outputBuffer = _outputBufferHead;
++
++ while (outputBuffer != gcvNULL)
++ {
++ if (outputBuffer->threadID == ThreadID)
++ {
++ break;
++ }
++
++ outputBuffer = outputBuffer->next;
++ }
++
++ /* No matching buffer found? */
++ if (outputBuffer == gcvNULL)
++ {
++ /* Get the tail for the buffer. */
++ outputBuffer = _outputBufferTail;
++
++ /* Move it to the head. */
++ _outputBufferTail = _outputBufferTail->prev;
++ _outputBufferTail->next = gcvNULL;
++
++ outputBuffer->prev = gcvNULL;
++ outputBuffer->next = _outputBufferHead;
++
++ _outputBufferHead->prev = outputBuffer;
++ _outputBufferHead = outputBuffer;
++
++ /* Reset the buffer. */
++ outputBuffer->threadID = ThreadID;
++#if gcdBUFFERED_OUTPUT
++ outputBuffer->start = 0;
++ outputBuffer->index = 0;
++ outputBuffer->count = 0;
++#endif
++#if gcdSHOW_LINE_NUMBER
++ outputBuffer->lineNumber = 0;
++#endif
++ }
++#else
++ outputBuffer = _outputBufferHead;
++#endif
++
++ return outputBuffer;
++}
++
++static gcmINLINE int
++_GetArgumentSize(
++ IN gctCONST_STRING Message
++ )
++{
++ int i, count;
++
++ gcmDBGASSERT(Message != gcvNULL, "%p", Message);
++
++ for (i = 0, count = 0; Message[i]; i += 1)
++ {
++ if (Message[i] == '%')
++ {
++ count += 1;
++ }
++ }
++
++ return count * gcmSIZEOF(gctUINT32);
++}
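++
++/* Note that this estimate assumes every '%' starts a conversion consuming
++   one 32-bit argument; "%%" or 64-bit conversions such as "%llu" would be
++   miscounted. For example, "a=%d b=%d" yields 2 * sizeof(gctUINT32) = 8. */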
++
++#if gcdHAVEPREFIX
++static void
++_InitPrefixData(
++ IN gcsBUFFERED_OUTPUT_PTR OutputBuffer,
++ IN gctPOINTER Data
++ )
++{
++ gctUINT8_PTR data = (gctUINT8_PTR) Data;
++
++#if gcdSHOW_TIME
++ {
++ gctUINT64 time;
++ gckOS_GetProfileTick(&time);
++ gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT64));
++ * ((gctUINT64_PTR) data) = time;
++ data += gcmSIZEOF(gctUINT64);
++ }
++#endif
++
++#if gcdSHOW_LINE_NUMBER
++ {
++ gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT64));
++ * ((gctUINT64_PTR) data) = OutputBuffer->lineNumber;
++ data += gcmSIZEOF(gctUINT64);
++ }
++#endif
++
++#if gcdSHOW_PROCESS_ID
++ {
++ gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT32));
++ * ((gctUINT32_PTR) data) = gcmkGETPROCESSID();
++ data += gcmSIZEOF(gctUINT32);
++ }
++#endif
++
++#if gcdSHOW_THREAD_ID
++ {
++ gcmkALIGNPTR(gctUINT8_PTR, data, gcmSIZEOF(gctUINT32));
++ * ((gctUINT32_PTR) data) = gcmkGETTHREADID();
++ }
++#endif
++}
++#endif
++
++static void
++_Print(
++ IN gctUINT ArgumentSize,
++ IN gctBOOL CopyMessage,
++ IN gctCONST_STRING Message,
++ IN gctARGUMENTS Arguments
++ )
++{
++ gcsBUFFERED_OUTPUT_PTR outputBuffer;
++ gcmkDECLARE_LOCK(lockHandle);
++
++ gcmkLOCKSECTION(lockHandle);
++
++ /* Initialize output buffer list. */
++ _InitBuffers();
++
++ /* Locate the proper output buffer. */
++ outputBuffer = _GetOutputBuffer();
++
++ /* Update the line number. */
++#if gcdSHOW_LINE_NUMBER
++ outputBuffer->lineNumber += 1;
++#endif
++
++ /* Print prefix. */
++#if gcdHAVEPREFIX
++ {
++ gctUINT8_PTR alignedPrefixData;
++ gctUINT8 prefixData[gcdPREFIX_SIZE + gcdPREFIX_ALIGNMENT];
++
++ /* Compute aligned pointer. */
++ alignedPrefixData = prefixData;
++ gcmkALIGNPTR(gctUINT8_PTR, alignedPrefixData, gcdPREFIX_ALIGNMENT);
++
++ /* Initialize the prefix data. */
++ _InitPrefixData(outputBuffer, alignedPrefixData);
++
++ /* Print the prefix. */
++ gcdOUTPUTPREFIX(outputBuffer, alignedPrefixData);
++ }
++#endif
++
++ /* Form the indent string. */
++ if (strncmp(Message, "--", 2) == 0)
++ {
++ outputBuffer->indent -= 2;
++ }
++
++ /* Print the message. */
++ if (CopyMessage)
++ {
++ gcdOUTPUTCOPY(
++ outputBuffer, outputBuffer->indent,
++ Message, ArgumentSize, * (gctPOINTER *) &Arguments
++ );
++ }
++ else
++ {
++ gcdOUTPUTSTRING(
++ outputBuffer, outputBuffer->indent,
++ Message, ArgumentSize, * (gctPOINTER *) &Arguments
++ );
++ }
++
++ /* Check increasing indent. */
++ if (strncmp(Message, "++", 2) == 0)
++ {
++ outputBuffer->indent += 2;
++ }
++
++ gcmkUNLOCKSECTION(lockHandle);
++}
++
++
++/******************************************************************************\
++********************************* Debug Macros *********************************
++\******************************************************************************/
++
++#ifdef __QNXNTO__
++
++extern volatile unsigned g_nQnxInIsrs;
++
++#define gcmDEBUGPRINT(ArgumentSize, CopyMessage, Message) \
++{ \
++ if (atomic_add_value(&g_nQnxInIsrs, 1) == 0) \
++ { \
++ gctARGUMENTS __arguments__; \
++ gcmkARGUMENTS_START(__arguments__, Message); \
++ _Print(ArgumentSize, CopyMessage, Message, __arguments__); \
++ gcmkARGUMENTS_END(__arguments__); \
++ } \
++ atomic_sub(&g_nQnxInIsrs, 1); \
++}
++
++#else
++
++#define gcmDEBUGPRINT(ArgumentSize, CopyMessage, Message) \
++{ \
++ gctARGUMENTS __arguments__; \
++ gcmkARGUMENTS_START(__arguments__, Message); \
++ _Print(ArgumentSize, CopyMessage, Message, __arguments__); \
++ gcmkARGUMENTS_END(__arguments__); \
++}
++
++#endif
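++
++/* gcmDEBUGPRINT is only usable inside a variadic function: it captures the
++   caller's variable arguments via gcmkARGUMENTS_START (effectively
++   va_start on Message) and forwards them to _Print(). gckOS_Print() below
++   is the simplest caller. */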
++
++/******************************************************************************\
++********************************** Debug Code **********************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckOS_Print
++**
++** Send a message to the debugger.
++**
++** INPUT:
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_Print(
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_PrintN
++**
++** Send a message to the debugger.
++**
++** INPUT:
++**
++** gctUINT ArgumentSize
++** The size of the optional arguments in bytes.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_PrintN(
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ gcmDEBUGPRINT(ArgumentSize, gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_CopyPrint
++**
++** Send a message to the debugger. If in buffered output mode, the entire
++** message will be copied into the buffer instead of using the pointer to
++** the string.
++**
++** INPUT:
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_CopyPrint(
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvTRUE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DumpBuffer
++**
++** Print the contents of the specified buffer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctPOINTER Buffer
++** Pointer to the buffer to print.
++**
++** gctUINT Size
++** Size of the buffer.
++**
++** gceDUMP_BUFFER Type
++** Buffer type.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DumpBuffer(
++ IN gckOS Os,
++ IN gctPOINTER Buffer,
++ IN gctUINT Size,
++ IN gceDUMP_BUFFER Type,
++ IN gctBOOL CopyMessage
++ )
++{
++ gctUINT32 address;
++ gcsBUFFERED_OUTPUT_PTR outputBuffer;
++ static gctBOOL userLocked;
++ gctCHAR *buffer = (gctCHAR*)Buffer;
++
++ gcmkDECLARE_LOCK(lockHandle);
++
++    /* Take the lock when the dump does not come from user space, or when
++       it comes from user space and begins with the "@[" marker that opens
++       a user dump. */
++ if (Type == gceDUMP_BUFFER_FROM_USER)
++ {
++ if ((Size > 2)
++ && (buffer[0] == '@')
++ && (buffer[1] == '['))
++ {
++ /* Beginning of a user dump. */
++ gcmkLOCKSECTION(lockHandle);
++ userLocked = gcvTRUE;
++ }
++ /* Else, let it pass through. */
++ }
++ else
++ {
++ gcmkLOCKSECTION(lockHandle);
++ userLocked = gcvFALSE;
++ }
++
++ if (Buffer != gcvNULL)
++ {
++ /* Initialize output buffer list. */
++ _InitBuffers();
++
++ /* Locate the proper output buffer. */
++ outputBuffer = _GetOutputBuffer();
++
++ /* Update the line number. */
++#if gcdSHOW_LINE_NUMBER
++ outputBuffer->lineNumber += 1;
++#endif
++
++ /* Get the physical address of the buffer. */
++ if (Type != gceDUMP_BUFFER_FROM_USER)
++ {
++ gcmkVERIFY_OK(gckOS_GetPhysicalAddress(Os, Buffer, &address));
++ }
++ else
++ {
++ address = 0;
++ }
++
++#if gcdHAVEPREFIX
++ {
++ gctUINT8_PTR alignedPrefixData;
++ gctUINT8 prefixData[gcdPREFIX_SIZE + gcdPREFIX_ALIGNMENT];
++
++ /* Compute aligned pointer. */
++ alignedPrefixData = prefixData;
++ gcmkALIGNPTR(gctUINT8_PTR, alignedPrefixData, gcdPREFIX_ALIGNMENT);
++
++ /* Initialize the prefix data. */
++ _InitPrefixData(outputBuffer, alignedPrefixData);
++
++ /* Print/schedule the buffer. */
++ gcdOUTPUTBUFFER(
++ outputBuffer, outputBuffer->indent,
++ alignedPrefixData, Buffer, address, Size, Type, 0
++ );
++ }
++#else
++ /* Print/schedule the buffer. */
++ if (Type == gceDUMP_BUFFER_FROM_USER)
++ {
++ gcdOUTPUTSTRING(
++ outputBuffer, outputBuffer->indent,
++ Buffer, 0, gcvNULL
++ );
++ }
++ else
++ {
++ gcdOUTPUTBUFFER(
++ outputBuffer, outputBuffer->indent,
++ gcvNULL, Buffer, address, Size, Type, 0
++ );
++ }
++#endif
++ }
++
++    /* Release the lock unless a user dump is still in progress; a user
++       dump stays locked until a buffer beginning with "] --" ends it. */
++ if (userLocked)
++ {
++ if ((Size > 4)
++ && (buffer[0] == ']')
++ && (buffer[1] == ' ')
++ && (buffer[2] == '-')
++ && (buffer[3] == '-'))
++ {
++ /* End of a user dump. */
++ gcmkUNLOCKSECTION(lockHandle);
++ userLocked = gcvFALSE;
++ }
++ /* Else, let it pass through, don't unlock. */
++ }
++ else
++ {
++ gcmkUNLOCKSECTION(lockHandle);
++ }
++}
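++
++/* User dumps are bracketed: a buffer starting with "@[" takes the lock and
++   sets userLocked, and a later buffer starting with "] --" releases it, so
++   a multi-part user dump is emitted atomically with respect to other
++   threads. */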
++
++/*******************************************************************************
++**
++** gckOS_DebugTrace
++**
++** Send a leveled message to the debugger.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** Debug level of message.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugTrace(
++ IN gctUINT32 Level,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ if (Level > _debugLevel)
++ {
++ return;
++ }
++
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message);
++}
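++
++/* For example (illustrative), with _debugLevel left at its default of
++   gcvLEVEL_ERROR, a call such as
++
++       gckOS_DebugTrace(gcvLEVEL_INFO, "buffers in flight: %d\n", count);
++
++   is filtered out, because gcvLEVEL_INFO is numerically above the current
++   level; raise the threshold first with gckOS_SetDebugLevel(gcvLEVEL_INFO).
++   'count' is a hypothetical caller variable. */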
++
++/*******************************************************************************
++**
++** gckOS_DebugTraceN
++**
++** Send a leveled message to the debugger.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** Debug level of message.
++**
++** gctUINT ArgumentSize
++** The size of the optional arguments in bytes.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugTraceN(
++ IN gctUINT32 Level,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ if (Level > _debugLevel)
++ {
++ return;
++ }
++
++ gcmDEBUGPRINT(ArgumentSize, gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugTraceZone
++**
++** Send a leveled and zoned message to the debugger.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** Debug level for message.
++**
++** gctUINT32 Zone
++** Debug zone for message.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugTraceZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ if ((Level > _debugLevel) || !(Zone & _debugZones))
++ {
++ return;
++ }
++
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugTraceZoneN
++**
++** Send a leveled and zoned message to the debugger.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** Debug level for message.
++**
++** gctUINT32 Zone
++** Debug zone for message.
++**
++** gctUINT ArgumentSize
++** The size of the optional arguments in bytes.
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugTraceZoneN(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ if ((Level > _debugLevel) || !(Zone & _debugZones))
++ {
++ return;
++ }
++
++ gcmDEBUGPRINT(ArgumentSize, gcvFALSE, Message);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugBreak
++**
++** Break into the debugger.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++void
++gckOS_DebugBreak(
++ void
++ )
++{
++ gckOS_DebugTrace(gcvLEVEL_ERROR, "%s(%d)", __FUNCTION__, __LINE__);
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugFatal
++**
++** Send a message to the debugger and break into the debugger.
++**
++** INPUT:
++**
++** gctCONST_STRING Message
++** Pointer to message.
++**
++** ...
++** Optional arguments.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++void
++gckOS_DebugFatal(
++ IN gctCONST_STRING Message,
++ ...
++ )
++{
++ gcmkPRINT_VERSION();
++ gcmDEBUGPRINT(_GetArgumentSize(Message), gcvFALSE, Message);
++
++ /* Break into the debugger. */
++ gckOS_DebugBreak();
++}
++
++/*******************************************************************************
++**
++** gckOS_SetDebugLevel
++**
++** Set the debug level.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** New debug level.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_SetDebugLevel(
++ IN gctUINT32 Level
++ )
++{
++ _debugLevel = Level;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetDebugZone
++**
++** Set the debug zone.
++**
++** INPUT:
++**
++** gctUINT32 Zone
++** New debug zone.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++void
++gckOS_SetDebugZone(
++ IN gctUINT32 Zone
++ )
++{
++ _debugZones = Zone;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetDebugLevelZone
++**
++** Set the debug level and zone.
++**
++** INPUT:
++**
++** gctUINT32 Level
++** New debug level.
++**
++** gctUINT32 Zone
++** New debug zone.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_SetDebugLevelZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone
++ )
++{
++ _debugLevel = Level;
++ _debugZones = Zone;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetDebugZones
++**
++** Enable or disable debug zones.
++**
++** INPUT:
++**
++** gctUINT32 Zones
++** Debug zones to enable or disable.
++**
++** gctBOOL Enable
++**          Set to gcvTRUE to enable the specified Zones (OR them into the
++**          current zones) or gcvFALSE to disable them.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_SetDebugZones(
++ IN gctUINT32 Zones,
++ IN gctBOOL Enable
++ )
++{
++ if (Enable)
++ {
++ /* Enable the zones. */
++ _debugZones |= Zones;
++ }
++ else
++ {
++ /* Disable the zones. */
++ _debugZones &= ~Zones;
++ }
++}
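++
++/* Illustrative zone management (zone names from gc_hal_base.h, assumed):
++
++       gckOS_SetDebugZones(gcvZONE_COMMAND | gcvZONE_INTERRUPT, gcvTRUE);
++       gckOS_SetDebugZones(gcvZONE_INTERRUPT, gcvFALSE);
++
++   leaves gcvZONE_COMMAND enabled in _debugZones. */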
++
++/*******************************************************************************
++**
++** gckOS_Verify
++**
++** Called to verify the result of a function call.
++**
++** INPUT:
++**
++** gceSTATUS Status
++** Function call result.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_Verify(
++    IN gceSTATUS Status
++    )
++{
++    _lastError = Status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DebugFlush
++**
++** Force messages to be flushed out.
++**
++** INPUT:
++**
++** gctCONST_STRING CallerName
++** Name of the caller function.
++**
++** gctUINT LineNumber
++** Line number of the caller.
++**
++** gctUINT32 DmaAddress
++** The current DMA address or ~0U to ignore.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++void
++gckOS_DebugFlush(
++ gctCONST_STRING CallerName,
++ gctUINT LineNumber,
++ gctUINT32 DmaAddress
++ )
++{
++#if gcdBUFFERED_OUTPUT
++ _DirectPrint("\nFlush requested by %s(%d).\n\n", CallerName, LineNumber);
++ _Flush(DmaAddress);
++#endif
++}
++
++gctCONST_STRING
++gckOS_DebugStatus2Name(
++ gceSTATUS status
++ )
++{
++ switch (status)
++ {
++ case gcvSTATUS_OK:
++ return "gcvSTATUS_OK";
++ case gcvSTATUS_TRUE:
++ return "gcvSTATUS_TRUE";
++ case gcvSTATUS_NO_MORE_DATA:
++ return "gcvSTATUS_NO_MORE_DATA";
++ case gcvSTATUS_CACHED:
++ return "gcvSTATUS_CACHED";
++ case gcvSTATUS_MIPMAP_TOO_LARGE:
++ return "gcvSTATUS_MIPMAP_TOO_LARGE";
++ case gcvSTATUS_NAME_NOT_FOUND:
++ return "gcvSTATUS_NAME_NOT_FOUND";
++ case gcvSTATUS_NOT_OUR_INTERRUPT:
++ return "gcvSTATUS_NOT_OUR_INTERRUPT";
++ case gcvSTATUS_MISMATCH:
++ return "gcvSTATUS_MISMATCH";
++ case gcvSTATUS_MIPMAP_TOO_SMALL:
++ return "gcvSTATUS_MIPMAP_TOO_SMALL";
++ case gcvSTATUS_LARGER:
++ return "gcvSTATUS_LARGER";
++ case gcvSTATUS_SMALLER:
++ return "gcvSTATUS_SMALLER";
++ case gcvSTATUS_CHIP_NOT_READY:
++ return "gcvSTATUS_CHIP_NOT_READY";
++ case gcvSTATUS_NEED_CONVERSION:
++ return "gcvSTATUS_NEED_CONVERSION";
++ case gcvSTATUS_SKIP:
++ return "gcvSTATUS_SKIP";
++ case gcvSTATUS_DATA_TOO_LARGE:
++ return "gcvSTATUS_DATA_TOO_LARGE";
++ case gcvSTATUS_INVALID_CONFIG:
++ return "gcvSTATUS_INVALID_CONFIG";
++ case gcvSTATUS_CHANGED:
++ return "gcvSTATUS_CHANGED";
++ case gcvSTATUS_NOT_SUPPORT_DITHER:
++ return "gcvSTATUS_NOT_SUPPORT_DITHER";
++
++ case gcvSTATUS_INVALID_ARGUMENT:
++ return "gcvSTATUS_INVALID_ARGUMENT";
++ case gcvSTATUS_INVALID_OBJECT:
++ return "gcvSTATUS_INVALID_OBJECT";
++ case gcvSTATUS_OUT_OF_MEMORY:
++ return "gcvSTATUS_OUT_OF_MEMORY";
++ case gcvSTATUS_MEMORY_LOCKED:
++ return "gcvSTATUS_MEMORY_LOCKED";
++ case gcvSTATUS_MEMORY_UNLOCKED:
++ return "gcvSTATUS_MEMORY_UNLOCKED";
++ case gcvSTATUS_HEAP_CORRUPTED:
++ return "gcvSTATUS_HEAP_CORRUPTED";
++ case gcvSTATUS_GENERIC_IO:
++ return "gcvSTATUS_GENERIC_IO";
++ case gcvSTATUS_INVALID_ADDRESS:
++ return "gcvSTATUS_INVALID_ADDRESS";
++ case gcvSTATUS_CONTEXT_LOSSED:
++ return "gcvSTATUS_CONTEXT_LOSSED";
++ case gcvSTATUS_TOO_COMPLEX:
++ return "gcvSTATUS_TOO_COMPLEX";
++ case gcvSTATUS_BUFFER_TOO_SMALL:
++ return "gcvSTATUS_BUFFER_TOO_SMALL";
++ case gcvSTATUS_INTERFACE_ERROR:
++ return "gcvSTATUS_INTERFACE_ERROR";
++ case gcvSTATUS_NOT_SUPPORTED:
++ return "gcvSTATUS_NOT_SUPPORTED";
++ case gcvSTATUS_MORE_DATA:
++ return "gcvSTATUS_MORE_DATA";
++ case gcvSTATUS_TIMEOUT:
++ return "gcvSTATUS_TIMEOUT";
++ case gcvSTATUS_OUT_OF_RESOURCES:
++ return "gcvSTATUS_OUT_OF_RESOURCES";
++ case gcvSTATUS_INVALID_DATA:
++ return "gcvSTATUS_INVALID_DATA";
++ case gcvSTATUS_INVALID_MIPMAP:
++ return "gcvSTATUS_INVALID_MIPMAP";
++ case gcvSTATUS_NOT_FOUND:
++ return "gcvSTATUS_NOT_FOUND";
++ case gcvSTATUS_NOT_ALIGNED:
++ return "gcvSTATUS_NOT_ALIGNED";
++ case gcvSTATUS_INVALID_REQUEST:
++ return "gcvSTATUS_INVALID_REQUEST";
++ case gcvSTATUS_GPU_NOT_RESPONDING:
++ return "gcvSTATUS_GPU_NOT_RESPONDING";
++ case gcvSTATUS_TIMER_OVERFLOW:
++ return "gcvSTATUS_TIMER_OVERFLOW";
++ case gcvSTATUS_VERSION_MISMATCH:
++ return "gcvSTATUS_VERSION_MISMATCH";
++ case gcvSTATUS_LOCKED:
++ return "gcvSTATUS_LOCKED";
++
++ /* Linker errors. */
++ case gcvSTATUS_GLOBAL_TYPE_MISMATCH:
++ return "gcvSTATUS_GLOBAL_TYPE_MISMATCH";
++ case gcvSTATUS_TOO_MANY_ATTRIBUTES:
++ return "gcvSTATUS_TOO_MANY_ATTRIBUTES";
++ case gcvSTATUS_TOO_MANY_UNIFORMS:
++ return "gcvSTATUS_TOO_MANY_UNIFORMS";
++ case gcvSTATUS_TOO_MANY_VARYINGS:
++ return "gcvSTATUS_TOO_MANY_VARYINGS";
++ case gcvSTATUS_UNDECLARED_VARYING:
++ return "gcvSTATUS_UNDECLARED_VARYING";
++ case gcvSTATUS_VARYING_TYPE_MISMATCH:
++ return "gcvSTATUS_VARYING_TYPE_MISMATCH";
++ case gcvSTATUS_MISSING_MAIN:
++ return "gcvSTATUS_MISSING_MAIN";
++ case gcvSTATUS_NAME_MISMATCH:
++ return "gcvSTATUS_NAME_MISMATCH";
++ case gcvSTATUS_INVALID_INDEX:
++ return "gcvSTATUS_INVALID_INDEX";
++ default:
++ return "nil";
++ }
++}
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_event.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_event.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_event.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_event.c 2015-07-27 23:13:06.194879670 +0200
+@@ -0,0 +1,2898 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++#include "gc_hal_kernel_buffer.h"
++
++#ifdef __QNXNTO__
++#include <atomic.h>
++#include "gc_hal_kernel_qnx.h"
++#endif
++
++#define _GC_OBJ_ZONE gcvZONE_EVENT
++
++#define gcdEVENT_ALLOCATION_COUNT (4096 / gcmSIZEOF(gcsHAL_INTERFACE))
++#define gcdEVENT_MIN_THRESHOLD 4
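++
++/* Sizing notes (a reading of the constants above, not vendor-documented):
++** gcdEVENT_ALLOCATION_COUNT batches record allocations by how many
++** gcsHAL_INTERFACE structures fit in a 4096-byte page, and
++** gcdEVENT_MIN_THRESHOLD is the low-water mark at which
++** gckEVENT_AllocateRecord replenishes the free list.
++*/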
++
++/******************************************************************************\
++********************************* Support Code *********************************
++\******************************************************************************/
++
++static gceSTATUS
++gckEVENT_AllocateQueue(
++ IN gckEVENT Event,
++ OUT gcsEVENT_QUEUE_PTR * Queue
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Queue != gcvNULL);
++
++ /* Do we have free queues? */
++ if (Event->freeList == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Move one free queue from the free list. */
++    *Queue = Event->freeList;
++ Event->freeList = Event->freeList->next;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Queue=0x%x", gcmOPT_POINTER(Queue));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++gckEVENT_FreeQueue(
++ IN gckEVENT Event,
++ OUT gcsEVENT_QUEUE_PTR Queue
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Queue != gcvNULL);
++
++    /* Push the queue back on the free list. */
++ Queue->next = Event->freeList;
++ Event->freeList = Queue;
++
++ /* Success. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++gckEVENT_FreeRecord(
++ IN gckEVENT Event,
++ IN gcsEVENT_PTR Record
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Event=0x%x Record=0x%x", Event, Record);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Record != gcvNULL);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->freeEventMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Push the record on the free list. */
++ Record->next = Event->freeEventList;
++ Event->freeEventList = Record;
++ Event->freeEventCount += 1;
++
++ /* Release the mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++    return status;
++}
++
++static gceSTATUS
++gckEVENT_IsEmpty(
++ IN gckEVENT Event,
++ OUT gctBOOL_PTR IsEmpty
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T i;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(IsEmpty != gcvNULL);
++
++ /* Assume the event queue is empty. */
++ *IsEmpty = gcvTRUE;
++
++ /* Try acquiring the mutex. */
++ status = gckOS_AcquireMutex(Event->os, Event->eventQueueMutex, 0);
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++        /* Timed out - the mutex is held, so assume the queue is not empty. */
++ *IsEmpty = gcvFALSE;
++ }
++ else
++ {
++ /* Bail out on error. */
++ gcmkONERROR(status);
++
++ /* Walk the event queue. */
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ /* Check whether this event is in use. */
++ if (Event->queues[i].head != gcvNULL)
++ {
++ /* The event is in use, hence the queue is not empty. */
++ *IsEmpty = gcvFALSE;
++ break;
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*IsEmpty=%d", gcmOPT_VALUE(IsEmpty));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_TryToIdleGPU(
++ IN gckEVENT Event
++)
++{
++ gceSTATUS status;
++ gctBOOL empty = gcvFALSE, idle = gcvFALSE;
++ gctBOOL powerLocked = gcvFALSE;
++ gckHARDWARE hardware;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Grab gckHARDWARE object. */
++ hardware = Event->kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Check whether the event queue is empty. */
++ gcmkONERROR(gckEVENT_IsEmpty(Event, &empty));
++
++ if (empty)
++ {
++ status = gckOS_AcquireMutex(hardware->os, hardware->powerMutex, 0);
++ if (status == gcvSTATUS_TIMEOUT)
++ {
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ powerLocked = gcvTRUE;
++
++ /* Query whether the hardware is idle. */
++ gcmkONERROR(gckHARDWARE_QueryIdle(Event->kernel->hardware, &idle));
++
++ gcmkONERROR(gckOS_ReleaseMutex(hardware->os, hardware->powerMutex));
++ powerLocked = gcvFALSE;
++
++ if (idle)
++ {
++ /* Inform the system of idle GPU. */
++ gcmkONERROR(gckOS_Broadcast(Event->os,
++ Event->kernel->hardware,
++ gcvBROADCAST_GPU_IDLE));
++ }
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (powerLocked)
++ {
++ gcmkONERROR(gckOS_ReleaseMutex(hardware->os, hardware->powerMutex));
++ powerLocked = gcvFALSE;
++ }
++
++ gcmkFOOTER();
++ return status;
++}
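++
++/* Note on _TryToIdleGPU above: once the event queue has drained and the
++** hardware reports idle, it broadcasts gcvBROADCAST_GPU_IDLE so power
++** management can gate the GPU; both locks are taken with a zero timeout,
++** so the check never stalls the caller. */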
++
++static gceSTATUS
++__RemoveRecordFromProcessDB(
++ IN gckEVENT Event,
++ IN gcsEVENT_PTR Record
++ )
++{
++ gcmkHEADER_ARG("Event=0x%x Record=0x%x", Event, Record);
++ gcmkVERIFY_ARGUMENT(Record != gcvNULL);
++
++ while (Record != gcvNULL)
++ {
++ if (Record->info.command == gcvHAL_SIGNAL)
++ {
++ /* TODO: Find a better place to bind signal to hardware.*/
++ gcmkVERIFY_OK(gckOS_SignalSetHardware(Event->os,
++ gcmUINT64_TO_PTR(Record->info.u.Signal.signal),
++ Event->kernel->hardware));
++ }
++
++ if (Record->fromKernel)
++ {
++ /* No need to check db if event is from kernel. */
++ Record = Record->next;
++ continue;
++ }
++
++ switch (Record->info.command)
++ {
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_NON_PAGED,
++ gcmUINT64_TO_PTR(Record->info.u.FreeNonPagedMemory.logical)));
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_CONTIGUOUS,
++ gcmUINT64_TO_PTR(Record->info.u.FreeContiguousMemory.logical)));
++ break;
++
++ case gcvHAL_FREE_VIDEO_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_VIDEO_MEMORY,
++ gcmUINT64_TO_PTR(Record->info.u.FreeVideoMemory.node)));
++
++ {
++ gcuVIDMEM_NODE_PTR node = (gcuVIDMEM_NODE_PTR)(gcmUINT64_TO_PTR(Record->info.u.FreeVideoMemory.node));
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(Event->kernel,
++ Record->processID,
++ gcvDB_VIDEO_MEMORY_RESERVED,
++ node));
++ }
++ else if(node->Virtual.contiguous)
++ {
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(Event->kernel,
++ Record->processID,
++ gcvDB_VIDEO_MEMORY_CONTIGUOUS,
++ node));
++ }
++ else
++ {
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(Event->kernel,
++ Record->processID,
++ gcvDB_VIDEO_MEMORY_VIRTUAL,
++ node));
++ }
++ }
++
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_VIDEO_MEMORY_LOCKED,
++ gcmUINT64_TO_PTR(Record->info.u.UnlockVideoMemory.node)));
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_MAP_USER_MEMORY,
++ gcmINT2PTR(Record->info.u.UnmapUserMemory.info)));
++ break;
++
++ case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Event->kernel,
++ Record->processID,
++ gcvDB_COMMAND_BUFFER,
++ gcmUINT64_TO_PTR(Record->info.u.FreeVirtualCommandBuffer.logical)));
++ break;
++
++ default:
++ break;
++ }
++
++ Record = Record->next;
++ }
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++void
++_SubmitTimerFunction(
++ gctPOINTER Data
++ )
++{
++ gckEVENT event = (gckEVENT)Data;
++ gcmkVERIFY_OK(gckEVENT_Submit(event, gcvTRUE, gcvFALSE));
++}
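++
++/* _SubmitTimerFunction is the timer callback installed by
++** gckEVENT_Construct: it retries submission of queued events from a safe,
++** non-interrupt context, waiting for a free hardware event ID if needed
++** (Wait=gcvTRUE, FromPower=gcvFALSE). */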
++
++/******************************************************************************\
++******************************* gckEVENT API Code *******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckEVENT_Construct
++**
++** Construct a new gckEVENT object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** OUTPUT:
++**
++** gckEVENT * Event
++** Pointer to a variable that receives the gckEVENT object pointer.
++*/
++gceSTATUS
++gckEVENT_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckEVENT * Event
++ )
++{
++ gckOS os;
++ gceSTATUS status;
++ gckEVENT eventObj = gcvNULL;
++ int i;
++ gcsEVENT_PTR record;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Event != gcvNULL);
++
++ /* Extract the pointer to the gckOS object. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Allocate the gckEVENT object. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct _gckEVENT), &pointer));
++
++ eventObj = pointer;
++
++ /* Reset the object. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(eventObj, gcmSIZEOF(struct _gckEVENT)));
++
++ /* Initialize the gckEVENT object. */
++ eventObj->object.type = gcvOBJ_EVENT;
++ eventObj->kernel = Kernel;
++ eventObj->os = os;
++
++ /* Create the mutexes. */
++ gcmkONERROR(gckOS_CreateMutex(os, &eventObj->eventQueueMutex));
++ gcmkONERROR(gckOS_CreateMutex(os, &eventObj->freeEventMutex));
++ gcmkONERROR(gckOS_CreateMutex(os, &eventObj->eventListMutex));
++
++    /* Create a bunch of event records. */
++ for (i = 0; i < gcdEVENT_ALLOCATION_COUNT; i += 1)
++ {
++ /* Allocate an event record. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcsEVENT), &pointer));
++
++ record = pointer;
++
++ /* Push it on the free list. */
++ record->next = eventObj->freeEventList;
++ eventObj->freeEventList = record;
++ eventObj->freeEventCount += 1;
++ }
++
++ /* Initialize the free list of event queues. */
++ for (i = 0; i < gcdREPO_LIST_COUNT; i += 1)
++ {
++ eventObj->repoList[i].next = eventObj->freeList;
++ eventObj->freeList = &eventObj->repoList[i];
++ }
++
++ /* Construct the atom. */
++ gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->freeAtom));
++ gcmkONERROR(gckOS_AtomSet(os,
++ eventObj->freeAtom,
++ gcmCOUNTOF(eventObj->queues)));
++
++#if gcdSMP
++ gcmkONERROR(gckOS_AtomConstruct(os, &eventObj->pending));
++#endif
++
++ gcmkVERIFY_OK(gckOS_CreateTimer(os,
++ _SubmitTimerFunction,
++ (gctPOINTER)eventObj,
++ &eventObj->submitTimer));
++
++ /* Return pointer to the gckEVENT object. */
++ *Event = eventObj;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Event=0x%x", *Event);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (eventObj != gcvNULL)
++ {
++ if (eventObj->eventQueueMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->eventQueueMutex));
++ }
++
++ if (eventObj->freeEventMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->freeEventMutex));
++ }
++
++ if (eventObj->eventListMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, eventObj->eventListMutex));
++ }
++
++ while (eventObj->freeEventList != gcvNULL)
++ {
++ record = eventObj->freeEventList;
++ eventObj->freeEventList = record->next;
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, record));
++ }
++
++ if (eventObj->freeAtom != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->freeAtom));
++ }
++
++#if gcdSMP
++ if (eventObj->pending != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_AtomDestroy(os, eventObj->pending));
++ }
++#endif
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, eventObj));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Destroy
++**
++** Destroy an gckEVENT object.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Destroy(
++ IN gckEVENT Event
++ )
++{
++ gcsEVENT_PTR record;
++ gcsEVENT_QUEUE_PTR queue;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ if (Event->submitTimer != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_StopTimer(Event->os, Event->submitTimer));
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Event->os, Event->submitTimer));
++ }
++
++ /* Delete the queue mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->eventQueueMutex));
++
++ /* Free all free events. */
++ while (Event->freeEventList != gcvNULL)
++ {
++ record = Event->freeEventList;
++ Event->freeEventList = record->next;
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, record));
++ }
++
++ /* Delete the free mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->freeEventMutex));
++
++ /* Free all pending queues. */
++ while (Event->queueHead != gcvNULL)
++ {
++ /* Get the current queue. */
++ queue = Event->queueHead;
++
++ /* Free all pending events. */
++ while (queue->head != gcvNULL)
++ {
++ record = queue->head;
++ queue->head = record->next;
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_WARNING, gcvZONE_EVENT,
++ gcmSIZEOF(record) + gcmSIZEOF(queue->source),
++ "Event record 0x%x is still pending for %d.",
++ record, queue->source
++ );
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, record));
++ }
++
++ /* Remove the top queue from the list. */
++ if (Event->queueHead == Event->queueTail)
++ {
++ Event->queueHead =
++ Event->queueTail = gcvNULL;
++ }
++ else
++ {
++ Event->queueHead = Event->queueHead->next;
++ }
++
++ /* Free the queue. */
++ gcmkVERIFY_OK(gckEVENT_FreeQueue(Event, queue));
++ }
++
++ /* Delete the list mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Event->os, Event->eventListMutex));
++
++ /* Delete the atom. */
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->freeAtom));
++
++#if gcdSMP
++ gcmkVERIFY_OK(gckOS_AtomDestroy(Event->os, Event->pending));
++#endif
++
++ /* Mark the gckEVENT object as unknown. */
++ Event->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckEVENT object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Event->os, Event));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_GetEvent
++**
++** Reserve the next available hardware event.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctBOOL Wait
++** Set to gcvTRUE to force the function to wait if no events are
++** immediately available.
++**
++** gceKERNEL_WHERE Source
++** Source of the event.
++**
++** OUTPUT:
++**
++** gctUINT8 * EventID
++** Reserved event ID.
++*/
++static gceSTATUS
++gckEVENT_GetEvent(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ OUT gctUINT8 * EventID,
++ IN gcsEVENT_PTR Head,
++ IN gceKERNEL_WHERE Source
++ )
++{
++ gctINT i, id;
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gctINT32 free;
++
++#if gcdGPU_TIMEOUT
++ gctUINT32 timer = 0;
++#endif
++
++ gcmkHEADER_ARG("Event=0x%x Head=%p Source=%d", Event, Head, Source);
++
++ while (gcvTRUE)
++ {
++ /* Grab the queue mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->eventQueueMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
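++        /* The scan below is round-robin: it starts just past the last ID
++           handed out (Event->lastID) and wraps modulo gcmCOUNTOF(queues),
++           so the hardware event IDs are recycled in roughly FIFO order. */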
++ /* Walk through all events. */
++ id = Event->lastID;
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ gctINT nextID = gckMATH_ModuloInt((id + 1),
++ gcmCOUNTOF(Event->queues));
++
++ if (Event->queues[id].head == gcvNULL)
++ {
++ *EventID = (gctUINT8) id;
++
++ Event->lastID = (gctUINT8) nextID;
++
++ /* Save time stamp of event. */
++ Event->queues[id].stamp = ++(Event->stamp);
++ Event->queues[id].head = Head;
++ Event->queues[id].source = Source;
++
++ gcmkONERROR(gckOS_AtomDecrement(Event->os,
++ Event->freeAtom,
++ &free));
++#if gcdDYNAMIC_SPEED
++ if (free <= gcdDYNAMIC_EVENT_THRESHOLD)
++ {
++ gcmkONERROR(gckOS_BroadcastHurry(
++ Event->os,
++ Event->kernel->hardware,
++ gcdDYNAMIC_EVENT_THRESHOLD - free));
++ }
++#endif
++
++ /* Release the queue mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os,
++ Event->eventQueueMutex));
++
++ /* Success. */
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_EVENT,
++ gcmSIZEOF(id),
++ "Using id=%d",
++ id
++ );
++
++ gcmkFOOTER_ARG("*EventID=%u", *EventID);
++ return gcvSTATUS_OK;
++ }
++
++ id = nextID;
++ }
++
++#if gcdDYNAMIC_SPEED
++ /* No free events, speed up the GPU right now! */
++ gcmkONERROR(gckOS_BroadcastHurry(Event->os,
++ Event->kernel->hardware,
++ gcdDYNAMIC_EVENT_THRESHOLD));
++#endif
++
++ /* Release the queue mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++
++ /* Fail if wait is not requested. */
++ if (!Wait)
++ {
++ /* Out of resources. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Delay a while. */
++ gcmkONERROR(gckOS_Delay(Event->os, 1));
++
++#if gcdGPU_TIMEOUT
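++        /* Each pass of this loop sleeps 1 ms, so 'timer' approximates the
++           elapsed milliseconds. At kernel->timeOut the interrupt handler is
++           poked once in case an event completion was lost; only after a
++           further pass does the wait fail with gcvSTATUS_GPU_NOT_RESPONDING. */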
++ /* Increment the wait timer. */
++ timer += 1;
++
++ if (timer == Event->kernel->timeOut)
++ {
++ /* Try to call any outstanding events. */
++ gcmkONERROR(gckHARDWARE_Interrupt(Event->kernel->hardware,
++ gcvTRUE));
++ }
++ else if (timer > Event->kernel->timeOut)
++ {
++ gcmkTRACE_N(
++ gcvLEVEL_ERROR,
++ gcmSIZEOF(gctCONST_STRING) + gcmSIZEOF(gctINT),
++ "%s(%d): no available events\n",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Bail out. */
++ gcmkONERROR(gcvSTATUS_GPU_NOT_RESPONDING);
++ }
++#endif
++ }
++
++OnError:
++ if (acquired)
++ {
++ /* Release the queue mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_AllocateRecord
++**
++** Allocate a record for the new event.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctBOOL AllocateAllowed
++** State for allocation if out of free events.
++**
++** OUTPUT:
++**
++** gcsEVENT_PTR * Record
++** Allocated event record.
++*/
++gceSTATUS
++gckEVENT_AllocateRecord(
++ IN gckEVENT Event,
++ IN gctBOOL AllocateAllowed,
++ OUT gcsEVENT_PTR * Record
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gctINT i;
++ gcsEVENT_PTR record;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Event=0x%x AllocateAllowed=%d", Event, AllocateAllowed);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Record != gcvNULL);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os, Event->freeEventMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Test if we are below the allocation threshold. */
++ if ( (AllocateAllowed && (Event->freeEventCount < gcdEVENT_MIN_THRESHOLD)) ||
++ (Event->freeEventCount == 0) )
++ {
++ /* Allocate a bunch of records. */
++ for (i = 0; i < gcdEVENT_ALLOCATION_COUNT; i += 1)
++ {
++ /* Allocate an event record. */
++ gcmkONERROR(gckOS_Allocate(Event->os,
++ gcmSIZEOF(gcsEVENT),
++ &pointer));
++
++ record = pointer;
++
++ /* Push it on the free list. */
++ record->next = Event->freeEventList;
++ Event->freeEventList = record;
++ Event->freeEventCount += 1;
++ }
++ }
++
++ *Record = Event->freeEventList;
++ Event->freeEventList = Event->freeEventList->next;
++ Event->freeEventCount -= 1;
++
++ /* Release the mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Record=0x%x", gcmOPT_POINTER(Record));
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->freeEventMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_AddList
++**
++** Add a new event to the list of events.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gcsHAL_INTERFACE_PTR Interface
++** Pointer to the interface for the event to be added.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** gctBOOL AllocateAllowed
++** State for allocation if out of free events.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_AddList(
++ IN gckEVENT Event,
++ IN gcsHAL_INTERFACE_PTR Interface,
++ IN gceKERNEL_WHERE FromWhere,
++ IN gctBOOL AllocateAllowed,
++ IN gctBOOL FromKernel
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcsEVENT_PTR record = gcvNULL;
++ gcsEVENT_QUEUE_PTR queue;
++ gckKERNEL kernel = Event->kernel;
++
++ gcmkHEADER_ARG("Event=0x%x Interface=0x%x",
++ Event, Interface);
++
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, _GC_OBJ_ZONE,
++ "FromWhere=%d AllocateAllowed=%d",
++ FromWhere, AllocateAllowed);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Interface != gcvNULL);
++
++ /* Verify the event command. */
++ gcmkASSERT
++ ( (Interface->command == gcvHAL_FREE_NON_PAGED_MEMORY)
++ || (Interface->command == gcvHAL_FREE_CONTIGUOUS_MEMORY)
++ || (Interface->command == gcvHAL_FREE_VIDEO_MEMORY)
++ || (Interface->command == gcvHAL_WRITE_DATA)
++ || (Interface->command == gcvHAL_UNLOCK_VIDEO_MEMORY)
++ || (Interface->command == gcvHAL_SIGNAL)
++ || (Interface->command == gcvHAL_UNMAP_USER_MEMORY)
++ || (Interface->command == gcvHAL_TIMESTAMP)
++ || (Interface->command == gcvHAL_COMMIT_DONE)
++ || (Interface->command == gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER)
++ || (Interface->command == gcvHAL_SYNC_POINT)
++ );
++
++ /* Validate the source. */
++ if ((FromWhere != gcvKERNEL_COMMAND) && (FromWhere != gcvKERNEL_PIXEL))
++ {
++ /* Invalid argument. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Allocate a free record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, AllocateAllowed, &record));
++
++    /* Terminate the record. */
++ record->next = gcvNULL;
++
++ /* Record the committer. */
++ record->fromKernel = FromKernel;
++
++ /* Copy the event interface into the record. */
++ gckOS_MemCopy(&record->info, Interface, gcmSIZEOF(record->info));
++
++ /* Get process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&record->processID));
++
++#ifdef __QNXNTO__
++ record->kernel = Event->kernel;
++#endif
++
++ gcmkONERROR(__RemoveRecordFromProcessDB(Event, record));
++
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os, Event->eventListMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Do we need to allocate a new queue? */
++ if ((Event->queueTail == gcvNULL) || (Event->queueTail->source < FromWhere))
++ {
++ /* Allocate a new queue. */
++ gcmkONERROR(gckEVENT_AllocateQueue(Event, &queue));
++
++ /* Initialize the queue. */
++ queue->source = FromWhere;
++ queue->head = gcvNULL;
++ queue->next = gcvNULL;
++
++ /* Attach it to the list of allocated queues. */
++ if (Event->queueTail == gcvNULL)
++ {
++ Event->queueHead =
++ Event->queueTail = queue;
++ }
++ else
++ {
++ Event->queueTail->next = queue;
++ Event->queueTail = queue;
++ }
++ }
++ else
++ {
++ queue = Event->queueTail;
++ }
++
++ /* Attach the record to the queue. */
++ if (queue->head == gcvNULL)
++ {
++ queue->head = record;
++ queue->tail = record;
++ }
++ else
++ {
++ queue->tail->next = record;
++ queue->tail = record;
++ }
++
++    /* Unmap the user space logical address.
++     * Since Linux 3.5 the kernel no longer supports unmapping another
++     * process's memory, so unmap the current process's memory before
++     * submitting the event to the GPU.
++     */
++ switch(Interface->command)
++ {
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ gcmkONERROR(gckOS_UnmapUserLogical(
++ Event->os,
++ gcmNAME_TO_PTR(Interface->u.FreeNonPagedMemory.physical),
++ (gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
++ break;
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ gcmkONERROR(gckOS_UnmapUserLogical(
++ Event->os,
++ gcmNAME_TO_PTR(Interface->u.FreeContiguousMemory.physical),
++ (gctSIZE_T) Interface->u.FreeContiguousMemory.bytes,
++ gcmUINT64_TO_PTR(Interface->u.FreeContiguousMemory.logical)));
++ break;
++ default:
++ break;
++ }
++
++
++ /* Release the mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
++ }
++
++ if (record != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckEVENT_FreeRecord(Event, record));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Unlock
++**
++** Schedule an event to unlock virtual memory.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE union that specifies the virtual memory
++** to unlock.
++**
++** gceSURF_TYPE Type
++** Type of surface to unlock.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Unlock(
++ IN gckEVENT Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gceSURF_TYPE Type
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++
++ gcmkHEADER_ARG("Event=0x%x FromWhere=%d Node=0x%x Type=%d",
++ Event, FromWhere, Node, Type);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++
++ /* Mark the event as an unlock. */
++ iface.command = gcvHAL_UNLOCK_VIDEO_MEMORY;
++ iface.u.UnlockVideoMemory.node = gcmPTR_TO_UINT64(Node);
++ iface.u.UnlockVideoMemory.type = Type;
++ iface.u.UnlockVideoMemory.asynchroneous = 0;
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_FreeVideoMemory
++**
++** Schedule an event to free video memory.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gcuVIDMEM_NODE_PTR VideoMemory
++** Pointer to a gcuVIDMEM_NODE object to free.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_FreeVideoMemory(
++ IN gckEVENT Event,
++ IN gcuVIDMEM_NODE_PTR VideoMemory,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++
++ gcmkHEADER_ARG("Event=0x%x VideoMemory=0x%x FromWhere=%d",
++ Event, VideoMemory, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(VideoMemory != gcvNULL);
++
++ /* Create an event. */
++ iface.command = gcvHAL_FREE_VIDEO_MEMORY;
++ iface.u.FreeVideoMemory.node = gcmPTR_TO_UINT64(VideoMemory);
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_FreeNonPagedMemory
++**
++** Schedule an event to free non-paged memory.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctSIZE_T Bytes
++** Number of bytes of non-paged memory to free.
++**
++** gctPHYS_ADDR Physical
++** Physical address of non-paged memory to free.
++**
++** gctPOINTER Logical
++** Logical address of non-paged memory to free.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++*/
++gceSTATUS
++gckEVENT_FreeNonPagedMemory(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gckKERNEL kernel = Event->kernel;
++
++ gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
++ "FromWhere=%d",
++ Event, Bytes, Physical, Logical, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /* Create an event. */
++ iface.command = gcvHAL_FREE_NON_PAGED_MEMORY;
++ iface.u.FreeNonPagedMemory.bytes = Bytes;
++ iface.u.FreeNonPagedMemory.physical = gcmPTR_TO_NAME(Physical);
++ iface.u.FreeNonPagedMemory.logical = gcmPTR_TO_UINT64(Logical);
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckEVENT_DestroyVirtualCommandBuffer(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gckKERNEL kernel = Event->kernel;
++
++ gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
++ "FromWhere=%d",
++ Event, Bytes, Physical, Logical, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /* Create an event. */
++ iface.command = gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER;
++ iface.u.FreeVirtualCommandBuffer.bytes = Bytes;
++ iface.u.FreeVirtualCommandBuffer.physical = gcmPTR_TO_NAME(Physical);
++ iface.u.FreeVirtualCommandBuffer.logical = gcmPTR_TO_UINT64(Logical);
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_FreeContiguousMemory
++**
++** Schedule an event to free contiguous memory.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctSIZE_T Bytes
++** Number of bytes of contiguous memory to free.
++**
++** gctPHYS_ADDR Physical
++** Physical address of contiguous memory to free.
++**
++** gctPOINTER Logical
++** Logical address of contiguous memory to free.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++*/
++gceSTATUS
++gckEVENT_FreeContiguousMemory(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gckKERNEL kernel = Event->kernel;
++
++ gcmkHEADER_ARG("Event=0x%x Bytes=%lu Physical=0x%x Logical=0x%x "
++ "FromWhere=%d",
++ Event, Bytes, Physical, Logical, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /* Create an event. */
++ iface.command = gcvHAL_FREE_CONTIGUOUS_MEMORY;
++ iface.u.FreeContiguousMemory.bytes = Bytes;
++ iface.u.FreeContiguousMemory.physical = gcmPTR_TO_NAME(Physical);
++ iface.u.FreeContiguousMemory.logical = gcmPTR_TO_UINT64(Logical);
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Signal
++**
++** Schedule an event to trigger a signal.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctSIGNAL Signal
++** Pointer to the signal to trigger.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Signal(
++ IN gckEVENT Event,
++ IN gctSIGNAL Signal,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++
++ gcmkHEADER_ARG("Event=0x%x Signal=0x%x FromWhere=%d",
++ Event, Signal, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ /* Mark the event as a signal. */
++ iface.command = gcvHAL_SIGNAL;
++ iface.u.Signal.signal = gcmPTR_TO_UINT64(Signal);
++#ifdef __QNXNTO__
++ iface.u.Signal.coid = 0;
++ iface.u.Signal.rcvid = 0;
++#endif
++ iface.u.Signal.auxSignal = 0;
++ iface.u.Signal.process = 0;
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
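++
++/* Illustrative use of gckEVENT_Signal (a sketch; the name of the owning
++** gckKERNEL event field is an assumption, not taken from this file):
++**
++**     gcmkONERROR(gckEVENT_Signal(kernel->eventObj, signal, gcvKERNEL_PIXEL));
++**     gcmkONERROR(gckEVENT_Submit(kernel->eventObj, gcvTRUE, gcvFALSE));
++**
++** The signal is then triggered from gckEVENT_Notify once the GPU reaches
++** the scheduled event. */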
++
++/*******************************************************************************
++**
++** gckEVENT_CommitDone
++**
++** Schedule an event to wake up work thread when commit is done by GPU.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gceKERNEL_WHERE FromWhere
++** Place in the pipe where the event needs to be generated.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_CommitDone(
++ IN gckEVENT Event,
++ IN gceKERNEL_WHERE FromWhere
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++
++ gcmkHEADER_ARG("Event=0x%x FromWhere=%d", Event, FromWhere);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ iface.command = gcvHAL_COMMIT_DONE;
++
++ /* Append it to the queue. */
++ gcmkONERROR(gckEVENT_AddList(Event, &iface, FromWhere, gcvFALSE, gcvTRUE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Submit
++**
++** Submit the current event queue to the GPU.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctBOOL Wait
++** Submission requires one vacant event; if Wait is gcvTRUE and no
++** event is vacant at this time, the function waits until an event
++** becomes vacant so that the queue can be submitted.
++**
++** gctBOOL FromPower
++** Determines whether the call originates from inside the power
++** management code.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Submit(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ IN gctBOOL FromPower
++ )
++{
++ gceSTATUS status;
++ gctUINT8 id = 0xFF;
++ gcsEVENT_QUEUE_PTR queue;
++ gctBOOL acquired = gcvFALSE;
++ gckCOMMAND command = gcvNULL;
++ gctBOOL commitEntered = gcvFALSE;
++#if !gcdNULL_DRIVER
++ gctSIZE_T bytes;
++ gctPOINTER buffer;
++#endif
++
++ gcmkHEADER_ARG("Event=0x%x Wait=%d", Event, Wait);
++
++ /* Get gckCOMMAND object. */
++ command = Event->kernel->command;
++
++ /* Are there event queues? */
++ if (Event->queueHead != gcvNULL)
++ {
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(command, FromPower));
++ commitEntered = gcvTRUE;
++
++ /* Process all queues. */
++ while (Event->queueHead != gcvNULL)
++ {
++ /* Acquire the list mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->eventListMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Get the current queue. */
++ queue = Event->queueHead;
++
++ /* Allocate an event ID. */
++ gcmkONERROR(gckEVENT_GetEvent(Event, Wait, &id, queue->head, queue->source));
++
++ /* Copy event list to event ID queue. */
++ Event->queues[id].head = queue->head;
++
++ /* Remove the top queue from the list. */
++ if (Event->queueHead == Event->queueTail)
++ {
++ Event->queueHead = gcvNULL;
++ Event->queueTail = gcvNULL;
++ }
++ else
++ {
++ Event->queueHead = Event->queueHead->next;
++ }
++
++ /* Free the queue. */
++ gcmkONERROR(gckEVENT_FreeQueue(Event, queue));
++
++ /* Release the list mutex. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
++ acquired = gcvFALSE;
++
++#if gcdNULL_DRIVER
++ /* Notify immediately on infinite hardware. */
++ gcmkONERROR(gckEVENT_Interrupt(Event, 1 << id));
++
++ gcmkONERROR(gckEVENT_Notify(Event, 0));
++#else
++ /* Get the size of the hardware event. */
++ gcmkONERROR(gckHARDWARE_Event(Event->kernel->hardware,
++ gcvNULL,
++ id,
++ Event->queues[id].source,
++ &bytes));
++
++ /* Reserve space in the command queue. */
++ gcmkONERROR(gckCOMMAND_Reserve(command,
++ bytes,
++ &buffer,
++ &bytes));
++
++ /* Set the hardware event in the command queue. */
++ gcmkONERROR(gckHARDWARE_Event(Event->kernel->hardware,
++ buffer,
++ id,
++ Event->queues[id].source,
++ &bytes));
++
++ /* Execute the hardware event. */
++ gcmkONERROR(gckCOMMAND_Execute(command, bytes));
++#endif
++ }
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(command, FromPower));
++ commitEntered = gcvFALSE;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(command, FromPower));
++ }
++
++ if (acquired)
++ {
++ /* Need to unroll the mutex acquire. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventListMutex));
++ }
++
++ if (id != 0xFF)
++ {
++ /* Need to unroll the event allocation. */
++ Event->queues[id].head = gcvNULL;
++ }
++
++ if (status == gcvSTATUS_GPU_NOT_RESPONDING)
++ {
++ /* Broadcast GPU stuck. */
++ status = gckOS_Broadcast(Event->os,
++ Event->kernel->hardware,
++ gcvBROADCAST_GPU_STUCK);
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Commit
++**
++** Commit an event queue from the user.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gcsQUEUE_PTR Queue
++** User event queue.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Commit(
++ IN gckEVENT Event,
++ IN gcsQUEUE_PTR Queue
++ )
++{
++ gceSTATUS status;
++ gcsQUEUE_PTR record = gcvNULL, next;
++ gctUINT32 processID;
++ gctBOOL needCopy = gcvFALSE;
++
++ gcmkHEADER_ARG("Event=0x%x Queue=0x%x", Event, Queue);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Get the current process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ /* Query if we need to copy the client data. */
++ gcmkONERROR(gckOS_QueryNeedCopy(Event->os, processID, &needCopy));
++
++ /* Loop while there are records in the queue. */
++ while (Queue != gcvNULL)
++ {
++ gcsQUEUE queue;
++
++ if (needCopy)
++ {
++ /* Point to stack record. */
++ record = &queue;
++
++ /* Copy the data from the client. */
++ gcmkONERROR(gckOS_CopyFromUserData(Event->os,
++ record,
++ Queue,
++ gcmSIZEOF(gcsQUEUE)));
++ }
++ else
++ {
++ gctPOINTER pointer = gcvNULL;
++
++ /* Map record into kernel memory. */
++ gcmkONERROR(gckOS_MapUserPointer(Event->os,
++ Queue,
++ gcmSIZEOF(gcsQUEUE),
++ &pointer));
++
++ record = pointer;
++ }
++
++ /* Append event record to event queue. */
++ gcmkONERROR(
++ gckEVENT_AddList(Event, &record->iface, gcvKERNEL_PIXEL, gcvTRUE, gcvFALSE));
++
++ /* Next record in the queue. */
++ next = gcmUINT64_TO_PTR(record->next);
++
++ if (!needCopy)
++ {
++ /* Unmap record from kernel memory. */
++ gcmkONERROR(
++ gckOS_UnmapUserPointer(Event->os,
++ Queue,
++ gcmSIZEOF(gcsQUEUE),
++ (gctPOINTER *) record));
++ record = gcvNULL;
++ }
++
++ Queue = next;
++ }
++
++ /* Submit the event list. */
++ gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE));
++
++ /* Success */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if ((record != gcvNULL) && !needCopy)
++ {
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_UnmapUserPointer(Event->os,
++ Queue,
++ gcmSIZEOF(gcsQUEUE),
++ (gctPOINTER *) record));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Compose
++**
++** Schedule a composition event and start a composition.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gcsHAL_COMPOSE_PTR Info
++** Pointer to the composition structure.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Compose(
++ IN gckEVENT Event,
++ IN gcsHAL_COMPOSE_PTR Info
++ )
++{
++ gceSTATUS status;
++ gcsEVENT_PTR headRecord;
++ gcsEVENT_PTR tailRecord;
++ gcsEVENT_PTR tempRecord;
++ gctUINT8 id = 0xFF;
++ gctUINT32 processID;
++
++ gcmkHEADER_ARG("Event=0x%x Info=0x%x", Event, Info);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++ gcmkVERIFY_ARGUMENT(Info != gcvNULL);
++
++ /* Get process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ /* Allocate a record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &tempRecord));
++ headRecord = tailRecord = tempRecord;
++
++ /* Initialize the record. */
++ tempRecord->info.command = gcvHAL_SIGNAL;
++ tempRecord->info.u.Signal.process = Info->process;
++#ifdef __QNXNTO__
++ tempRecord->info.u.Signal.coid = Info->coid;
++ tempRecord->info.u.Signal.rcvid = Info->rcvid;
++#endif
++ tempRecord->info.u.Signal.signal = Info->signal;
++ tempRecord->info.u.Signal.auxSignal = 0;
++ tempRecord->next = gcvNULL;
++ tempRecord->processID = processID;
++
++ /* Allocate another record for user signal #1. */
++ if (gcmUINT64_TO_PTR(Info->userSignal1) != gcvNULL)
++ {
++ /* Allocate a record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &tempRecord));
++ tailRecord->next = tempRecord;
++ tailRecord = tempRecord;
++
++ /* Initialize the record. */
++ tempRecord->info.command = gcvHAL_SIGNAL;
++ tempRecord->info.u.Signal.process = Info->userProcess;
++#ifdef __QNXNTO__
++ tempRecord->info.u.Signal.coid = Info->coid;
++ tempRecord->info.u.Signal.rcvid = Info->rcvid;
++#endif
++ tempRecord->info.u.Signal.signal = Info->userSignal1;
++ tempRecord->info.u.Signal.auxSignal = 0;
++ tempRecord->next = gcvNULL;
++ tempRecord->processID = processID;
++ }
++
++ /* Allocate another record for user signal #2. */
++ if (gcmUINT64_TO_PTR(Info->userSignal2) != gcvNULL)
++ {
++ /* Allocate a record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &tempRecord));
++ tailRecord->next = tempRecord;
++ tailRecord = tempRecord;
++
++ /* Initialize the record. */
++ tempRecord->info.command = gcvHAL_SIGNAL;
++ tempRecord->info.u.Signal.process = Info->userProcess;
++#ifdef __QNXNTO__
++ tempRecord->info.u.Signal.coid = Info->coid;
++ tempRecord->info.u.Signal.rcvid = Info->rcvid;
++#endif
++ tempRecord->info.u.Signal.signal = Info->userSignal2;
++ tempRecord->info.u.Signal.auxSignal = 0;
++ tempRecord->next = gcvNULL;
++ tempRecord->processID = processID;
++ }
++
++ /* Allocate an event ID. */
++ gcmkONERROR(gckEVENT_GetEvent(Event, gcvTRUE, &id, headRecord, gcvKERNEL_PIXEL));
++
++ /* Start composition. */
++ gcmkONERROR(gckHARDWARE_Compose(
++ Event->kernel->hardware, processID,
++ gcmUINT64_TO_PTR(Info->physical), gcmUINT64_TO_PTR(Info->logical), Info->offset, Info->size, id
++ ));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckEVENT_Interrupt
++**
++** Called by the interrupt service routine to store the triggered interrupt
++** mask to be later processed by gckEVENT_Notify.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** gctUINT32 Data
++** Mask for the 32 interrupts.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Interrupt(
++ IN gckEVENT Event,
++ IN gctUINT32 Data
++ )
++{
++ unsigned long flags;
++ gcmkHEADER_ARG("Event=0x%x Data=0x%x", Event, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Combine current interrupt status with pending flags. */
++ spin_lock_irqsave(&Event->kernel->irq_lock, flags);
++#if gcdSMP
++ gckOS_AtomSetMask(Event->pending, Data);
++#elif defined(__QNXNTO__)
++ atomic_set(&Event->pending, Data);
++#else
++ Event->pending |= Data;
++#endif
++ spin_unlock_irqrestore(&Event->kernel->irq_lock, flags);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
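++
++/* gckEVENT_Interrupt above is the top half of interrupt handling: under
++** irq_lock it only latches status bits into the pending mask. The bottom
++** half, gckEVENT_Notify below, runs in thread context and does the actual
++** record processing. */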
++
++/*******************************************************************************
++**
++** gckEVENT_Notify
++**
++** Process all triggered interrupts.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to an gckEVENT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Notify(
++ IN gckEVENT Event,
++ IN gctUINT32 IDs
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctINT i;
++ gcsEVENT_QUEUE * queue;
++ gctUINT mask = 0;
++ gctBOOL acquired = gcvFALSE;
++ gcuVIDMEM_NODE_PTR node;
++ gctPOINTER info;
++ gctSIGNAL signal;
++ gctUINT pending;
++ gckKERNEL kernel = Event->kernel;
++#if !gcdSMP
++ gctBOOL suspended = gcvFALSE;
++#endif
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gctINT eventNumber = 0;
++#endif
++ gctINT32 free;
++#if gcdSECURE_USER
++ gcskSECURE_CACHE_PTR cache;
++#endif
++ unsigned long flags;
++
++ gcmkHEADER_ARG("Event=0x%x IDs=0x%x", Event, IDs);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ gcmDEBUG_ONLY(
++ if (IDs != 0)
++ {
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if (Event->queues[i].head != gcvNULL)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "Queue(%d): stamp=%llu source=%d",
++ i,
++ Event->queues[i].stamp,
++ Event->queues[i].source);
++ }
++ }
++ }
++ );
++
++ for (;;)
++ {
++ gcsEVENT_PTR record;
++
++ spin_lock_irqsave(&Event->kernel->irq_lock, flags);
++#if gcdSMP
++ /* Get current interrupts. */
++ gckOS_AtomGet(Event->os, Event->pending, (gctINT32_PTR)&pending);
++#else
++ /* Get current interrupts. */
++ pending = Event->pending;
++#endif
++ spin_unlock_irqrestore(&Event->kernel->irq_lock, flags);
++
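++        /* Bits 31 and 30 of the interrupt status are not event IDs: they
++           flag an AXI bus error and an MMU exception respectively, so they
++           are reported and masked off before the per-ID handling below. */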
++ if (pending & 0x80000000)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_EVENT, "AXI BUS ERROR");
++ pending &= 0x7FFFFFFF;
++ }
++
++ if (pending & 0x40000000)
++ {
++ gckHARDWARE_DumpMMUException(Event->kernel->hardware);
++
++ pending &= 0x3FFFFFFF;
++ }
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_EVENT,
++ gcmSIZEOF(pending),
++ "Pending interrupts 0x%x",
++ pending
++ );
++
++ if (pending == 0)
++ {
++ /* No more pending interrupts - done. */
++ break;
++ }
++
++ queue = gcvNULL;
++
++ /* Grab the mutex queue. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->eventQueueMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmDEBUG_ONLY(
++ if (IDs == 0)
++ {
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if (Event->queues[i].head != gcvNULL)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "Queue(%d): stamp=%llu source=%d",
++ i,
++ Event->queues[i].stamp,
++ Event->queues[i].source);
++ }
++ }
++ }
++ );
++
++ /* Find the oldest pending interrupt. */
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if ((Event->queues[i].head != gcvNULL)
++ && (pending & (1 << i))
++ )
++ {
++ if ((queue == gcvNULL)
++ || (Event->queues[i].stamp < queue->stamp)
++ )
++ {
++ queue = &Event->queues[i];
++ mask = 1 << i;
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ eventNumber = i;
++#endif
++ }
++ }
++ }
++
++ if (queue == gcvNULL)
++ {
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_ERROR, gcvZONE_EVENT,
++ gcmSIZEOF(pending),
++ "Interrupts 0x%x are not pending.",
++ pending
++ );
++
++ /* Release the mutex queue. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++
++ spin_lock_irqsave(&Event->kernel->irq_lock, flags);
++#if gcdSMP
++ /* Mark pending interrupts as handled. */
++ gckOS_AtomClearMask(Event->pending, pending);
++#elif defined(__QNXNTO__)
++ /* Mark pending interrupts as handled. */
++ atomic_clr((gctUINT32_PTR)&Event->pending, pending);
++#else
++ /* Mark pending interrupts as handled. */
++ Event->pending &= ~pending;
++#endif
++ spin_unlock_irqrestore(&Event->kernel->irq_lock, flags);
++ break;
++ }
++
++ /* Check whether there is a missed interrupt. */
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if ((Event->queues[i].head != gcvNULL)
++ && (Event->queues[i].stamp < queue->stamp)
++ && (Event->queues[i].source <= queue->source)
++ )
++ {
++ gcmkTRACE_N(
++ gcvLEVEL_ERROR,
++ gcmSIZEOF(i) + gcmSIZEOF(Event->queues[i].stamp),
++ "Event %d lost (stamp %llu)",
++ i, Event->queues[i].stamp
++ );
++
++ /* Use this event instead. */
++ queue = &Event->queues[i];
++ mask = 0;
++ }
++ }
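++        /* When a lost event was substituted above, 'mask' is zero, so no
++           bit is cleared from the pending set; the interrupt that did fire
++           stays pending and is re-examined on the next loop iteration. */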
++
++ if (mask != 0)
++ {
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_EVENT,
++ gcmSIZEOF(eventNumber),
++ "Processing interrupt %d",
++ eventNumber
++ );
++#endif
++ }
++
++ spin_lock_irqsave(&Event->kernel->irq_lock, flags);
++#if gcdSMP
++ /* Mark pending interrupt as handled. */
++ gckOS_AtomClearMask(Event->pending, mask);
++#elif defined(__QNXNTO__)
++ /* Mark pending interrupt as handled. */
++ atomic_clr(&Event->pending, mask);
++#else
++ /* Mark pending interrupt as handled. */
++ Event->pending &= ~mask;
++#endif
++ spin_unlock_irqrestore(&Event->kernel->irq_lock, flags);
++
++ /* We are in the notify loop. */
++ Event->inNotify = gcvTRUE;
++
++ /* Grab the event head. */
++ record = queue->head;
++
++ /* Now quickly clear its event list. */
++ queue->head = gcvNULL;
++
++ /* Release the mutex queue. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++
++ /* Increase the number of free events. */
++ gcmkONERROR(gckOS_AtomIncrement(Event->os, Event->freeAtom, &free));
++
++ /* Walk all events for this interrupt. */
++ while (record != gcvNULL)
++ {
++ gcsEVENT_PTR recordNext;
++#ifndef __QNXNTO__
++ gctPOINTER logical;
++#endif
++#if gcdSECURE_USER
++ gctSIZE_T bytes;
++#endif
++
++ /* Grab next record. */
++ recordNext = record->next;
++
++#ifdef __QNXNTO__
++ /* Assign record->processID as the pid for this galcore thread.
++ * Used in OS calls like gckOS_UnlockMemory() which do not take a pid.
++ */
++ drv_thread_specific_key_assign(record->processID, 0, Event->kernel->core);
++#endif
++
++#if gcdSECURE_USER
++ /* Get the cache that belongs to this process. */
++ gcmkONERROR(gckKERNEL_GetProcessDBCache(Event->kernel,
++ record->processID,
++ &cache));
++#endif
++
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_INFO, gcvZONE_EVENT,
++ gcmSIZEOF(record->info.command),
++ "Processing event type: %d",
++ record->info.command
++ );
++
++ switch (record->info.command)
++ {
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_FREE_NON_PAGED_MEMORY: 0x%x",
++ gcmNAME_TO_PTR(record->info.u.FreeNonPagedMemory.physical));
++
++ /* Free non-paged memory. */
++ status = gckOS_FreeNonPagedMemory(
++ Event->os,
++ (gctSIZE_T) record->info.u.FreeNonPagedMemory.bytes,
++ gcmNAME_TO_PTR(record->info.u.FreeNonPagedMemory.physical),
++ gcmUINT64_TO_PTR(record->info.u.FreeNonPagedMemory.logical));
++
++ if (gcmIS_SUCCESS(status))
++ {
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Event->kernel,
++ cache,
++ gcmUINT64_TO_PTR(record->record.u.FreeNonPagedMemory.logical),
++ (gctSIZE_T) record->record.u.FreeNonPagedMemory.bytes));
++#endif
++ }
++ gcmRELEASE_NAME(record->info.u.FreeNonPagedMemory.physical);
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_FREE_CONTIGUOUS_MEMORY: 0x%x",
++ gcmNAME_TO_PTR(record->info.u.FreeContiguousMemory.physical));
++
++ /* Unmap the user memory. */
++ status = gckOS_FreeContiguous(
++ Event->os,
++ gcmNAME_TO_PTR(record->info.u.FreeContiguousMemory.physical),
++ gcmUINT64_TO_PTR(record->info.u.FreeContiguousMemory.logical),
++ (gctSIZE_T) record->info.u.FreeContiguousMemory.bytes);
++
++ if (gcmIS_SUCCESS(status))
++ {
++#if gcdSECURE_USER
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Event->kernel,
++ cache,
++ gcmUINT64_TO_PTR(record->record.u.FreeContiguousMemory.logical),
++ (gctSIZE_T) record->record.u.FreeContiguousMemory.bytes));
++#endif
++ }
++ gcmRELEASE_NAME(record->info.u.FreeContiguousMemory.physical);
++ break;
++
++ case gcvHAL_FREE_VIDEO_MEMORY:
++ node = gcmUINT64_TO_PTR(record->info.u.FreeVideoMemory.node);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_FREE_VIDEO_MEMORY: 0x%x",
++ node);
++#ifdef __QNXNTO__
++#if gcdUSE_VIDMEM_PER_PID
++ /* Check if the VidMem object still exists. */
++ if (gckKERNEL_GetVideoMemoryPoolPid(record->kernel,
++ gcvPOOL_SYSTEM,
++ record->processID,
++ gcvNULL) == gcvSTATUS_NOT_FOUND)
++ {
++ /*printf("Vidmem not found for process:%d\n", queue->processID);*/
++ status = gcvSTATUS_OK;
++ break;
++ }
++#else
++ if ((node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ && (node->VidMem.logical != gcvNULL)
++ )
++ {
++ gcmkERR_BREAK(
++ gckKERNEL_UnmapVideoMemory(record->kernel,
++ node->VidMem.logical,
++ record->processID,
++ node->VidMem.bytes));
++ node->VidMem.logical = gcvNULL;
++ }
++#endif
++#endif
++
++ /* Free video memory. */
++ status =
++ gckVIDMEM_Free(Event->kernel, node);
++
++ break;
++
++ case gcvHAL_WRITE_DATA:
++#ifndef __QNXNTO__
++ /* Convert physical into logical address. */
++ gcmkERR_BREAK(
++ gckOS_MapPhysical(Event->os,
++ record->info.u.WriteData.address,
++ gcmSIZEOF(gctUINT32),
++ &logical));
++
++ /* Write data. */
++ gcmkERR_BREAK(
++ gckOS_WriteMemory(Event->os,
++ logical,
++ record->info.u.WriteData.data));
++
++ /* Unmap the physical memory. */
++ gcmkERR_BREAK(
++ gckOS_UnmapPhysical(Event->os,
++ logical,
++ gcmSIZEOF(gctUINT32)));
++#else
++ /* Write data. */
++ gcmkERR_BREAK(
++ gckOS_WriteMemory(Event->os,
++ (gctPOINTER)
++ record->info.u.WriteData.address,
++ record->info.u.WriteData.data));
++#endif
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ node = gcmUINT64_TO_PTR(record->info.u.UnlockVideoMemory.node);
++
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_UNLOCK_VIDEO_MEMORY: 0x%x",
++ node);
++
++ /* Save node information before it disappears. */
++#if gcdSECURE_USER
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ logical = gcvNULL;
++ bytes = 0;
++ }
++ else
++ {
++ logical = node->Virtual.logical;
++ bytes = node->Virtual.bytes;
++ }
++#endif
++
++ /* Unlock. */
++ status = gckVIDMEM_Unlock(
++ Event->kernel,
++ node,
++ record->info.u.UnlockVideoMemory.type,
++ gcvNULL);
++
++#if gcdSECURE_USER
++ if (gcmIS_SUCCESS(status) && (logical != gcvNULL))
++ {
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Event->kernel,
++ cache,
++ logical,
++ bytes));
++ }
++#endif
++ break;
++
++ case gcvHAL_SIGNAL:
++ signal = gcmUINT64_TO_PTR(record->info.u.Signal.signal);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_SIGNAL: 0x%x",
++ signal);
++
++#ifdef __QNXNTO__
++ if ((record->info.u.Signal.coid == 0)
++ && (record->info.u.Signal.rcvid == 0)
++ )
++ {
++ /* Kernel signal. */
++ gcmkERR_BREAK(
++ gckOS_Signal(Event->os,
++ signal,
++ gcvTRUE));
++ }
++ else
++ {
++ /* User signal. */
++ gcmkERR_BREAK(
++ gckOS_UserSignal(Event->os,
++ signal,
++ record->info.u.Signal.rcvid,
++ record->info.u.Signal.coid));
++ }
++#else
++ /* Set signal. */
++ if (gcmUINT64_TO_PTR(record->info.u.Signal.process) == gcvNULL)
++ {
++ /* Kernel signal. */
++ gcmkERR_BREAK(
++ gckOS_Signal(Event->os,
++ signal,
++ gcvTRUE));
++ }
++ else
++ {
++ /* User signal. */
++ gcmkERR_BREAK(
++ gckOS_UserSignal(Event->os,
++ signal,
++ gcmUINT64_TO_PTR(record->info.u.Signal.process)));
++ }
++
++ gcmkASSERT(record->info.u.Signal.auxSignal == 0);
++#endif
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ info = gcmNAME_TO_PTR(record->info.u.UnmapUserMemory.info);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_UNMAP_USER_MEMORY: 0x%x",
++ info);
++
++ /* Unmap the user memory. */
++ status = gckOS_UnmapUserMemory(
++ Event->os,
++ Event->kernel->core,
++ gcmUINT64_TO_PTR(record->info.u.UnmapUserMemory.memory),
++ (gctSIZE_T) record->info.u.UnmapUserMemory.size,
++ info,
++ record->info.u.UnmapUserMemory.address);
++
++#if gcdSECURE_USER
++ if (gcmIS_SUCCESS(status))
++ {
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(
++ Event->kernel,
++ cache,
++ gcmUINT64_TO_PTR(record->info.u.UnmapUserMemory.memory),
++ (gctSIZE_T) record->info.u.UnmapUserMemory.size));
++ }
++#endif
++ gcmRELEASE_NAME(record->info.u.UnmapUserMemory.info);
++ break;
++
++ case gcvHAL_TIMESTAMP:
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "gcvHAL_TIMESTAMP: %d %d",
++ record->info.u.TimeStamp.timer,
++ record->info.u.TimeStamp.request);
++
++ /* Process the timestamp. */
++ switch (record->info.u.TimeStamp.request)
++ {
++ case 0:
++ status = gckOS_GetTime(&Event->kernel->timers[
++ record->info.u.TimeStamp.timer].
++ stopTime);
++ break;
++
++ case 1:
++ status = gckOS_GetTime(&Event->kernel->timers[
++ record->info.u.TimeStamp.timer].
++ startTime);
++ break;
++
++ default:
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_ERROR, gcvZONE_EVENT,
++ gcmSIZEOF(record->info.u.TimeStamp.request),
++ "Invalid timestamp request: %d",
++ record->info.u.TimeStamp.request
++ );
++
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++ break;
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
++ gcmkVERIFY_OK(
++ gckKERNEL_DestroyVirtualCommandBuffer(Event->kernel,
++ (gctSIZE_T) record->info.u.FreeVirtualCommandBuffer.bytes,
++ gcmNAME_TO_PTR(record->info.u.FreeVirtualCommandBuffer.physical),
++ gcmUINT64_TO_PTR(record->info.u.FreeVirtualCommandBuffer.logical)
++ ));
++ gcmRELEASE_NAME(record->info.u.FreeVirtualCommandBuffer.physical);
++ break;
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ case gcvHAL_SYNC_POINT:
++ {
++ gctSYNC_POINT syncPoint;
++
++ syncPoint = gcmUINT64_TO_PTR(record->info.u.SyncPoint.syncPoint);
++ status = gckOS_SignalSyncPoint(Event->os, syncPoint);
++ }
++ break;
++#endif
++
++ case gcvHAL_COMMIT_DONE:
++ break;
++
++ default:
++ /* Invalid argument. */
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_ERROR, gcvZONE_EVENT,
++ gcmSIZEOF(record->info.command),
++ "Unknown event type: %d",
++ record->info.command
++ );
++
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++
++ /* Make sure there are no errors generated. */
++ if (gcmIS_ERROR(status))
++ {
++ gcmkTRACE_ZONE_N(
++ gcvLEVEL_WARNING, gcvZONE_EVENT,
++ gcmSIZEOF(status),
++ "Event produced status: %d(%s)",
++ status, gckOS_DebugStatus2Name(status));
++ }
++
++ /* Free the event. */
++ gcmkVERIFY_OK(gckEVENT_FreeRecord(Event, record));
++
++ /* Advance to next record. */
++ record = recordNext;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_EVENT,
++ "Handled interrupt 0x%x", mask);
++ }
++
++ if (IDs == 0)
++ {
++ gcmkONERROR(_TryToIdleGPU(Event));
++ }
++
++ /* We are out of the notify loop. */
++ Event->inNotify = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ }
++
++#if !gcdSMP
++ if (suspended)
++ {
++ /* Resume interrupts. */
++ gcmkVERIFY_OK(gckOS_ResumeInterruptEx(Event->os, Event->kernel->core));
++ }
++#endif
++
++ /* We are out of the notify loop. */
++ Event->inNotify = gcvFALSE;
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckEVENT_FreeProcess
++**
++** Free all events owned by a particular process ID.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to a gckEVENT object.
++**
++** gctUINT32 ProcessID
++** Process ID of the process to be freed up.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_FreeProcess(
++ IN gckEVENT Event,
++ IN gctUINT32 ProcessID
++ )
++{
++ gctSIZE_T i;
++ gctBOOL acquired = gcvFALSE;
++ gcsEVENT_PTR record, next;
++ gceSTATUS status;
++ gcsEVENT_PTR deleteHead, deleteTail;
++
++ gcmkHEADER_ARG("Event=0x%x ProcessID=%d", Event, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Walk through all queues. */
++ for (i = 0; i < gcmCOUNTOF(Event->queues); ++i)
++ {
++ if (Event->queues[i].head != gcvNULL)
++ {
++ /* Grab the event queue mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Event->os,
++ Event->eventQueueMutex,
++ gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Grab the queue head. */
++ record = Event->queues[i].head;
++ Event->queues[i].head = gcvNULL;
++ Event->queues[i].tail = gcvNULL;
++ deleteHead = gcvNULL;
++ deleteTail = gcvNULL;
++
++ while (record != gcvNULL)
++ {
++ next = record->next;
++ if (record->processID == ProcessID)
++ {
++ if (deleteHead == gcvNULL)
++ {
++ deleteHead = record;
++ }
++ else
++ {
++ deleteTail->next = record;
++ }
++
++ deleteTail = record;
++ }
++ else
++ {
++ if (Event->queues[i].head == gcvNULL)
++ {
++ Event->queues[i].head = record;
++ }
++ else
++ {
++ Event->queues[i].tail->next = record;
++ }
++
++ Event->queues[i].tail = record;
++ }
++
++ record->next = gcvNULL;
++ record = next;
++ }
++
++ /* Release the mutex queue. */
++ gcmkONERROR(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ acquired = gcvFALSE;
++
++ /* Loop through the entire list of events. */
++ for (record = deleteHead; record != gcvNULL; record = next)
++ {
++ /* Get the next event record. */
++ next = record->next;
++
++ /* Free the event record. */
++ gcmkONERROR(gckEVENT_FreeRecord(Event, record));
++ }
++ }
++ }
++
++ gcmkONERROR(_TryToIdleGPU(Event));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Release the event queue mutex. */
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Event->os, Event->eventQueueMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++** gckEVENT_Stop
++**
++** Stop the hardware using the End event mechanism.
++**
++** INPUT:
++**
++** gckEVENT Event
++** Pointer to a gckEVENT object.
++**
++** gctUINT32 ProcessID
++** Process ID to which Logical belongs.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIGNAL Signal
++** Pointer to the signal to trigger.
++**
++** gctSIZE_T * waitSize
++** In/out pointer to the size of the last WAIT command, updated with
++** the size of the END command that replaces it.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckEVENT_Stop(
++ IN gckEVENT Event,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Logical,
++ IN gctSIGNAL Signal,
++ IN OUT gctSIZE_T * waitSize
++ )
++{
++ gceSTATUS status;
++ gcsEVENT_PTR record;
++ gctUINT8 id = 0xFF;
++
++ gcmkHEADER_ARG("Event=0x%x ProcessID=%u Handle=0x%x Logical=0x%x "
++ "Signal=0x%x",
++ Event, ProcessID, Handle, Logical, Signal);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Event, gcvOBJ_EVENT);
++
++ /* Submit the current event queue. */
++ gcmkONERROR(gckEVENT_Submit(Event, gcvTRUE, gcvFALSE));
++
++ /* Allocate a record. */
++ gcmkONERROR(gckEVENT_AllocateRecord(Event, gcvTRUE, &record));
++
++ /* Initialize the record. */
++ record->next = gcvNULL;
++ record->processID = ProcessID;
++ record->info.command = gcvHAL_SIGNAL;
++ record->info.u.Signal.signal = gcmPTR_TO_UINT64(Signal);
++#ifdef __QNXNTO__
++ record->info.u.Signal.coid = 0;
++ record->info.u.Signal.rcvid = 0;
++#endif
++ record->info.u.Signal.auxSignal = 0;
++ record->info.u.Signal.process = 0;
++
++
++ gcmkONERROR(gckEVENT_GetEvent(Event, gcvTRUE, &id, record, gcvKERNEL_PIXEL));
++
++ /* Replace last WAIT with END. */
++ gcmkONERROR(gckHARDWARE_End(
++ Event->kernel->hardware, Logical, waitSize
++ ));
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Flush the cache for the END. */
++ gcmkONERROR(gckOS_CacheClean(
++ Event->os,
++ ProcessID,
++ gcvNULL,
++ Handle,
++ Logical,
++ *waitSize
++ ));
++#endif
++
++ /* Wait for the signal. */
++ gcmkONERROR(gckOS_WaitSignal(Event->os, Signal, gcvINFINITE));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++static void
++_PrintRecord(
++ gcsEVENT_PTR record
++ )
++{
++ switch (record->info.command)
++ {
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ gcmkPRINT(" gcvHAL_FREE_NON_PAGED_MEMORY");
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ gcmkPRINT(" gcvHAL_FREE_CONTIGUOUS_MEMORY");
++ break;
++
++ case gcvHAL_FREE_VIDEO_MEMORY:
++ gcmkPRINT(" gcvHAL_FREE_VIDEO_MEMORY");
++ break;
++
++ case gcvHAL_WRITE_DATA:
++ gcmkPRINT(" gcvHAL_WRITE_DATA");
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ gcmkPRINT(" gcvHAL_UNLOCK_VIDEO_MEMORY");
++ break;
++
++ case gcvHAL_SIGNAL:
++ gcmkPRINT(" gcvHAL_SIGNAL process=%d signal=0x%x",
++ record->info.u.Signal.process,
++ record->info.u.Signal.signal);
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ gcmkPRINT(" gcvHAL_UNMAP_USER_MEMORY");
++ break;
++
++ case gcvHAL_TIMESTAMP:
++ gcmkPRINT(" gcvHAL_TIMESTAMP");
++ break;
++
++ case gcvHAL_COMMIT_DONE:
++ gcmkPRINT(" gcvHAL_COMMIT_DONE");
++ break;
++
++ case gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER:
++ gcmkPRINT(" gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER logical=0x%08x",
++ record->info.u.FreeVirtualCommandBuffer.logical);
++ break;
++
++ default:
++ gcmkPRINT(" Illegal Event %d", record->info.command);
++ break;
++ }
++}
++
++/*******************************************************************************
++** gckEVENT_Dump
++**
++** Dump the records in the event queues when a stall happens.
++** No protection for the event queue.
++**/
++gceSTATUS
++gckEVENT_Dump(
++ IN gckEVENT Event
++ )
++{
++ gcsEVENT_QUEUE_PTR queueHead = Event->queueHead;
++ gcsEVENT_QUEUE_PTR queue;
++ gcsEVENT_PTR record = gcvNULL;
++ gctINT i;
++
++ gcmkHEADER_ARG("Event=0x%x", Event);
++
++ gcmkPRINT("**************************\n");
++ gcmkPRINT("*** EVENT STATE DUMP ***\n");
++ gcmkPRINT("**************************\n");
++
++
++ gcmkPRINT(" Unsumbitted Event:");
++ while(queueHead)
++ {
++ queue = queueHead;
++ record = queueHead->head;
++
++ gcmkPRINT(" [%x]:", queue);
++ while(record)
++ {
++ _PrintRecord(record);
++ record = record->next;
++ }
++
++ if (queueHead == Event->queueTail)
++ {
++ queueHead = gcvNULL;
++ }
++ else
++ {
++ queueHead = queueHead->next;
++ }
++ }
++
++ gcmkPRINT(" Untriggered Event:");
++ for (i = 0; i < 30; i++)
++ {
++ queue = &Event->queues[i];
++ record = queue->head;
++
++ gcmkPRINT(" [%d]:", i);
++ while(record)
++ {
++ _PrintRecord(record);
++ record = record->next;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS gckEVENT_WaitEmpty(gckEVENT Event)
++{
++ gctBOOL isEmpty;
++
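++ /* Busy-wait until the notify loop has exited and all event queues
++ ** report empty. Note: this spins without sleeping and assumes
++ ** interrupts keep draining the queues. */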
++ while (Event->inNotify || (gcmIS_SUCCESS(gckEVENT_IsEmpty(Event, &isEmpty)) && !isEmpty)) ;
++
++ return gcvSTATUS_OK;
++}
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel.h 2015-07-27 23:13:06.190893891 +0200
+@@ -0,0 +1,1007 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_h_
++#define __gc_hal_kernel_h_
++
++#include <linux/spinlock.h>
++
++#include "gc_hal.h"
++#include "gc_hal_kernel_hardware.h"
++#include "gc_hal_driver.h"
++
++#if gcdENABLE_VG
++#include "gc_hal_kernel_vg.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++
++/*******************************************************************************
++***** New MMU Definition *******************************************************/
++#define gcdMMU_MTLB_SHIFT 22
++#define gcdMMU_STLB_4K_SHIFT 12
++#define gcdMMU_STLB_64K_SHIFT 16
++
++#define gcdMMU_MTLB_BITS (32 - gcdMMU_MTLB_SHIFT)
++#define gcdMMU_PAGE_4K_BITS gcdMMU_STLB_4K_SHIFT
++#define gcdMMU_STLB_4K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_PAGE_4K_BITS)
++#define gcdMMU_PAGE_64K_BITS gcdMMU_STLB_64K_SHIFT
++#define gcdMMU_STLB_64K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_PAGE_64K_BITS)
++
++#define gcdMMU_MTLB_ENTRY_NUM (1 << gcdMMU_MTLB_BITS)
++#define gcdMMU_MTLB_SIZE (gcdMMU_MTLB_ENTRY_NUM << 2)
++#define gcdMMU_STLB_4K_ENTRY_NUM (1 << gcdMMU_STLB_4K_BITS)
++#define gcdMMU_STLB_4K_SIZE (gcdMMU_STLB_4K_ENTRY_NUM << 2)
++#define gcdMMU_PAGE_4K_SIZE (1 << gcdMMU_STLB_4K_SHIFT)
++#define gcdMMU_STLB_64K_ENTRY_NUM (1 << gcdMMU_STLB_64K_BITS)
++#define gcdMMU_STLB_64K_SIZE (gcdMMU_STLB_64K_ENTRY_NUM << 2)
++#define gcdMMU_PAGE_64K_SIZE (1 << gcdMMU_STLB_64K_SHIFT)
++
++#define gcdMMU_MTLB_MASK (~((1U << gcdMMU_MTLB_SHIFT)-1))
++#define gcdMMU_STLB_4K_MASK ((~0U << gcdMMU_STLB_4K_SHIFT) ^ gcdMMU_MTLB_MASK)
++#define gcdMMU_PAGE_4K_MASK (gcdMMU_PAGE_4K_SIZE - 1)
++#define gcdMMU_STLB_64K_MASK ((~((1U << gcdMMU_STLB_64K_SHIFT)-1)) ^ gcdMMU_MTLB_MASK)
++#define gcdMMU_PAGE_64K_MASK (gcdMMU_PAGE_64K_SIZE - 1)
++
++/* Page offset definitions. */
++#define gcdMMU_OFFSET_4K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_STLB_4K_BITS)
++#define gcdMMU_OFFSET_4K_MASK ((1U << gcdMMU_OFFSET_4K_BITS) - 1)
++#define gcdMMU_OFFSET_16K_BITS (32 - gcdMMU_MTLB_BITS - gcdMMU_STLB_16K_BITS)
++#define gcdMMU_OFFSET_16K_MASK ((1U << gcdMMU_OFFSET_16K_BITS) - 1)
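++
++/*
++ Worked example (illustrative only): with gcdMMU_MTLB_SHIFT = 22 and 4K
++ pages, gcdMMU_MTLB_BITS = 10 and gcdMMU_STLB_4K_BITS = 10, so a 32-bit
++ GPU address splits into an MTLB index (address >> 22), an STLB index
++ ((address >> 12) & 0x3FF) and a page offset (address & 0xFFF);
++ e.g. 0x80403123 -> MTLB 0x201, STLB 0x003, offset 0x123.
++*/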
++
++/*******************************************************************************
++***** Process Secure Cache ****************************************************/
++
++#define gcdSECURE_CACHE_LRU 1
++#define gcdSECURE_CACHE_LINEAR 2
++#define gcdSECURE_CACHE_HASH 3
++#define gcdSECURE_CACHE_TABLE 4
++
++typedef struct _gcskLOGICAL_CACHE * gcskLOGICAL_CACHE_PTR;
++typedef struct _gcskLOGICAL_CACHE gcskLOGICAL_CACHE;
++struct _gcskLOGICAL_CACHE
++{
++ /* Logical address. */
++ gctPOINTER logical;
++
++ /* DMAable address. */
++ gctUINT32 dma;
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ /* Pointer to the previous and next hash tables. */
++ gcskLOGICAL_CACHE_PTR nextHash;
++ gcskLOGICAL_CACHE_PTR prevHash;
++#endif
++
++#if gcdSECURE_CACHE_METHOD != gcdSECURE_CACHE_TABLE
++ /* Pointer to the previous and next slot. */
++ gcskLOGICAL_CACHE_PTR next;
++ gcskLOGICAL_CACHE_PTR prev;
++#endif
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_LINEAR
++ /* Time stamp. */
++ gctUINT64 stamp;
++#endif
++};
++
++typedef struct _gcskSECURE_CACHE * gcskSECURE_CACHE_PTR;
++typedef struct _gcskSECURE_CACHE
++{
++ /* Cache memory. */
++ gcskLOGICAL_CACHE cache[1 + gcdSECURE_CACHE_SLOTS];
++
++ /* Last known index for LINEAR mode. */
++ gcskLOGICAL_CACHE_PTR cacheIndex;
++
++ /* Current free slot for LINEAR mode. */
++ gctUINT32 cacheFree;
++
++ /* Time stamp for LINEAR mode. */
++ gctUINT64 cacheStamp;
++
++#if gcdSECURE_CACHE_METHOD == gcdSECURE_CACHE_HASH
++ /* Hash table for HASH mode. */
++ gcskLOGICAL_CACHE hash[256];
++#endif
++}
++gcskSECURE_CACHE;
++
++/*******************************************************************************
++***** Process Database Management *********************************************/
++
++typedef enum _gceDATABASE_TYPE
++{
++ gcvDB_VIDEO_MEMORY = 1, /* Video memory created. */
++ gcvDB_COMMAND_BUFFER, /* Command Buffer. */
++ gcvDB_NON_PAGED, /* Non paged memory. */
++ gcvDB_CONTIGUOUS, /* Contiguous memory. */
++ gcvDB_SIGNAL, /* Signal. */
++ gcvDB_VIDEO_MEMORY_LOCKED, /* Video memory locked. */
++ gcvDB_CONTEXT, /* Context */
++ gcvDB_IDLE, /* GPU idle. */
++ gcvDB_MAP_MEMORY, /* Map memory */
++ gcvDB_SHARED_INFO, /* Private data */
++ gcvDB_MAP_USER_MEMORY, /* Map user memory */
++ gcvDB_SYNC_POINT, /* Sync point. */
++ gcvDB_VIDEO_MEMORY_RESERVED, /* Reserved video memory */
++ gcvDB_VIDEO_MEMORY_CONTIGUOUS, /* Contiguous video memory */
++ gcvDB_VIDEO_MEMORY_VIRTUAL, /* Virtual video memory */
++}
++gceDATABASE_TYPE;
++
++typedef struct _gcsDATABASE_RECORD * gcsDATABASE_RECORD_PTR;
++typedef struct _gcsDATABASE_RECORD
++{
++ /* Pointer to kernel. */
++ gckKERNEL kernel;
++
++ /* Pointer to next database record. */
++ gcsDATABASE_RECORD_PTR next;
++
++ /* Type of record. */
++ gceDATABASE_TYPE type;
++
++ /* Data for record. */
++ gctPOINTER data;
++ gctPHYS_ADDR physical;
++ gctSIZE_T bytes;
++}
++gcsDATABASE_RECORD;
++
++typedef struct _gcsDATABASE * gcsDATABASE_PTR;
++typedef struct _gcsDATABASE
++{
++ /* Pointer to next entry in hash list. */
++ gcsDATABASE_PTR next;
++ gctSIZE_T slot;
++
++ /* Process ID. */
++ gctUINT32 processID;
++
++ /* Sizes to query. */
++ gcsDATABASE_COUNTERS vidMem;
++ gcsDATABASE_COUNTERS nonPaged;
++ gcsDATABASE_COUNTERS contiguous;
++ gcsDATABASE_COUNTERS mapUserMemory;
++ gcsDATABASE_COUNTERS mapMemory;
++ gcsDATABASE_COUNTERS vidMemResv;
++ gcsDATABASE_COUNTERS vidMemCont;
++ gcsDATABASE_COUNTERS vidMemVirt;
++
++ /* Idle time management. */
++ gctUINT64 lastIdle;
++ gctUINT64 idle;
++
++ /* Pointer to database. */
++ gcsDATABASE_RECORD_PTR list[48];
++
++#if gcdSECURE_USER
++ /* Secure cache. */
++ gcskSECURE_CACHE cache;
++#endif
++
++ gctPOINTER handleDatabase;
++ gctPOINTER handleDatabaseMutex;
++}
++gcsDATABASE;
++
++/* Create a process database that will contain all its allocations. */
++gceSTATUS
++gckKERNEL_CreateProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ );
++
++/* Add a record to the process database. */
++gceSTATUS
++gckKERNEL_AddProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Size
++ );
++
++/* Remove a record from the process database. */
++gceSTATUS
++gckKERNEL_RemoveProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer
++ );
++
++/* Destroy the process database. */
++gceSTATUS
++gckKERNEL_DestroyProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID
++ );
++
++/* Find a record in the process database. */
++gceSTATUS
++gckKERNEL_FindProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctUINT32 ThreadID,
++ IN gceDATABASE_TYPE Type,
++ IN gctPOINTER Pointer,
++ OUT gcsDATABASE_RECORD_PTR Record
++ );
++
++/* Query the process database. */
++gceSTATUS
++gckKERNEL_QueryProcessDB(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ IN gctBOOL LastProcessID,
++ IN gceDATABASE_TYPE Type,
++ OUT gcuDATABASE_INFO * Info
++ );
++
++/* Dump the process database. */
++gceSTATUS
++gckKERNEL_DumpProcessDB(
++ IN gckKERNEL Kernel
++ );
++
++/* ID database */
++gceSTATUS
++gckKERNEL_CreateIntegerDatabase(
++ IN gckKERNEL Kernel,
++ OUT gctPOINTER * Database
++ );
++
++gceSTATUS
++gckKERNEL_DestroyIntegerDatabase(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Database
++ );
++
++gceSTATUS
++gckKERNEL_AllocateIntegerId(
++ IN gctPOINTER Database,
++ IN gctPOINTER Pointer,
++ OUT gctUINT32 * Id
++ );
++
++gceSTATUS
++gckKERNEL_FreeIntegerId(
++ IN gctPOINTER Database,
++ IN gctUINT32 Id
++ );
++
++gceSTATUS
++gckKERNEL_QueryIntegerId(
++ IN gctPOINTER Database,
++ IN gctUINT32 Id,
++ OUT gctPOINTER * Pointer
++ );
++
++gctUINT32
++gckKERNEL_AllocateNameFromPointer(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Pointer
++ );
++
++gctPOINTER
++gckKERNEL_QueryPointerFromName(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name
++ );
++
++gceSTATUS
++gckKERNEL_DeleteName(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Name
++ );
++
++#if gcdSECURE_USER
++/* Get secure cache from the process database. */
++gceSTATUS
++gckKERNEL_GetProcessDBCache(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 ProcessID,
++ OUT gcskSECURE_CACHE_PTR * Cache
++ );
++#endif
++
++/*******************************************************************************
++********* Timer Management ****************************************************/
++typedef struct _gcsTIMER * gcsTIMER_PTR;
++typedef struct _gcsTIMER
++{
++ /* Start and Stop time holders. */
++ gctUINT64 startTime;
++ gctUINT64 stopTime;
++}
++gcsTIMER;
++
++/******************************************************************************\
++********************************** Structures **********************************
++\******************************************************************************/
++
++/* gckDB object. */
++struct _gckDB
++{
++ /* Database management. */
++ gcsDATABASE_PTR db[16];
++ gctPOINTER dbMutex;
++ gcsDATABASE_PTR freeDatabase;
++ gcsDATABASE_RECORD_PTR freeRecord;
++ gcsDATABASE_PTR lastDatabase;
++ gctUINT32 lastProcessID;
++ gctUINT64 lastIdle;
++ gctUINT64 idleTime;
++ gctUINT64 lastSlowdown;
++ gctUINT64 lastSlowdownIdle;
++ /* ID - Pointer database*/
++ gctPOINTER pointerDatabase;
++ gctPOINTER pointerDatabaseMutex;
++};
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++typedef struct _gckVIRTUAL_COMMAND_BUFFER * gckVIRTUAL_COMMAND_BUFFER_PTR;
++typedef struct _gckVIRTUAL_COMMAND_BUFFER
++{
++ gctPHYS_ADDR physical;
++ gctPOINTER userLogical;
++ gctPOINTER kernelLogical;
++ gctSIZE_T pageCount;
++ gctPOINTER pageTable;
++ gctUINT32 gpuAddress;
++ gctUINT pid;
++ gckVIRTUAL_COMMAND_BUFFER_PTR next;
++ gckVIRTUAL_COMMAND_BUFFER_PTR prev;
++ gckKERNEL kernel;
++}
++gckVIRTUAL_COMMAND_BUFFER;
++#endif
++
++/* gckKERNEL object. */
++struct _gckKERNEL
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Core */
++ gceCORE core;
++
++ /* Pointer to gckHARDWARE object. */
++ gckHARDWARE hardware;
++
++ /* Pointer to gckCOMMAND object. */
++ gckCOMMAND command;
++
++ /* Pointer to gckEVENT object. */
++ gckEVENT eventObj;
++
++ /* Pointer to context. */
++ gctPOINTER context;
++
++ /* Pointer to gckMMU object. */
++ gckMMU mmu;
++
++ /* Atom holding the number of clients. */
++ gctPOINTER atomClients;
++
++#if VIVANTE_PROFILER
++ /* Enable profiling */
++ gctBOOL profileEnable;
++
++ /* Clear profile register or not*/
++ gctBOOL profileCleanRegister;
++
++#endif
++
++#ifdef QNX_SINGLE_THREADED_DEBUGGING
++ gctPOINTER debugMutex;
++#endif
++
++ /* Database management. */
++ gckDB db;
++ gctBOOL dbCreated;
++
++#if gcdENABLE_RECOVERY
++ gctPOINTER resetFlagClearTimer;
++ gctPOINTER resetAtom;
++ gctUINT64 resetTimeStamp;
++#endif
++
++ /* Pointer to gckEVENT object. */
++ gcsTIMER timers[8];
++ gctUINT32 timeOut;
++
++#if gcdENABLE_VG
++ gckVGKERNEL vg;
++#endif
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++ gckVIRTUAL_COMMAND_BUFFER_PTR virtualBufferHead;
++ gckVIRTUAL_COMMAND_BUFFER_PTR virtualBufferTail;
++ gctPOINTER virtualBufferLock;
++#endif
++
++#if gcdDVFS
++ gckDVFS dvfs;
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ gctHANDLE timeline;
++#endif
++
++ spinlock_t irq_lock;
++
++ gctPOINTER vidmemMutex;
++};
++
++struct _FrequencyHistory
++{
++ gctUINT32 frequency;
++ gctUINT32 count;
++};
++
++/* gckDVFS object. */
++struct _gckDVFS
++{
++ gckOS os;
++ gckHARDWARE hardware;
++ gctPOINTER timer;
++ gctUINT32 pollingTime;
++ gctBOOL stop;
++ gctUINT32 totalConfig;
++ gctUINT32 loads[8];
++ gctUINT8 currentScale;
++ struct _FrequencyHistory frequencyHistory[16];
++};
++
++/* gckCOMMAND object. */
++struct _gckCOMMAND
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to required object. */
++ gckKERNEL kernel;
++ gckOS os;
++
++ /* Number of bytes per page. */
++ gctSIZE_T pageSize;
++
++ /* Current pipe select. */
++ gcePIPE_SELECT pipeSelect;
++
++ /* Command queue running flag. */
++ gctBOOL running;
++
++ /* Idle flag and commit stamp. */
++ gctBOOL idle;
++ gctUINT64 commitStamp;
++
++ /* Command queue mutex. */
++ gctPOINTER mutexQueue;
++
++ /* Context switching mutex. */
++ gctPOINTER mutexContext;
++
++#if VIVANTE_PROFILER_CONTEXT
++ /* Context sequence mutex. */
++ gctPOINTER mutexContextSeq;
++#endif
++
++ /* Command queue power semaphore. */
++ gctPOINTER powerSemaphore;
++
++ /* Current command queue. */
++ struct _gcskCOMMAND_QUEUE
++ {
++ gctSIGNAL signal;
++ gctPHYS_ADDR physical;
++ gctPOINTER logical;
++ }
++ queues[gcdCOMMAND_QUEUES];
++
++ gctPHYS_ADDR physical;
++ gctPOINTER logical;
++ gctUINT32 offset;
++ gctINT index;
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++ gctUINT wrapCount;
++#endif
++
++ /* The command queue is new. */
++ gctBOOL newQueue;
++
++ /* Context management. */
++ gckCONTEXT currContext;
++
++ /* Pointer to last WAIT command. */
++ gctPHYS_ADDR waitPhysical;
++ gctPOINTER waitLogical;
++ gctSIZE_T waitSize;
++
++ /* Command buffer alignment. */
++ gctSIZE_T alignment;
++ gctSIZE_T reservedHead;
++ gctSIZE_T reservedTail;
++
++ /* Commit counter. */
++ gctPOINTER atomCommit;
++
++ /* Kernel process ID. */
++ gctUINT32 kernelProcessID;
++
++ /* End Event signal. */
++ gctSIGNAL endEventSignal;
++
++#if gcdSECURE_USER
++ /* Hint array copy buffer. */
++ gctBOOL hintArrayAllocated;
++ gctUINT hintArraySize;
++ gctUINT32_PTR hintArray;
++#endif
++};
++
++typedef struct _gcsEVENT * gcsEVENT_PTR;
++
++/* Structure holding one event to be processed. */
++typedef struct _gcsEVENT
++{
++ /* Pointer to next event in queue. */
++ gcsEVENT_PTR next;
++
++ /* Event information. */
++ gcsHAL_INTERFACE info;
++
++ /* Process ID owning the event. */
++ gctUINT32 processID;
++
++#ifdef __QNXNTO__
++ /* Kernel. */
++ gckKERNEL kernel;
++#endif
++
++ gctBOOL fromKernel;
++}
++gcsEVENT;
++
++/* Structure holding a list of events to be processed by an interrupt. */
++typedef struct _gcsEVENT_QUEUE * gcsEVENT_QUEUE_PTR;
++typedef struct _gcsEVENT_QUEUE
++{
++ /* Time stamp. */
++ gctUINT64 stamp;
++
++ /* Source of the event. */
++ gceKERNEL_WHERE source;
++
++ /* Pointer to head of event queue. */
++ gcsEVENT_PTR head;
++
++ /* Pointer to tail of event queue. */
++ gcsEVENT_PTR tail;
++
++ /* Next list of events. */
++ gcsEVENT_QUEUE_PTR next;
++}
++gcsEVENT_QUEUE;
++
++/*
++ gcdREPO_LIST_COUNT defines the maximum number of event queues with different
++ hardware module sources that may coexist at the same time. Only two sources
++ are supported - gcvKERNEL_COMMAND and gcvKERNEL_PIXEL. gcvKERNEL_COMMAND
++ source is used only for managing the kernel command queue and is only issued
++ when the current command queue gets full. Since we commit event queues every
++ time we commit command buffers, in the worst case we can have up to three
++ pending event queues:
++ - gcvKERNEL_PIXEL
++ - gcvKERNEL_COMMAND (queue overflow)
++ - gcvKERNEL_PIXEL
++*/
++#define gcdREPO_LIST_COUNT 3
++
++/* gckEVENT object. */
++struct _gckEVENT
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Pointer to required objects. */
++ gckOS os;
++ gckKERNEL kernel;
++
++ /* Time stamp. */
++ gctUINT64 stamp;
++ gctUINT64 lastCommitStamp;
++
++ /* Queue mutex. */
++ gctPOINTER eventQueueMutex;
++
++ /* Array of event queues. */
++ gcsEVENT_QUEUE queues[30];
++ gctUINT8 lastID;
++ gctPOINTER freeAtom;
++
++ /* Pending events. */
++#if gcdSMP
++ gctPOINTER pending;
++#else
++ volatile gctUINT pending;
++#endif
++
++ /* List of free event structures and its mutex. */
++ gcsEVENT_PTR freeEventList;
++ gctSIZE_T freeEventCount;
++ gctPOINTER freeEventMutex;
++
++ /* Event queues. */
++ gcsEVENT_QUEUE_PTR queueHead;
++ gcsEVENT_QUEUE_PTR queueTail;
++ gcsEVENT_QUEUE_PTR freeList;
++ gcsEVENT_QUEUE repoList[gcdREPO_LIST_COUNT];
++ gctPOINTER eventListMutex;
++
++ gctPOINTER submitTimer;
++
++ volatile gctBOOL inNotify;
++};
++
++/* Free all events belonging to a process. */
++gceSTATUS
++gckEVENT_FreeProcess(
++ IN gckEVENT Event,
++ IN gctUINT32 ProcessID
++ );
++
++gceSTATUS
++gckEVENT_Stop(
++ IN gckEVENT Event,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Logical,
++ IN gctSIGNAL Signal,
++ IN OUT gctSIZE_T * waitSize
++ );
++
++gceSTATUS
++gckEVENT_WaitEmpty(
++ IN gckEVENT Event
++ );
++
++/* gcuVIDMEM_NODE structure. */
++typedef union _gcuVIDMEM_NODE
++{
++ /* Allocated from gckVIDMEM. */
++ struct _gcsVIDMEM_NODE_VIDMEM
++ {
++ /* Owner of this node. */
++ gckVIDMEM memory;
++
++ /* Dual-linked list of nodes. */
++ gcuVIDMEM_NODE_PTR next;
++ gcuVIDMEM_NODE_PTR prev;
++
++ /* Dual linked list of free nodes. */
++ gcuVIDMEM_NODE_PTR nextFree;
++ gcuVIDMEM_NODE_PTR prevFree;
++
++ /* Information for this node. */
++ gctUINT32 offset;
++ gctSIZE_T bytes;
++ gctUINT32 alignment;
++
++#ifdef __QNXNTO__
++ /* Client/server vaddr (mapped using mmap_join). */
++ gctPOINTER logical;
++#endif
++
++ /* Locked counter. */
++ gctINT32 locked;
++
++ /* Memory pool. */
++ gcePOOL pool;
++ gctUINT32 physical;
++
++ /* Process ID owning this memory. */
++ gctUINT32 processID;
++
++ /* Prevent compositor from freeing until client unlocks. */
++ gctBOOL freePending;
++
++ /* */
++ gcsVIDMEM_NODE_SHARED_INFO sharedInfo;
++
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
++ gctPOINTER kernelVirtual;
++#endif
++
++ /* Surface type. */
++ gceSURF_TYPE type;
++ }
++ VidMem;
++
++ /* Allocated from gckOS. */
++ struct _gcsVIDMEM_NODE_VIRTUAL
++ {
++ /* Pointer to gckKERNEL object. */
++ gckKERNEL kernel;
++
++ /* Information for this node. */
++ /* Contiguously allocated? */
++ gctBOOL contiguous;
++ /* mdl record pointer... a kmalloc address. Process agnostic. */
++ gctPHYS_ADDR physical;
++ gctSIZE_T bytes;
++ /* do_mmap_pgoff address... mapped per-process. */
++ gctPOINTER logical;
++
++ /* Page table information. */
++ /* Used only when node is not contiguous */
++ gctSIZE_T pageCount;
++
++ /* Used only when node is not contiguous */
++ gctPOINTER pageTables[gcdMAX_GPU_COUNT];
++ /* Pointer to gckKERNEL object who lock this. */
++ gckKERNEL lockKernels[gcdMAX_GPU_COUNT];
++ /* Actual physical address */
++ gctUINT32 addresses[gcdMAX_GPU_COUNT];
++
++ /* Locked counter. */
++ gctINT32 lockeds[gcdMAX_GPU_COUNT];
++
++#ifdef __QNXNTO__
++ /* Single linked list of nodes. */
++ gcuVIDMEM_NODE_PTR next;
++
++ /* Unlock pending flag. */
++ gctBOOL unlockPendings[gcdMAX_GPU_COUNT];
++
++ /* Free pending flag. */
++ gctBOOL freePending;
++#endif
++
++ /* Process ID owning this memory. */
++ gctUINT32 processID;
++
++ /* Owner process sets freed to true
++ * when it tries to free a locked
++ * node */
++ gctBOOL freed;
++
++ /* */
++ gcsVIDMEM_NODE_SHARED_INFO sharedInfo;
++
++ /* Surface type. */
++ gceSURF_TYPE type;
++ }
++ Virtual;
++}
++gcuVIDMEM_NODE;
++
++/* gckVIDMEM object. */
++struct _gckVIDMEM
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Information for this video memory heap. */
++ gctUINT32 baseAddress;
++ gctSIZE_T bytes;
++ gctSIZE_T freeBytes;
++
++ /* Mapping for each type of surface. */
++ gctINT mapping[gcvSURF_NUM_TYPES];
++
++ /* Sentinel nodes for up to 8 banks. */
++ gcuVIDMEM_NODE sentinel[8];
++
++ /* Allocation threshold. */
++ gctSIZE_T threshold;
++
++#if gcdUSE_VIDMEM_PER_PID
++ /* The Pid this VidMem belongs to. */
++ gctUINT32 pid;
++
++ struct _gckVIDMEM* next;
++#endif
++};
++
++/* gckMMU object. */
++struct _gckMMU
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckHARDWARE hardware;
++
++ /* The page table mutex. */
++ gctPOINTER pageTableMutex;
++
++ /* Page table information. */
++ gctSIZE_T pageTableSize;
++ gctPHYS_ADDR pageTablePhysical;
++ gctUINT32_PTR pageTableLogical;
++ gctUINT32 pageTableEntries;
++
++ /* Master TLB information. */
++ gctSIZE_T mtlbSize;
++ gctPHYS_ADDR mtlbPhysical;
++ gctUINT32_PTR mtlbLogical;
++ gctUINT32 mtlbEntries;
++
++ /* Free entries. */
++ gctUINT32 heapList;
++ gctBOOL freeNodes;
++
++ gctPOINTER staticSTLB;
++ gctBOOL enabled;
++
++ gctUINT32 dynamicMappingStart;
++
++#ifdef __QNXNTO__
++ /* Single linked list of all allocated nodes. */
++ gctPOINTER nodeMutex;
++ gcuVIDMEM_NODE_PTR nodeList;
++#endif
++};
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++gceSTATUS
++gckOS_CreateKernelVirtualMapping(
++ IN gctPHYS_ADDR Physical,
++ OUT gctSIZE_T * PageCount,
++ OUT gctPOINTER * Logical
++ );
++
++gceSTATUS
++gckOS_DestroyKernelVirtualMapping(
++ IN gctPOINTER Logical
++ );
++
++gceSTATUS
++gckKERNEL_AllocateVirtualCommandBuffer(
++ IN gckKERNEL Kernel,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++gceSTATUS
++gckKERNEL_DestroyVirtualCommandBuffer(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ );
++
++gceSTATUS
++gckKERNEL_GetGPUAddress(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ );
++
++gceSTATUS
++gckKERNEL_QueryGPUAddress(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 GpuAddress,
++ OUT gckVIRTUAL_COMMAND_BUFFER_PTR * Buffer
++ );
++#endif
++
++gceSTATUS
++gckKERNEL_AttachProcess(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Attach
++ );
++
++gceSTATUS
++gckKERNEL_AttachProcessEx(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Attach,
++ IN gctUINT32 PID
++ );
++
++#if gcdSECURE_USER
++gceSTATUS
++gckKERNEL_MapLogicalToPhysical(
++ IN gckKERNEL Kernel,
++ IN gcskSECURE_CACHE_PTR Cache,
++ IN OUT gctPOINTER * Data
++ );
++
++gceSTATUS
++gckKERNEL_FlushTranslationCache(
++ IN gckKERNEL Kernel,
++ IN gcskSECURE_CACHE_PTR Cache,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++#endif
++
++gceSTATUS
++gckHARDWARE_QueryIdle(
++ IN gckHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ );
++
++/******************************************************************************\
++******************************* gckCONTEXT Object *******************************
++\******************************************************************************/
++
++gceSTATUS
++gckCONTEXT_Construct(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ OUT gckCONTEXT * Context
++ );
++
++gceSTATUS
++gckCONTEXT_Destroy(
++ IN gckCONTEXT Context
++ );
++
++gceSTATUS
++gckCONTEXT_Update(
++ IN gckCONTEXT Context,
++ IN gctUINT32 ProcessID,
++ IN gcsSTATE_DELTA_PTR StateDelta
++ );
++
++#if gcdLINK_QUEUE_SIZE
++void
++gckLINKQUEUE_Enqueue(
++ IN gckLINKQUEUE LinkQueue,
++ IN gctUINT32 start,
++ IN gctUINT32 end
++ );
++
++void
++gckLINKQUEUE_GetData(
++ IN gckLINKQUEUE LinkQueue,
++ IN gctUINT32 Index,
++ OUT gckLINKDATA * Data
++ );
++#endif
++
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_heap.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_heap.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_heap.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_heap.c 2015-07-27 23:13:06.194879670 +0200
+@@ -0,0 +1,859 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++/**
++** @file
++** gckHEAP object for kernel HAL layer. The heap implemented here is an arena-
++** based memory allocator. An arena-based memory heap allocates data quickly
++** from specified arenas and reduces memory fragmentation.
++**
++*/
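++
++/* Typical lifecycle (illustrative sketch only; `os` is assumed to be a
++** constructed gckOS object, gcdHEAP_SIZE is assumed to be the configured
++** default arena size, and error handling is elided):
++**
++** gckHEAP heap;
++** gctPOINTER buffer;
++** gcmkONERROR(gckHEAP_Construct(os, gcdHEAP_SIZE, &heap));
++** gcmkONERROR(gckHEAP_Allocate(heap, 128, &buffer));
++** ... use buffer ...
++** gcmkVERIFY_OK(gckHEAP_Free(heap, buffer));
++** gcmkVERIFY_OK(gckHEAP_Destroy(heap));
++*/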
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_HEAP
++
++/*******************************************************************************
++***** Structures ***************************************************************
++*******************************************************************************/
++
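++/* Sentinel stored in a node's next pointer to mark the node as in use. */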
++#define gcdIN_USE ((gcskNODE_PTR) ~0)
++
++typedef struct _gcskNODE * gcskNODE_PTR;
++typedef struct _gcskNODE
++{
++ /* Number of bytes in node. */
++ gctSIZE_T bytes;
++
++ /* Pointer to next free node, or gcvNULL to mark the node as freed, or
++ ** gcdIN_USE to mark the node as used. */
++ gcskNODE_PTR next;
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Time stamp of allocation. */
++ gctUINT64 timeStamp;
++#endif
++}
++gcskNODE;
++
++typedef struct _gcskHEAP * gcskHEAP_PTR;
++typedef struct _gcskHEAP
++{
++ /* Linked list. */
++ gcskHEAP_PTR next;
++ gcskHEAP_PTR prev;
++
++ /* Heap size. */
++ gctSIZE_T size;
++
++ /* Free list. */
++ gcskNODE_PTR freeList;
++}
++gcskHEAP;
++
++struct _gckHEAP
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to a gckOS object. */
++ gckOS os;
++
++ /* Locking mutex. */
++ gctPOINTER mutex;
++
++ /* Allocation parameters. */
++ gctSIZE_T allocationSize;
++
++ /* Heap list. */
++ gcskHEAP_PTR heap;
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT64 timeStamp;
++#endif
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Profile information. */
++ gctUINT32 allocCount;
++ gctUINT64 allocBytes;
++ gctUINT64 allocBytesMax;
++ gctUINT64 allocBytesTotal;
++ gctUINT32 heapCount;
++ gctUINT32 heapCountMax;
++ gctUINT64 heapMemory;
++ gctUINT64 heapMemoryMax;
++#endif
++};
++
++/*******************************************************************************
++***** Static Support Functions *************************************************
++*******************************************************************************/
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++static gctSIZE_T
++_DumpHeap(
++ IN gcskHEAP_PTR Heap
++ )
++{
++ gctPOINTER p;
++ gctSIZE_T leaked = 0;
++
++ /* Start at first node. */
++ for (p = Heap + 1;;)
++ {
++ /* Convert the pointer. */
++ gcskNODE_PTR node = (gcskNODE_PTR) p;
++
++ /* Check if this is a used node. */
++ if (node->next == gcdIN_USE)
++ {
++ /* Print the leaking node. */
++ gcmkTRACE_ZONE(gcvLEVEL_WARNING, gcvZONE_HEAP,
++ "Detected leaking: node=0x%x bytes=%lu timeStamp=%llu "
++ "(%08X %c%c%c%c)",
++ node, node->bytes, node->timeStamp,
++ ((gctUINT32_PTR) (node + 1))[0],
++ gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[0]),
++ gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[1]),
++ gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[2]),
++ gcmPRINTABLE(((gctUINT8_PTR) (node + 1))[3]));
++
++ /* Add leaking byte count. */
++ leaked += node->bytes;
++ }
++
++ /* Test for end of heap. */
++ if (node->bytes == 0)
++ {
++ break;
++ }
++
++ else
++ {
++ /* Move to next node. */
++ p = (gctUINT8_PTR) node + node->bytes;
++ }
++ }
++
++ /* Return the number of leaked bytes. */
++ return leaked;
++}
++#endif
++
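++/* Rebuild each heap's free list, coalescing adjacent free nodes, and free
++** any heap whose entire space has become free (summary of the code below). */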
++static gceSTATUS
++_CompactKernelHeap(
++ IN gckHEAP Heap
++ )
++{
++ gcskHEAP_PTR heap, next;
++ gctPOINTER p;
++ gcskHEAP_PTR freeList = gcvNULL;
++
++ gcmkHEADER_ARG("Heap=0x%x", Heap);
++
++ /* Walk all the heaps. */
++ for (heap = Heap->heap; heap != gcvNULL; heap = next)
++ {
++ gcskNODE_PTR lastFree = gcvNULL;
++
++ /* Zero out the free list. */
++ heap->freeList = gcvNULL;
++
++ /* Start at the first node. */
++ for (p = (gctUINT8_PTR) (heap + 1);;)
++ {
++ /* Convert the pointer. */
++ gcskNODE_PTR node = (gcskNODE_PTR) p;
++
++ gcmkASSERT(p <= (gctPOINTER) ((gctUINT8_PTR) (heap + 1) + heap->size));
++
++ /* Test if this node is not used. */
++ if (node->next != gcdIN_USE)
++ {
++ /* Test if this is the end of the heap. */
++ if (node->bytes == 0)
++ {
++ break;
++ }
++
++ /* Test if this is the first free node. */
++ else if (lastFree == gcvNULL)
++ {
++ /* Initialize the free list. */
++ heap->freeList = node;
++ lastFree = node;
++ }
++
++ else
++ {
++ /* Test if this free node is contiguous with the previous
++ ** free node. */
++ if ((gctUINT8_PTR) lastFree + lastFree->bytes == p)
++ {
++ /* Just increase the size of the previous free node. */
++ lastFree->bytes += node->bytes;
++ }
++ else
++ {
++ /* Add to linked list. */
++ lastFree->next = node;
++ lastFree = node;
++ }
++ }
++ }
++
++ /* Move to next node. */
++ p = (gctUINT8_PTR) node + node->bytes;
++ }
++
++ /* Mark the end of the chain. */
++ if (lastFree != gcvNULL)
++ {
++ lastFree->next = gcvNULL;
++ }
++
++ /* Get next heap. */
++ next = heap->next;
++
++ /* Check if the entire heap is free. */
++ if ((heap->freeList != gcvNULL)
++ && (heap->freeList->bytes == heap->size - gcmSIZEOF(gcskNODE))
++ )
++ {
++ /* Remove the heap from the linked list. */
++ if (heap->prev == gcvNULL)
++ {
++ Heap->heap = next;
++ }
++ else
++ {
++ heap->prev->next = next;
++ }
++
++ if (heap->next != gcvNULL)
++ {
++ heap->next->prev = heap->prev;
++ }
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Update profiling. */
++ Heap->heapCount -= 1;
++ Heap->heapMemory -= heap->size + gcmSIZEOF(gcskHEAP);
++#endif
++
++ /* Add this heap to the list of heaps that need to be freed. */
++ heap->next = freeList;
++ freeList = heap;
++ }
++ }
++
++ if (freeList != gcvNULL)
++ {
++ /* Release the mutex to remove any chance of a deadlock. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++
++ /* Free all heaps in the free list. */
++ for (heap = freeList; heap != gcvNULL; heap = next)
++ {
++ /* Get pointer to the next heap. */
++ next = heap->next;
++
++ /* Free the heap. */
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HEAP,
++ "Freeing heap 0x%x (%lu bytes)",
++ heap, heap->size + gcmSIZEOF(gcskHEAP));
++ gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, heap));
++ }
++
++ /* Acquire the mutex again. */
++ gcmkVERIFY_OK(
++ gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++***** gckHEAP API Code *********************************************************
++*******************************************************************************/
++
++/*******************************************************************************
++**
++** gckHEAP_Construct
++**
++** Construct a new gckHEAP object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctSIZE_T AllocationSize
++** Minimum size per arena.
++**
++** OUTPUT:
++**
++** gckHEAP * Heap
++** Pointer to a variable that will hold the pointer to the gckHEAP
++** object.
++*/
++gceSTATUS
++gckHEAP_Construct(
++ IN gckOS Os,
++ IN gctSIZE_T AllocationSize,
++ OUT gckHEAP * Heap
++ )
++{
++ gceSTATUS status;
++ gckHEAP heap = gcvNULL;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x AllocationSize=%lu", Os, AllocationSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Heap != gcvNULL);
++
++ /* Allocate the gckHEAP object. */
++ gcmkONERROR(gckOS_AllocateMemory(Os,
++ gcmSIZEOF(struct _gckHEAP),
++ &pointer));
++
++ heap = pointer;
++
++ /* Initialize the gckHEAP object. */
++ heap->object.type = gcvOBJ_HEAP;
++ heap->os = Os;
++ heap->allocationSize = AllocationSize;
++ heap->heap = gcvNULL;
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ heap->timeStamp = 0;
++#endif
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Zero the counters. */
++ heap->allocCount = 0;
++ heap->allocBytes = 0;
++ heap->allocBytesMax = 0;
++ heap->allocBytesTotal = 0;
++ heap->heapCount = 0;
++ heap->heapCountMax = 0;
++ heap->heapMemory = 0;
++ heap->heapMemoryMax = 0;
++#endif
++
++ /* Create the mutex. */
++ gcmkONERROR(gckOS_CreateMutex(Os, &heap->mutex));
++
++ /* Return the pointer to the gckHEAP object. */
++ *Heap = heap;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Heap=0x%x", *Heap);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (heap != gcvNULL)
++ {
++ /* Free the heap structure. */
++ gcmkVERIFY_OK(gckOS_FreeMemory(Os, heap));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckHEAP_Destroy
++**
++** Destroy a gckHEAP object.
++**
++** INPUT:
++**
++** gckHEAP Heap
++** Pointer to a gckHEAP object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckHEAP_Destroy(
++ IN gckHEAP Heap
++ )
++{
++ gcskHEAP_PTR heap;
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctSIZE_T leaked = 0;
++#endif
++
++ gcmkHEADER_ARG("Heap=0x%x", Heap);
++
++ for (heap = Heap->heap; heap != gcvNULL; heap = Heap->heap)
++ {
++ /* Unlink heap from linked list. */
++ Heap->heap = heap->next;
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Check for leaked memory. */
++ leaked += _DumpHeap(heap);
++#endif
++
++ /* Free the heap. */
++ gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, heap));
++ }
++
++ /* Free the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Heap->os, Heap->mutex));
++
++ /* Free the heap structure. */
++ gcmkVERIFY_OK(gckOS_FreeMemory(Heap->os, Heap));
++
++ /* Success. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gcmkFOOTER_ARG("leaked=%lu", leaked);
++#else
++ gcmkFOOTER_NO();
++#endif
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckHEAP_Allocate
++**
++** Allocate data from the heap.
++**
++** INPUT:
++**
++** gckHEAP Heap
++** Pointer to a gckHEAP object.
++**
++** IN gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * Memory
++** Pointer to a variable that will hold the address of the allocated
++** memory.
++*/
++gceSTATUS
++gckHEAP_Allocate(
++ IN gckHEAP Heap,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ )
++{
++ gctBOOL acquired = gcvFALSE;
++ gcskHEAP_PTR heap;
++ gceSTATUS status;
++ gctSIZE_T bytes;
++ gcskNODE_PTR node, used, prevFree = gcvNULL;
++ gctPOINTER memory = gcvNULL;
++
++ gcmkHEADER_ARG("Heap=0x%x Bytes=%lu", Heap, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Determine number of bytes required for a node. */
++ bytes = gcmALIGN(Bytes + gcmSIZEOF(gcskNODE), 8);
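++ /* Resulting layout (illustrative): [gcskNODE header][payload], rounded
++ ** up to 8 bytes; the caller receives a pointer just past the header. */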
++
++ /* Acquire the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
++
++ acquired = gcvTRUE;
++
++ /* Check if this allocation is bigger than the default allocation size. */
++ if (bytes > Heap->allocationSize - gcmSIZEOF(gcskHEAP) - gcmSIZEOF(gcskNODE))
++ {
++ /* Adjust allocation size. */
++ Heap->allocationSize = bytes * 2;
++ }
++
++ else if (Heap->heap != gcvNULL)
++ {
++ gctINT i;
++
++ /* 2 retries, since we might need to compact. */
++ for (i = 0; i < 2; ++i)
++ {
++ /* Walk all the heaps. */
++ for (heap = Heap->heap; heap != gcvNULL; heap = heap->next)
++ {
++ /* Check if this heap has enough bytes to hold the request. */
++ if (bytes <= heap->size - gcmSIZEOF(gcskNODE))
++ {
++ prevFree = gcvNULL;
++
++ /* Walk the chain of free nodes. */
++ for (node = heap->freeList;
++ node != gcvNULL;
++ node = node->next
++ )
++ {
++ gcmkASSERT(node->next != gcdIN_USE);
++
++ /* Check if this free node has enough bytes. */
++ if (node->bytes >= bytes)
++ {
++ /* Use the node. */
++ goto UseNode;
++ }
++
++ /* Save current free node for linked list management. */
++ prevFree = node;
++ }
++ }
++ }
++
++ if (i == 0)
++ {
++ /* Compact the heap. */
++ gcmkVERIFY_OK(_CompactKernelHeap(Heap));
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "===== KERNEL HEAP =====");
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Number of allocations : %12u",
++ Heap->allocCount);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Number of bytes allocated : %12llu",
++ Heap->allocBytes);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Maximum allocation size : %12llu",
++ Heap->allocBytesMax);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Total number of bytes allocated : %12llu",
++ Heap->allocBytesTotal);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Number of heaps : %12u",
++ Heap->heapCount);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Heap memory in bytes : %12llu",
++ Heap->heapMemory);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Maximum number of heaps : %12u",
++ Heap->heapCountMax);
++ gcmkTRACE_ZONE(gcvLEVEL_VERBOSE, gcvZONE_HEAP,
++ "Maximum heap memory in bytes : %12llu",
++ Heap->heapMemoryMax);
++#endif
++ }
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkONERROR(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++
++ acquired = gcvFALSE;
++
++ /* Allocate a new heap. */
++ gcmkONERROR(
++ gckOS_AllocateMemory(Heap->os,
++ Heap->allocationSize,
++ &memory));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_HEAP,
++ "Allocated heap 0x%x (%lu bytes)",
++ memory, Heap->allocationSize);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
++
++ acquired = gcvTRUE;
++
++ /* Use the allocated memory as the heap. */
++ heap = (gcskHEAP_PTR) memory;
++
++ /* Insert this heap to the head of the chain. */
++ heap->next = Heap->heap;
++ heap->prev = gcvNULL;
++ heap->size = Heap->allocationSize - gcmSIZEOF(gcskHEAP);
++
++ if (heap->next != gcvNULL)
++ {
++ heap->next->prev = heap;
++ }
++ Heap->heap = heap;
++
++ /* Mark the end of the heap. */
++ node = (gcskNODE_PTR) ( (gctUINT8_PTR) heap
++ + Heap->allocationSize
++ - gcmSIZEOF(gcskNODE)
++ );
++ node->bytes = 0;
++ node->next = gcvNULL;
++
++ /* Create a free list. */
++ node = (gcskNODE_PTR) (heap + 1);
++ heap->freeList = node;
++
++ /* Initialize the free list. */
++ node->bytes = heap->size - gcmSIZEOF(gcskNODE);
++ node->next = gcvNULL;
++
++ /* No previous free. */
++ prevFree = gcvNULL;
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Update profiling. */
++ Heap->heapCount += 1;
++ Heap->heapMemory += Heap->allocationSize;
++
++ if (Heap->heapCount > Heap->heapCountMax)
++ {
++ Heap->heapCountMax = Heap->heapCount;
++ }
++ if (Heap->heapMemory > Heap->heapMemoryMax)
++ {
++ Heap->heapMemoryMax = Heap->heapMemory;
++ }
++#endif
++
++UseNode:
++ /* Verify some stuff. */
++ gcmkASSERT(heap != gcvNULL);
++ gcmkASSERT(node != gcvNULL);
++ gcmkASSERT(node->bytes >= bytes);
++
++ if (heap->prev != gcvNULL)
++ {
++ /* Unlink the heap from the linked list. */
++ heap->prev->next = heap->next;
++ if (heap->next != gcvNULL)
++ {
++ heap->next->prev = heap->prev;
++ }
++
++ /* Move the heap to the front of the list. */
++ heap->next = Heap->heap;
++ heap->prev = gcvNULL;
++ Heap->heap = heap;
++ heap->next->prev = heap;
++ }
++
++ /* Check if there is enough free space left after usage for another free
++ ** node. */
++ if (node->bytes - bytes >= gcmSIZEOF(gcskNODE))
++ {
++ /* Allocate the used space from the back of the free node. */
++ used = (gcskNODE_PTR) ((gctUINT8_PTR) node + node->bytes - bytes);
++
++ /* Adjust the number of free bytes. */
++ node->bytes -= bytes;
++ gcmkASSERT(node->bytes >= gcmSIZEOF(gcskNODE));
++ }
++ else
++ {
++ /* Remove this free list from the chain. */
++ if (prevFree == gcvNULL)
++ {
++ heap->freeList = node->next;
++ }
++ else
++ {
++ prevFree->next = node->next;
++ }
++
++ /* Consume the entire free node. */
++ used = (gcskNODE_PTR) node;
++ bytes = node->bytes;
++ }
++
++ /* Mark node as used. */
++ used->bytes = bytes;
++ used->next = gcdIN_USE;
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ used->timeStamp = ++Heap->timeStamp;
++#endif
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Update profile counters. */
++ Heap->allocCount += 1;
++ Heap->allocBytes += bytes;
++ Heap->allocBytesMax = gcmMAX(Heap->allocBytes, Heap->allocBytesMax);
++ Heap->allocBytesTotal += bytes;
++#endif
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++
++ /* Return pointer to memory. */
++ *Memory = used + 1;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Memory=0x%x", *Memory);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++ }
++
++ if (memory != gcvNULL)
++ {
++ /* Free the heap memory. */
++ gckOS_FreeMemory(Heap->os, memory);
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
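++
++/*******************************************************************************
++** Allocation strategy note.
++**
++** gckHEAP_Allocate carves the used block from the back of the chosen free
++** node, so the free node's header stays in place and only its byte count
++** shrinks. A minimal sketch of the split, with illustrative sizes only
++** (the request already covers the gcskNODE header, since the returned
++** pointer is used + 1):
++**
++** before: [gcskNODE bytes=64 | .......... free ..........]
++** after: [gcskNODE bytes=48 | . free .][gcskNODE | 16 used]
++**
++** used = (gcskNODE_PTR) ((gctUINT8_PTR) node + node->bytes - bytes);
++** node->bytes -= bytes;
++**
++** Only when the remainder would be too small to hold another gcskNODE
++** header is the whole free node consumed and unlinked from the free list.
++*/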
++
++/*******************************************************************************
++**
++** gckHEAP_Free
++**
++** Free allocated memory from the heap.
++**
++** INPUT:
++**
++** gckHEAP Heap
++** Pointer to a gckHEAP object.
++**
++** IN gctPOINTER Memory
++** Pointer to memory to free.
++**
++** OUTPUT:
++**
++** NOTHING.
++*/
++gceSTATUS
++gckHEAP_Free(
++ IN gckHEAP Heap,
++ IN gctPOINTER Memory
++ )
++{
++ gcskNODE_PTR node;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Heap=0x%x Memory=0x%x", Heap, Memory);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Heap->os, Heap->mutex, gcvINFINITE));
++
++ /* Pointer to structure. */
++ node = (gcskNODE_PTR) Memory - 1;
++
++ /* Mark the node as freed. */
++ node->next = gcvNULL;
++
++#if VIVANTE_PROFILER || gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Update profile counters. */
++ Heap->allocBytes -= node->bytes;
++#endif
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_ReleaseMutex(Heap->os, Heap->mutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
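++
++/* Layout note for gckHEAP_Free: every allocation is preceded by its
++** gcskNODE header, so the header is recovered with plain pointer
++** arithmetic ((gcskNODE_PTR) Memory - 1), mirroring the "used + 1"
++** returned by gckHEAP_Allocate. A minimal usage sketch (heap and memory
++** are illustrative locals):
++**
++** gctPOINTER memory;
++** gcmkONERROR(gckHEAP_Allocate(heap, 64, &memory));
++** gcmkVERIFY_OK(gckHEAP_Free(heap, memory));
++**
++** Free only marks the node (next = gcvNULL) and updates the counters;
++** the space is presumably reclaimed by a later compaction pass that is
++** not shown here.
++*/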
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gckHEAP_ProfileStart(
++ IN gckHEAP Heap
++ )
++{
++ gcmkHEADER_ARG("Heap=0x%x", Heap);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
++
++ /* Zero the counters. */
++ Heap->allocCount = 0;
++ Heap->allocBytes = 0;
++ Heap->allocBytesMax = 0;
++ Heap->allocBytesTotal = 0;
++ Heap->heapCount = 0;
++ Heap->heapCountMax = 0;
++ Heap->heapMemory = 0;
++ Heap->heapMemoryMax = 0;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckHEAP_ProfileEnd(
++ IN gckHEAP Heap,
++ IN gctCONST_STRING Title
++ )
++{
++ gcmkHEADER_ARG("Heap=0x%x Title=0x%x", Heap, Title);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Heap, gcvOBJ_HEAP);
++ gcmkVERIFY_ARGUMENT(Title != gcvNULL);
++
++ gcmkPRINT("");
++ gcmkPRINT("=====[ HEAP - %s ]=====", Title);
++ gcmkPRINT("Number of allocations : %12u", Heap->allocCount);
++ gcmkPRINT("Number of bytes allocated : %12llu", Heap->allocBytes);
++ gcmkPRINT("Maximum allocation size : %12llu", Heap->allocBytesMax);
++ gcmkPRINT("Total number of bytes allocated : %12llu", Heap->allocBytesTotal);
++ gcmkPRINT("Number of heaps : %12u", Heap->heapCount);
++ gcmkPRINT("Heap memory in bytes : %12llu", Heap->heapMemory);
++ gcmkPRINT("Maximum number of heaps : %12u", Heap->heapCountMax);
++ gcmkPRINT("Maximum heap memory in bytes : %12llu", Heap->heapMemoryMax);
++ gcmkPRINT("==============================================");
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif /* VIVANTE_PROFILER */
++
++/*******************************************************************************
++***** Test Code ****************************************************************
++*******************************************************************************/
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_interrupt_vg.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_interrupt_vg.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_interrupt_vg.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_interrupt_vg.c 2015-07-27 23:13:06.194879670 +0200
+@@ -0,0 +1,877 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#if gcdENABLE_VG
++
++/******************************************************************************\
++*********************** Support Functions and Definitions **********************
++\******************************************************************************/
++
++/* Interrupt statistics will be accumulated if not zero. */
++#define gcmENABLE_INTERRUPT_STATISTICS 0
++
++#define _GC_OBJ_ZONE gcvZONE_INTERRUPT
++
++/* Object structure. */
++struct _gckVGINTERRUPT
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* gckVGKERNEL pointer. */
++ gckVGKERNEL kernel;
++
++ /* gckOS pointer. */
++ gckOS os;
++
++ /* Interrupt handlers. */
++ gctINTERRUPT_HANDLER handlers[32];
++
++ /* Main interrupt handler thread. */
++ gctTHREAD handler;
++ gctBOOL terminate;
++
++ /* Interrupt FIFO. */
++ gctSEMAPHORE fifoValid;
++ gctUINT32 fifo[256];
++ gctUINT fifoItems;
++ gctUINT8 head;
++ gctUINT8 tail;
++
++ /* Interrupt statistics. */
++#if gcmENABLE_INTERRUPT_STATISTICS
++ gctUINT maxFifoItems;
++ gctUINT fifoOverflow;
++ gctUINT maxSimultaneous;
++ gctUINT multipleCount;
++#endif
++};
++
++
++/*******************************************************************************
++**
++** _ProcessInterrupt
++**
++** The interrupt processor.
++**
++** INPUT:
++**
++** ThreadParameter
++** Pointer to the gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++#if gcmENABLE_INTERRUPT_STATISTICS
++static void
++_ProcessInterrupt(
++ gckVGINTERRUPT Interrupt,
++ gctUINT_PTR TriggeredCount
++ )
++#else
++static void
++_ProcessInterrupt(
++ gckVGINTERRUPT Interrupt
++ )
++#endif
++{
++ gceSTATUS status;
++ gctUINT32 triggered;
++ gctUINT i;
++
++ /* Advance to the next entry. */
++ Interrupt->tail += 1;
++ Interrupt->fifoItems -= 1;
++
++ /* Get the interrupt value. */
++ triggered = Interrupt->fifo[Interrupt->tail];
++ gcmkASSERT(triggered != 0);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s: triggered=0x%08X\n",
++ __FUNCTION__,
++ triggered
++ );
++
++ /* Walk through all possible interrupts. */
++ for (i = 0; i < gcmCOUNTOF(Interrupt->handlers); i += 1)
++ {
++ /* Test if interrupt happened. */
++ if ((triggered & 1) == 1)
++ {
++#if gcmENABLE_INTERRUPT_STATISTICS
++ if (TriggeredCount != gcvNULL)
++ {
++ (* TriggeredCount) += 1;
++ }
++#endif
++
++ /* Make sure we have a valid handler. */
++ if (Interrupt->handlers[i] == gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s: Interrupt %d isn't registered.\n",
++ __FUNCTION__, i
++ );
++ }
++ else
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s: interrupt=%d\n",
++ __FUNCTION__,
++ i
++ );
++
++ /* Call the handler. */
++ status = Interrupt->handlers[i] (Interrupt->kernel);
++
++ if (gcmkIS_ERROR(status))
++ {
++ /* Failed to signal the semaphore. */
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s: Error %d incrementing the semaphore #%d.\n",
++ __FUNCTION__, status, i
++ );
++ }
++ }
++ }
++
++ /* Next interrupt. */
++ triggered >>= 1;
++
++ /* No more interrupts to handle? */
++ if (triggered == 0)
++ {
++ break;
++ }
++ }
++}
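++
++/* Indexing note: head and tail are gctUINT8 and the FIFO has exactly 256
++** slots, so the "Interrupt->tail += 1" above wraps around the ring through
++** ordinary 8-bit overflow -- no explicit modulo is needed. The same idiom
++** in isolation (tail is an illustrative local):
++**
++** gctUINT8 tail = 255;
++** tail += 1; // tail == 0 again, i.e. back at fifo[0]
++*/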
++
++
++/*******************************************************************************
++**
++** _MainInterruptHandler
++**
++** The main interrupt thread serves the interrupt FIFO and calls the
++** registered handlers for the interrupts that occurred. The handlers are
++** called in the sequence the interrupts occurred, except when multiple
++** interrupts occurred at the same time. In that case the handler calls are
++** "sorted" by interrupt number, giving the interrupts with lower numbers
++** higher priority.
++**
++** INPUT:
++**
++** ThreadParameter
++** Pointer to the gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++static gctTHREADFUNCRESULT gctTHREADFUNCTYPE
++_MainInterruptHandler(
++ gctTHREADFUNCPARAMETER ThreadParameter
++ )
++{
++ gceSTATUS status;
++ gckVGINTERRUPT interrupt;
++
++#if gcmENABLE_INTERRUPT_STATISTICS
++ gctUINT count;
++#endif
++
++ /* Cast the object. */
++ interrupt = (gckVGINTERRUPT) ThreadParameter;
++
++ /* Enter the loop. */
++ while (gcvTRUE)
++ {
++ /* Wait for an interrupt. */
++ status = gckOS_DecrementSemaphore(interrupt->os, interrupt->fifoValid);
++
++ /* Error? */
++ if (gcmkIS_ERROR(status))
++ {
++ break;
++ }
++
++ /* System termination request? */
++ if (status == gcvSTATUS_TERMINATE)
++ {
++ break;
++ }
++
++ /* Driver is shutting down? */
++ if (interrupt->terminate)
++ {
++ break;
++ }
++
++#if gcmENABLE_INTERRUPT_STATISTICS
++ /* Reset triggered count. */
++ count = 0;
++
++ /* Process the interrupt. */
++ _ProcessInterrupt(interrupt, &count);
++
++ /* Update counters. */
++ if (count > interrupt->maxSimultaneous)
++ {
++ interrupt->maxSimultaneous = count;
++ }
++
++ if (count > 1)
++ {
++ interrupt->multipleCount += 1;
++ }
++#else
++ /* Process the interrupt. */
++ _ProcessInterrupt(interrupt);
++#endif
++ }
++
++ return 0;
++}
++
++
++/*******************************************************************************
++**
++** _StartInterruptHandler / _StopInterruptHandler
++**
++** Main interrupt handler routine control.
++**
++** INPUT:
++**
++** ThreadParameter
++** Pointer to the gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++static gceSTATUS
++_StartInterruptHandler(
++ gckVGINTERRUPT Interrupt
++ )
++{
++ gceSTATUS status, last;
++
++ do
++ {
++ /* Objects must not be already created. */
++ gcmkASSERT(Interrupt->fifoValid == gcvNULL);
++ gcmkASSERT(Interrupt->handler == gcvNULL);
++
++ /* Reset the termination request. */
++ Interrupt->terminate = gcvFALSE;
++
++#if !gcdENABLE_INFINITE_SPEED_HW
++ /* Construct the fifo semaphore. */
++ gcmkERR_BREAK(gckOS_CreateSemaphoreVG(
++ Interrupt->os, &Interrupt->fifoValid
++ ));
++
++ /* Start the interrupt handler thread. */
++ gcmkERR_BREAK(gckOS_StartThread(
++ Interrupt->os,
++ _MainInterruptHandler,
++ Interrupt,
++ &Interrupt->handler
++ ));
++#endif
++
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (Interrupt->fifoValid != gcvNULL)
++ {
++ gcmkCHECK_STATUS(gckOS_DestroySemaphore(
++ Interrupt->os, Interrupt->fifoValid
++ ));
++
++ Interrupt->fifoValid = gcvNULL;
++ }
++
++ /* Return the status. */
++ return status;
++}
++
++static gceSTATUS
++_StopInterruptHandler(
++ gckVGINTERRUPT Interrupt
++ )
++{
++ gceSTATUS status;
++
++ do
++ {
++ /* Does the thread exist? */
++ if (Interrupt->handler == gcvNULL)
++ {
++ /* The semaphore must be NULL as well. */
++ gcmkASSERT(Interrupt->fifoValid == gcvNULL);
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ break;
++ }
++
++ /* The semaphore must exist as well. */
++ gcmkASSERT(Interrupt->fifoValid != gcvNULL);
++
++ /* Set the termination request. */
++ Interrupt->terminate = gcvTRUE;
++
++ /* Unlock the thread. */
++ gcmkERR_BREAK(gckOS_IncrementSemaphore(
++ Interrupt->os, Interrupt->fifoValid
++ ));
++
++ /* Wait until the thread quits. */
++ gcmkERR_BREAK(gckOS_StopThread(
++ Interrupt->os,
++ Interrupt->handler
++ ));
++
++ /* Destroy the semaphore. */
++ gcmkERR_BREAK(gckOS_DestroySemaphore(
++ Interrupt->os, Interrupt->fifoValid
++ ));
++
++ /* Reset handles. */
++ Interrupt->handler = gcvNULL;
++ Interrupt->fifoValid = gcvNULL;
++ }
++ while (gcvFALSE);
++
++ /* Return the status. */
++ return status;
++}
++
++
++/******************************************************************************\
++***************************** Interrupt Object API *****************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Construct
++**
++** Construct an interrupt object.
++**
++** INPUT:
++**
++** Kernel
++** Pointer to the gckVGKERNEL object.
++**
++** OUTPUT:
++**
++** Interrupt
++** Pointer to the new gckVGINTERRUPT object.
++*/
++
++gceSTATUS
++gckVGINTERRUPT_Construct(
++ IN gckVGKERNEL Kernel,
++ OUT gckVGINTERRUPT * Interrupt
++ )
++{
++ gceSTATUS status;
++ gckVGINTERRUPT interrupt = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x Interrupt=0x%x", Kernel, Interrupt);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Interrupt != gcvNULL);
++
++ do
++ {
++ /* Allocate the gckVGINTERRUPT structure. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Kernel->os,
++ gcmSIZEOF(struct _gckVGINTERRUPT),
++ (gctPOINTER *) &interrupt
++ ));
++
++ /* Reset the object data. */
++ gcmkVERIFY_OK(gckOS_ZeroMemory(
++ interrupt, gcmSIZEOF(struct _gckVGINTERRUPT)
++ ));
++
++ /* Initialize the object. */
++ interrupt->object.type = gcvOBJ_INTERRUPT;
++
++ /* Initialize the object pointers. */
++ interrupt->kernel = Kernel;
++ interrupt->os = Kernel->os;
++
++ /* Initialize the current FIFO position. */
++ interrupt->head = (gctUINT8)~0;
++ interrupt->tail = (gctUINT8)~0;
++
++ /* Start the thread. */
++ gcmkERR_BREAK(_StartInterruptHandler(interrupt));
++
++ /* Return interrupt object. */
++ *Interrupt = interrupt;
++
++ gcmkFOOTER_ARG("*Interrup=0x%x", *Interrupt);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (interrupt != gcvNULL)
++ {
++ /* Free the gckVGINTERRUPT structure. */
++ gcmkVERIFY_OK(gckOS_Free(interrupt->os, interrupt));
++ }
++
++ gcmkFOOTER();
++
++ /* Return the status. */
++ return status;
++}
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Destroy
++**
++** Destroy an interrupt object.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to the gckVGINTERRUPT object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++gceSTATUS
++gckVGINTERRUPT_Destroy(
++ IN gckVGINTERRUPT Interrupt
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Interrupt=0x%x", Interrupt);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++
++ do
++ {
++ /* Stop the interrupt thread. */
++ gcmkERR_BREAK(_StopInterruptHandler(Interrupt));
++
++ /* Mark the object as unknown. */
++ Interrupt->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckVGINTERRUPT structure. */
++ gcmkERR_BREAK(gckOS_Free(Interrupt->os, Interrupt));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++
++ /* Return the status. */
++ return status;
++}
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_DumpState
++**
++** Print the current state of the interrupt manager.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to a gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++#if gcvDEBUG
++gceSTATUS
++gckVGINTERRUPT_DumpState(
++ IN gckVGINTERRUPT Interrupt
++ )
++{
++ gcmkHEADER_ARG("Interrupt=0x%x", Interrupt);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++
++ /* Print the header. */
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ "%s: INTERRUPT OBJECT STATUS\n",
++ __FUNCTION__
++ );
++
++ /* Print statistics. */
++#if gcmENABLE_INTERRUPT_STATISTICS
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " Maximum number of FIFO items accumulated at a single time: %d\n",
++ Interrupt->maxFifoItems
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " Interrupt FIFO overflow happened times: %d\n",
++ Interrupt->fifoOverflow
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " Maximum number of interrupts simultaneously generated: %d\n",
++ Interrupt->maxSimultaneous
++ );
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " Number of times when there were multiple interrupts generated: %d\n",
++ Interrupt->multipleCount
++ );
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " The current number of entries in the FIFO: %d\n",
++ Interrupt->fifoItems
++ );
++
++ /* Print the FIFO contents. */
++ if (Interrupt->fifoItems != 0)
++ {
++ gctUINT8 index;
++ gctUINT8 last;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " FIFO current contents:\n"
++ );
++
++ /* Get the current pointers. */
++ index = Interrupt->tail;
++ last = Interrupt->head;
++
++ while (index != last)
++ {
++ /* Advance to the next entry. */
++ index += 1;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_VERBOSE, gcvZONE_COMMAND,
++ " %d: 0x%08X\n",
++ index, Interrupt->fifo[index]
++ );
++ }
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++#endif
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Enable
++**
++** Enable the specified interrupt.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to a gckVGINTERRUPT object.
++**
++** Id
++** Pointer to the variable that holds the interrupt number to be
++** registered, in the range 0..31.
++** If the value is less than 0, gckVGINTERRUPT_Enable will attempt
++** to find an unused interrupt. If such an interrupt is found, the number
++** will be assigned to the variable if the function call succeeds.
++**
++** Handler
++** Pointer to the handler to register for the interrupt.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++gceSTATUS
++gckVGINTERRUPT_Enable(
++ IN gckVGINTERRUPT Interrupt,
++ IN OUT gctINT32_PTR Id,
++ IN gctINTERRUPT_HANDLER Handler
++ )
++{
++ gceSTATUS status;
++ gctINT32 i;
++
++ gcmkHEADER_ARG("Interrupt=0x%x Id=0x%x Handler=0x%x", Interrupt, Id, Handler);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++ gcmkVERIFY_ARGUMENT(Id != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Handler != gcvNULL);
++
++ do
++ {
++ /* See if we need to allocate an ID. */
++ if (*Id < 0)
++ {
++ /* Find the first unused interrupt handler. */
++ for (i = 0; i < gcmCOUNTOF(Interrupt->handlers); ++i)
++ {
++ if (Interrupt->handlers[i] == gcvNULL)
++ {
++ break;
++ }
++ }
++
++ /* No unused interrupts? */
++ if (i == gcmCOUNTOF(Interrupt->handlers))
++ {
++ status = gcvSTATUS_OUT_OF_RESOURCES;
++ break;
++ }
++
++ /* Update the interrupt ID. */
++ *Id = i;
++ }
++
++ /* Make sure the ID is in range. */
++ else if (*Id >= gcmCOUNTOF(Interrupt->handlers))
++ {
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++
++ /* Set interrupt handler. */
++ Interrupt->handlers[*Id] = Handler;
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
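++
++/* A minimal usage sketch for the automatic ID assignment above (interrupt
++** and MyHandler are illustrative placeholders):
++**
++** gctINT32 id = -1; // any negative value requests auto-assignment
++** gcmkONERROR(gckVGINTERRUPT_Enable(interrupt, &id, MyHandler));
++** // id now holds the first previously unused interrupt number (0..31)
++*/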
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Disable
++**
++** Disable the specified interrupt.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to a gckVGINTERRUPT object.
++**
++** Id
++** Interrupt number to be disabled in range 0..31.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++gceSTATUS
++gckVGINTERRUPT_Disable(
++ IN gckVGINTERRUPT Interrupt,
++ IN gctINT32 Id
++ )
++{
++ gcmkHEADER_ARG("Interrupt=0x%x Id=0x%x", Interrupt, Id);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++ gcmkVERIFY_ARGUMENT((Id >= 0) && (Id < gcmCOUNTOF(Interrupt->handlers)));
++
++ /* Reset interrupt handler. */
++ Interrupt->handlers[Id] = gcvNULL;
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++
++/*******************************************************************************
++**
++** gckVGINTERRUPT_Enque
++**
++** Read the interrupt status register and put the value in the interrupt FIFO.
++**
++** INPUT:
++**
++** Interrupt
++** Pointer to a gckVGINTERRUPT object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++
++#ifndef __QNXNTO__
++gceSTATUS
++gckVGINTERRUPT_Enque(
++ IN gckVGINTERRUPT Interrupt
++ )
++#else
++gceSTATUS
++gckVGINTERRUPT_Enque(
++ IN gckVGINTERRUPT Interrupt,
++ OUT gckOS *Os,
++ OUT gctSEMAPHORE *Semaphore
++ )
++#endif
++{
++ gceSTATUS status;
++ gctUINT32 triggered;
++
++ gcmkHEADER_ARG("Interrupt=0x%x", Interrupt);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Interrupt, gcvOBJ_INTERRUPT);
++
++#ifdef __QNXNTO__
++ *Os = gcvNULL;
++ *Semaphore = gcvNULL;
++#endif
++
++ do
++ {
++ /* Read interrupt status register. */
++ gcmkERR_BREAK(gckVGHARDWARE_ReadInterrupt(
++ Interrupt->kernel->hardware, &triggered
++ ));
++
++ /* Mask out TS overflow interrupt */
++ triggered &= 0xfffffffe;
++
++ /* No interrupts to process? */
++ if (triggered == 0)
++ {
++ status = gcvSTATUS_NOT_OUR_INTERRUPT;
++ break;
++ }
++
++ /* FIFO overflow? */
++ if (Interrupt->fifoItems == gcmCOUNTOF(Interrupt->fifo))
++ {
++#if gcmENABLE_INTERRUPT_STATISTICS
++ Interrupt->fifoOverflow += 1;
++#endif
++
++ /* OR the interrupt with the last value in the FIFO. */
++ Interrupt->fifo[Interrupt->head] |= triggered;
++
++ /* Success (kind of). */
++ status = gcvSTATUS_OK;
++ }
++ else
++ {
++ /* Advance to the next entry. */
++ Interrupt->head += 1;
++ Interrupt->fifoItems += 1;
++
++#if gcmENABLE_INTERRUPT_STATISTICS
++ if (Interrupt->fifoItems > Interrupt->maxFifoItems)
++ {
++ Interrupt->maxFifoItems = Interrupt->fifoItems;
++ }
++#endif
++
++ /* Set the new value. */
++ Interrupt->fifo[Interrupt->head] = triggered;
++
++#ifndef __QNXNTO__
++ /* Increment the FIFO semaphore. */
++ gcmkERR_BREAK(gckOS_IncrementSemaphore(
++ Interrupt->os, Interrupt->fifoValid
++ ));
++#else
++ *Os = Interrupt->os;
++ *Semaphore = Interrupt->fifoValid;
++#endif
++
++ /* Windows kills our threads prematurely when the application
++ exits. Verify here that the thread is still alive. */
++ status = gckOS_VerifyThread(Interrupt->os, Interrupt->handler);
++
++ /* Has the thread been prematurely terminated? */
++ if (status != gcvSTATUS_OK)
++ {
++ /* Process all accumulated interrupts. */
++ while (Interrupt->head != Interrupt->tail)
++ {
++#if gcmENABLE_INTERRUPT_STATISTICS
++ /* Process the interrupt. */
++ _ProcessInterrupt(Interrupt, gcvNULL);
++#else
++ /* Process the interrupt. */
++ _ProcessInterrupt(Interrupt);
++#endif
++ }
++
++ /* Set success. */
++ status = gcvSTATUS_OK;
++ }
++ }
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
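++
++/* Overflow behaviour note: when the FIFO is full, gckVGINTERRUPT_Enque ORs
++** the freshly read status bits into the newest entry instead of dropping
++** them, so no interrupt bit is ever lost -- only the ordering between the
++** merged interrupts collapses. Sketch of the merge for statuses 0x3, 0x8:
++**
++** fifo[head] = 0x3; // already queued when the FIFO filled up
++** fifo[head] |= 0x8; // merged on overflow -> 0xB; handlers 0, 1 and 3
++** // each run once when the entry is processed
++*/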
++
++#endif /* gcdENABLE_VG */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_mmu.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_mmu.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_mmu.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_mmu.c 2015-07-27 23:13:06.194879670 +0200
+@@ -0,0 +1,1982 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_MMU
++
++typedef enum _gceMMU_TYPE
++{
++ gcvMMU_USED = (0 << 4),
++ gcvMMU_SINGLE = (1 << 4),
++ gcvMMU_FREE = (2 << 4),
++}
++gceMMU_TYPE;
++
++#define gcmENTRY_TYPE(x) (x & 0xF0)
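++
++/* Entry encoding note: bits [7:4] of a heap-list entry carry the node type
++** defined above, and bits [31:8] carry the payload (the next index for a
++** single node, the page count for a free run). A minimal decoding sketch
++** (entry is an illustrative value):
++**
++** gctUINT32 entry = (5 << 8) | gcvMMU_FREE; // a free run of 5 pages
++** if (gcmENTRY_TYPE(entry) == gcvMMU_FREE) // entry & 0xF0
++** {
++** gctUINT32 count = entry >> 8; // 5; the next free index is
++** } // stored in the following entry
++*/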
++
++#define gcdMMU_TABLE_DUMP 0
++
++#define gcdUSE_MMU_EXCEPTION 0
++
++/*
++ gcdMMU_CLEAR_VALUE
++
++ The clear value for the entry of the old MMU.
++*/
++#ifndef gcdMMU_CLEAR_VALUE
++# define gcdMMU_CLEAR_VALUE 0x00000ABC
++#endif
++
++/* VIV: Start GPU address for gcvSURF_VERTEX. */
++#define gcdVERTEX_START (128 << 10)
++
++typedef struct _gcsMMU_STLB *gcsMMU_STLB_PTR;
++
++typedef struct _gcsMMU_STLB
++{
++ gctPHYS_ADDR physical;
++ gctUINT32_PTR logical;
++ gctSIZE_T size;
++ gctUINT32 physBase;
++ gctSIZE_T pageCount;
++ gctUINT32 mtlbIndex;
++ gctUINT32 mtlbEntryNum;
++ gcsMMU_STLB_PTR next;
++} gcsMMU_STLB;
++
++#if gcdSHARED_PAGETABLE
++typedef struct _gcsSharedPageTable * gcsSharedPageTable_PTR;
++typedef struct _gcsSharedPageTable
++{
++ /* Shared gckMMU object. */
++ gckMMU mmu;
++
++ /* Hardwares which use this shared pagetable. */
++ gckHARDWARE hardwares[gcdMAX_GPU_COUNT];
++
++ /* Number of cores use this shared pagetable. */
++ gctUINT32 reference;
++}
++gcsSharedPageTable;
++
++static gcsSharedPageTable_PTR sharedPageTable = gcvNULL;
++#endif
++
++#if gcdMIRROR_PAGETABLE
++typedef struct _gcsMirrorPageTable * gcsMirrorPageTable_PTR;
++typedef struct _gcsMirrorPageTable
++{
++ /* gckMMU objects. */
++ gckMMU mmus[gcdMAX_GPU_COUNT];
++
++ /* Hardwares which use this shared pagetable. */
++ gckHARDWARE hardwares[gcdMAX_GPU_COUNT];
++
++ /* Number of cores use this shared pagetable. */
++ gctUINT32 reference;
++}
++gcsMirrorPageTable;
++
++static gcsMirrorPageTable_PTR mirrorPageTable = gcvNULL;
++static gctPOINTER mirrorPageTableMutex = gcvNULL;
++#endif
++
++typedef struct _gcsDynamicSpaceNode * gcsDynamicSpaceNode_PTR;
++typedef struct _gcsDynamicSpaceNode
++{
++ gctUINT32 start;
++ gctINT32 entries;
++}
++gcsDynamicSpaceNode;
++
++static void
++_WritePageEntry(
++ IN gctUINT32_PTR PageEntry,
++ IN gctUINT32 EntryValue
++ )
++{
++ static gctUINT16 data = 0xff00;
++
++ if (*(gctUINT8 *)&data == 0xff)
++ {
++ *PageEntry = gcmSWAB32(EntryValue);
++ }
++ else
++ {
++ *PageEntry = EntryValue;
++ }
++}
++
++static gctUINT32
++_ReadPageEntry(
++ IN gctUINT32_PTR PageEntry
++ )
++{
++ static gctUINT16 data = 0xff00;
++ gctUINT32 entryValue;
++
++ if (*(gctUINT8 *)&data == 0xff)
++ {
++ entryValue = *PageEntry;
++ return gcmSWAB32(entryValue);
++ }
++ else
++ {
++ return *PageEntry;
++ }
++}
++
++static gceSTATUS
++_FillPageTable(
++ IN gctUINT32_PTR PageTable,
++ IN gctUINT32 PageCount,
++ IN gctUINT32 EntryValue
++)
++{
++ gctUINT i;
++
++ for (i = 0; i < PageCount; i++)
++ {
++ _WritePageEntry(PageTable + i, EntryValue);
++ }
++
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_Link(
++ IN gckMMU Mmu,
++ IN gctUINT32 Index,
++ IN gctUINT32 Next
++ )
++{
++ if (Index >= Mmu->pageTableEntries)
++ {
++ /* Just move heap pointer. */
++ Mmu->heapList = Next;
++ }
++ else
++ {
++ /* Address page table. */
++ gctUINT32_PTR pageTable = Mmu->pageTableLogical;
++
++ /* Dispatch on node type. */
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&pageTable[Index])))
++ {
++ case gcvMMU_SINGLE:
++ /* Set single index. */
++ _WritePageEntry(&pageTable[Index], (Next << 8) | gcvMMU_SINGLE);
++ break;
++
++ case gcvMMU_FREE:
++ /* Set index. */
++ _WritePageEntry(&pageTable[Index + 1], Next);
++ break;
++
++ default:
++ gcmkFATAL("MMU table correcupted at index %u!", Index);
++ return gcvSTATUS_HEAP_CORRUPTED;
++ }
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++static gceSTATUS
++_AddFree(
++ IN gckMMU Mmu,
++ IN gctUINT32 Index,
++ IN gctUINT32 Node,
++ IN gctUINT32 Count
++ )
++{
++ gctUINT32_PTR pageTable = Mmu->pageTableLogical;
++
++ if (Count == 1)
++ {
++ /* Initialize a single page node. */
++ _WritePageEntry(pageTable + Node, (~((1U<<8)-1)) | gcvMMU_SINGLE);
++ }
++ else
++ {
++ /* Initialize the node. */
++ _WritePageEntry(pageTable + Node + 0, (Count << 8) | gcvMMU_FREE);
++ _WritePageEntry(pageTable + Node + 1, ~0U);
++ }
++
++ /* Append the node. */
++ return _Link(Mmu, Index, Node);
++}
++
++static gceSTATUS
++_Collect(
++ IN gckMMU Mmu
++ )
++{
++ gctUINT32_PTR pageTable = Mmu->pageTableLogical;
++ gceSTATUS status;
++ gctUINT32 i, previous, start = 0, count = 0;
++
++ previous = Mmu->heapList = ~0U;
++ Mmu->freeNodes = gcvFALSE;
++
++ /* Walk the entire page table. */
++ for (i = 0; i < Mmu->pageTableEntries; ++i)
++ {
++ /* Dispatch based on type of page. */
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&pageTable[i])))
++ {
++ case gcvMMU_USED:
++ /* Used page, so close any open node. */
++ if (count > 0)
++ {
++ /* Add the node. */
++ gcmkONERROR(_AddFree(Mmu, previous, start, count));
++
++ /* Reset the node. */
++ previous = start;
++ count = 0;
++ }
++ break;
++
++ case gcvMMU_SINGLE:
++ /* Single free node. */
++ if (count++ == 0)
++ {
++ /* Start a new node. */
++ start = i;
++ }
++ break;
++
++ case gcvMMU_FREE:
++ /* A free node. */
++ if (count == 0)
++ {
++ /* Start a new node. */
++ start = i;
++ }
++
++ /* Advance the count. */
++ count += _ReadPageEntry(&pageTable[i]) >> 8;
++
++ /* Advance the index into the page table. */
++ i += (_ReadPageEntry(&pageTable[i]) >> 8) - 1;
++ break;
++
++ default:
++ gcmkFATAL("MMU page table correcupted at index %u!", i);
++ return gcvSTATUS_HEAP_CORRUPTED;
++ }
++ }
++
++ /* See if we have an open node left. */
++ if (count > 0)
++ {
++ /* Add the node to the list. */
++ gcmkONERROR(_AddFree(Mmu, previous, start, count));
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_MMU,
++ "Performed a garbage collection of the MMU heap.");
++
++ /* Success. */
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ return status;
++}
++
++static gctUINT32
++_SetPage(gctUINT32 PageAddress)
++{
++ return PageAddress
++ /* writable */
++ | (1 << 2)
++ /* Ignore exception */
++ | (0 << 1)
++ /* Present */
++ | (1 << 0);
++}
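++
++/* Worked example for _SetPage: the three low bits tag the entry, so a 4 KB
++** page at physical address 0x10000000 produces
++**
++** _SetPage(0x10000000) == 0x10000005
++** // bit 0 = 1 (present), bit 1 = 0 (no exception), bit 2 = 1 (writable)
++*/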
++
++static gceSTATUS
++_FillFlatMapping(
++ IN gckMMU Mmu,
++ IN gctUINT32 PhysBase,
++ OUT gctSIZE_T Size
++ )
++{
++ gceSTATUS status;
++ gctBOOL mutex = gcvFALSE;
++ gcsMMU_STLB_PTR head = gcvNULL, pre = gcvNULL;
++ gctUINT32 start = PhysBase & (~gcdMMU_PAGE_64K_MASK);
++ gctUINT32 end = (PhysBase + Size - 1) & (~gcdMMU_PAGE_64K_MASK);
++ gctUINT32 mStart = start >> gcdMMU_MTLB_SHIFT;
++ gctUINT32 mEnd = end >> gcdMMU_MTLB_SHIFT;
++ gctUINT32 sStart = (start & gcdMMU_STLB_64K_MASK) >> gcdMMU_STLB_64K_SHIFT;
++ gctUINT32 sEnd = (end & gcdMMU_STLB_64K_MASK) >> gcdMMU_STLB_64K_SHIFT;
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++ mutex = gcvTRUE;
++
++ while (mStart <= mEnd)
++ {
++ gcmkASSERT(mStart < gcdMMU_MTLB_ENTRY_NUM);
++ if (*(Mmu->mtlbLogical + mStart) == 0)
++ {
++ gcsMMU_STLB_PTR stlb;
++ gctPOINTER pointer = gcvNULL;
++ gctUINT32 last = (mStart == mEnd) ? sEnd : (gcdMMU_STLB_64K_ENTRY_NUM - 1);
++
++ gcmkONERROR(gckOS_Allocate(Mmu->os, sizeof(struct _gcsMMU_STLB), &pointer));
++ stlb = pointer;
++
++ stlb->mtlbEntryNum = 0;
++ stlb->next = gcvNULL;
++ stlb->physical = gcvNULL;
++ stlb->logical = gcvNULL;
++ stlb->size = gcdMMU_STLB_64K_SIZE;
++ stlb->pageCount = 0;
++
++ if (pre == gcvNULL)
++ {
++ pre = head = stlb;
++ }
++ else
++ {
++ gcmkASSERT(pre->next == gcvNULL);
++ pre->next = stlb;
++ pre = stlb;
++ }
++
++ gcmkONERROR(
++ gckOS_AllocateContiguous(Mmu->os,
++ gcvFALSE,
++ &stlb->size,
++ &stlb->physical,
++ (gctPOINTER)&stlb->logical));
++
++ gcmkONERROR(gckOS_ZeroMemory(stlb->logical, stlb->size));
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(
++ Mmu->os,
++ stlb->logical,
++ &stlb->physBase));
++
++ if (stlb->physBase & (gcdMMU_STLB_64K_SIZE - 1))
++ {
++ gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
++ }
++
++ _WritePageEntry(Mmu->mtlbLogical + mStart,
++ stlb->physBase
++ /* 64KB page size */
++ | (1 << 2)
++ /* Ignore exception */
++ | (0 << 1)
++ /* Present */
++ | (1 << 0)
++ );
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): insert MTLB[%d]: %08x\n",
++ __FUNCTION__, __LINE__,
++ mStart,
++ _ReadPageEntry(Mmu->mtlbLogical + mStart));
++#endif
++
++ stlb->mtlbIndex = mStart;
++ stlb->mtlbEntryNum = 1;
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): STLB: logical:%08x -> physical:%08x\n",
++ __FUNCTION__, __LINE__,
++ stlb->logical,
++ stlb->physBase);
++#endif
++
++ while (sStart <= last)
++ {
++ gcmkASSERT(!(start & gcdMMU_PAGE_64K_MASK));
++ _WritePageEntry(stlb->logical + sStart, _SetPage(start));
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): insert STLB[%d]: %08x\n",
++ __FUNCTION__, __LINE__,
++ sStart,
++ _ReadPageEntry(stlb->logical + sStart));
++#endif
++ /* next page. */
++ start += gcdMMU_PAGE_64K_SIZE;
++ sStart++;
++ stlb->pageCount++;
++ }
++
++ sStart = 0;
++ ++mStart;
++ }
++ else
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++ }
++
++ /* Insert the stlb into staticSTLB. */
++ if (Mmu->staticSTLB == gcvNULL)
++ {
++ Mmu->staticSTLB = head;
++ }
++ else
++ {
++ gcmkASSERT(pre != gcvNULL);
++ gcmkASSERT(pre->next == gcvNULL);
++ pre->next = Mmu->staticSTLB;
++ Mmu->staticSTLB = head;
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++
++ return gcvSTATUS_OK;
++
++OnError:
++
++ /* Roll back. */
++ while (head != gcvNULL)
++ {
++ pre = head;
++ head = head->next;
++
++ if (pre->physical != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ pre->physical,
++ pre->logical,
++ pre->size));
++ }
++
++ if (pre->mtlbEntryNum != 0)
++ {
++ gcmkASSERT(pre->mtlbEntryNum == 1);
++ _WritePageEntry(Mmu->mtlbLogical + pre->mtlbIndex, 0);
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, pre));
++ }
++
++ if (mutex)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ }
++
++ return status;
++}
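++
++/* _FillFlatMapping establishes an identity ("flat") mapping: every GPU
++** virtual address in [PhysBase, PhysBase + Size) resolves to the same
++** physical address, using 64 KB pages (bit 2 set in the MTLB entry selects
++** the 64 KB STLB layout). Assuming the usual 4 MB-per-MTLB-entry split,
++** a 16 MB flat range such as 0x10000000..0x10FFFFFF would occupy 4 MTLB
++** entries of 64 STLB pages each and map straight through unchanged.
++*/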
++
++static gceSTATUS
++_FindDynamicSpace(
++ IN gckMMU Mmu,
++ OUT gcsDynamicSpaceNode_PTR *Array,
++ OUT gctINT * Size
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gctPOINTER pointer = gcvNULL;
++ gcsDynamicSpaceNode_PTR array = gcvNULL;
++ gctINT size = 0;
++ gctINT i = 0, nodeStart = -1, nodeEntries = 0;
++
++ /* Allocate memory for the array. */
++ gcmkONERROR(gckOS_Allocate(Mmu->os,
++ gcmSIZEOF(*array) * (gcdMMU_MTLB_ENTRY_NUM / 2),
++ &pointer));
++
++ array = (gcsDynamicSpaceNode_PTR)pointer;
++
++ /* Loop all the entries. */
++ while (i < gcdMMU_MTLB_ENTRY_NUM)
++ {
++ if (!Mmu->mtlbLogical[i])
++ {
++ if (nodeStart < 0)
++ {
++ /* This is the first entry of the dynamic space. */
++ nodeStart = i;
++ nodeEntries = 1;
++ }
++ else
++ {
++ /* Other entries of the dynamic space. */
++ nodeEntries++;
++ }
++ }
++ else if (nodeStart >= 0)
++ {
++ /* Save the previous node. */
++ array[size].start = nodeStart;
++ array[size].entries = nodeEntries;
++ size++;
++
++ /* Reset the start. */
++ nodeStart = -1;
++ nodeEntries = 0;
++ }
++
++ i++;
++ }
++
++ /* Save the previous node. */
++ if (nodeStart >= 0)
++ {
++ array[size].start = nodeStart;
++ array[size].entries = nodeEntries;
++ size++;
++ }
++
++#if gcdMMU_TABLE_DUMP
++ for (i = 0; i < size; i++)
++ {
++ gckOS_Print("%s(%d): [%d]: start=%d, entries=%d.\n",
++ __FUNCTION__, __LINE__,
++ i,
++ array[i].start,
++ array[i].entries);
++ }
++#endif
++
++ *Array = array;
++ *Size = size;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ if (pointer != gcvNULL)
++ {
++ gckOS_Free(Mmu->os, pointer);
++ }
++
++ return status;
++}
++
++static gceSTATUS
++_SetupDynamicSpace(
++ IN gckMMU Mmu
++ )
++{
++ gceSTATUS status;
++ gcsDynamicSpaceNode_PTR nodeArray = gcvNULL;
++ gctINT i, nodeArraySize = 0;
++ gctUINT32 physical;
++ gctINT numEntries = 0;
++ gctUINT32_PTR pageTable;
++ gctBOOL acquired = gcvFALSE;
++
++ /* Find all the dynamic address space. */
++ gcmkONERROR(_FindDynamicSpace(Mmu, &nodeArray, &nodeArraySize));
++
++ /* TODO: We only use the largest one for now. */
++ for (i = 0; i < nodeArraySize; i++)
++ {
++ if (nodeArray[i].entries > numEntries)
++ {
++ Mmu->dynamicMappingStart = nodeArray[i].start;
++ numEntries = nodeArray[i].entries;
++ }
++ }
++
++ gckOS_Free(Mmu->os, (gctPOINTER)nodeArray);
++
++ Mmu->pageTableSize = numEntries * 4096;
++
++ Mmu->pageTableEntries = Mmu->pageTableSize / gcmSIZEOF(gctUINT32);
++
++ /* Construct Slave TLB. */
++ gcmkONERROR(gckOS_AllocateContiguous(Mmu->os,
++ gcvFALSE,
++ &Mmu->pageTableSize,
++ &Mmu->pageTablePhysical,
++ (gctPOINTER)&Mmu->pageTableLogical));
++
++#if gcdUSE_MMU_EXCEPTION
++ gcmkONERROR(_FillPageTable(Mmu->pageTableLogical,
++ Mmu->pageTableEntries,
++ /* Enable exception */
++ 1 << 1));
++#else
++ /* Invalidate all entries. */
++ gcmkONERROR(gckOS_ZeroMemory(Mmu->pageTableLogical,
++ Mmu->pageTableSize));
++#endif
++
++ /* Initialization. */
++ pageTable = Mmu->pageTableLogical;
++ _WritePageEntry(pageTable, (Mmu->pageTableEntries << 8) | gcvMMU_FREE);
++ _WritePageEntry(pageTable + 1, ~0U);
++ Mmu->heapList = 0;
++ Mmu->freeNodes = gcvFALSE;
++
++ gcmkONERROR(gckOS_GetPhysicalAddress(Mmu->os,
++ Mmu->pageTableLogical,
++ &physical));
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Map to Master TLB. */
++ for (i = (gctINT)Mmu->dynamicMappingStart;
++ i < (gctINT)Mmu->dynamicMappingStart + numEntries;
++ i++)
++ {
++ _WritePageEntry(Mmu->mtlbLogical + i,
++ physical
++ /* 4KB page size */
++ | (0 << 2)
++ /* Ignore exception */
++ | (0 << 1)
++ /* Present */
++ | (1 << 0)
++ );
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): insert MTLB[%d]: %08x\n",
++ __FUNCTION__, __LINE__,
++ i,
++ _ReadPageEntry(Mmu->mtlbLogical + i));
++#endif
++ physical += gcdMMU_STLB_4K_SIZE;
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++
++ return gcvSTATUS_OK;
++
++OnError:
++ if (Mmu->pageTableLogical)
++ {
++ /* Free the page table. */
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ Mmu->pageTablePhysical,
++ (gctPOINTER) Mmu->pageTableLogical,
++ Mmu->pageTableSize));
++ }
++
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ }
++
++ return status;
++}
++
++/*******************************************************************************
++**
++** _Construct
++**
++** Construct a new gckMMU object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gctSIZE_T MmuSize
++** Number of bytes for the page table.
++**
++** OUTPUT:
++**
++** gckMMU * Mmu
++** Pointer to a variable that receives the gckMMU object pointer.
++*/
++gceSTATUS
++_Construct(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckMMU * Mmu
++ )
++{
++ gckOS os;
++ gckHARDWARE hardware;
++ gceSTATUS status;
++ gckMMU mmu = gcvNULL;
++ gctUINT32_PTR pageTable;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Kernel=0x%x MmuSize=%lu", Kernel, MmuSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(MmuSize > 0);
++ gcmkVERIFY_ARGUMENT(Mmu != gcvNULL);
++
++ /* Extract the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Extract the gckHARDWARE object pointer. */
++ hardware = Kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Allocate memory for the gckMMU object. */
++ gcmkONERROR(gckOS_Allocate(os, sizeof(struct _gckMMU), &pointer));
++
++ mmu = pointer;
++
++ /* Initialize the gckMMU object. */
++ mmu->object.type = gcvOBJ_MMU;
++ mmu->os = os;
++ mmu->hardware = hardware;
++ mmu->pageTableMutex = gcvNULL;
++ mmu->pageTableLogical = gcvNULL;
++ mmu->mtlbLogical = gcvNULL;
++ mmu->staticSTLB = gcvNULL;
++ mmu->enabled = gcvFALSE;
++#ifdef __QNXNTO__
++ mmu->nodeList = gcvNULL;
++ mmu->nodeMutex = gcvNULL;
++#endif
++
++ /* Create the page table mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &mmu->pageTableMutex));
++
++#ifdef __QNXNTO__
++ /* Create the node list mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &mmu->nodeMutex));
++#endif
++
++ if (hardware->mmuVersion == 0)
++ {
++ mmu->pageTableSize = MmuSize;
++
++ gcmkONERROR(
++ gckOS_AllocateContiguous(os,
++ gcvFALSE,
++ &mmu->pageTableSize,
++ &mmu->pageTablePhysical,
++ &pointer));
++
++ mmu->pageTableLogical = pointer;
++
++ /* Compute number of entries in page table. */
++ mmu->pageTableEntries = mmu->pageTableSize / sizeof(gctUINT32);
++
++ /* Mark all pages as free. */
++ pageTable = mmu->pageTableLogical;
++
++#if gcdMMU_CLEAR_VALUE
++ _FillPageTable(pageTable, mmu->pageTableEntries, gcdMMU_CLEAR_VALUE);
++#endif
++
++ _WritePageEntry(pageTable, (mmu->pageTableEntries << 8) | gcvMMU_FREE);
++ _WritePageEntry(pageTable + 1, ~0U);
++ mmu->heapList = 0;
++ mmu->freeNodes = gcvFALSE;
++
++ /* Set page table address. */
++ gcmkONERROR(
++ gckHARDWARE_SetMMU(hardware, (gctPOINTER) mmu->pageTableLogical));
++ }
++ else
++ {
++ /* Allocate the 4K mode MTLB table. */
++ mmu->mtlbSize = gcdMMU_MTLB_SIZE + 64;
++
++ gcmkONERROR(
++ gckOS_AllocateContiguous(os,
++ gcvFALSE,
++ &mmu->mtlbSize,
++ &mmu->mtlbPhysical,
++ &pointer));
++
++ mmu->mtlbLogical = pointer;
++
++ /* Invalidate all the entries. */
++ gcmkONERROR(
++ gckOS_ZeroMemory(pointer, mmu->mtlbSize));
++ }
++
++ /* Return the gckMMU object pointer. */
++ *Mmu = mmu;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Mmu=0x%x", *Mmu);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (mmu != gcvNULL)
++ {
++ if (mmu->pageTableLogical != gcvNULL)
++ {
++ /* Free the page table. */
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(os,
++ mmu->pageTablePhysical,
++ (gctPOINTER) mmu->pageTableLogical,
++ mmu->pageTableSize));
++
++ }
++
++ if (mmu->mtlbLogical != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(os,
++ mmu->mtlbPhysical,
++ (gctPOINTER) mmu->mtlbLogical,
++ mmu->mtlbSize));
++ }
++
++ if (mmu->pageTableMutex != gcvNULL)
++ {
++ /* Delete the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, mmu->pageTableMutex));
++ }
++
++#ifdef __QNXNTO__
++ if (mmu->nodeMutex != gcvNULL)
++ {
++ /* Delete the mutex. */
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, mmu->nodeMutex));
++ }
++#endif
++
++ /* Mark the gckMMU object as unknown. */
++ mmu->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the allocated memory. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, mmu));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** _Destroy
++**
++** Destroy a gckMMU object.
++**
++** INPUT:
++**
++** gckMMU Mmu
++** Pointer to an gckMMU object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++_Destroy(
++ IN gckMMU Mmu
++ )
++{
++#ifdef __QNXNTO__
++ gcuVIDMEM_NODE_PTR node, next;
++#endif
++
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++#ifdef __QNXNTO__
++ /* Free all associated virtual memory. */
++ for (node = Mmu->nodeList; node != gcvNULL; node = next)
++ {
++ next = node->Virtual.next;
++ gcmkVERIFY_OK(gckVIDMEM_Free(node));
++ }
++#endif
++
++ while (Mmu->staticSTLB != gcvNULL)
++ {
++ gcsMMU_STLB_PTR pre = Mmu->staticSTLB;
++ Mmu->staticSTLB = pre->next;
++
++ if (pre->physical != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ pre->physical,
++ pre->logical,
++ pre->size));
++ }
++
++ if (pre->mtlbEntryNum != 0)
++ {
++ gcmkASSERT(pre->mtlbEntryNum == 1);
++ _WritePageEntry(Mmu->mtlbLogical + pre->mtlbIndex, 0);
++#if gcdMMU_TABLE_DUMP
++ gckOS_Print("%s(%d): clean MTLB[%d]\n",
++ __FUNCTION__, __LINE__,
++ pre->mtlbIndex);
++#endif
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, pre));
++ }
++
++ if (Mmu->hardware->mmuVersion != 0)
++ {
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ Mmu->mtlbPhysical,
++ (gctPOINTER) Mmu->mtlbLogical,
++ Mmu->mtlbSize));
++ }
++
++ /* Free the page table. */
++ gcmkVERIFY_OK(
++ gckOS_FreeContiguous(Mmu->os,
++ Mmu->pageTablePhysical,
++ (gctPOINTER) Mmu->pageTableLogical,
++ Mmu->pageTableSize));
++
++#ifdef __QNXNTO__
++ /* Delete the node list mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Mmu->os, Mmu->nodeMutex));
++#endif
++
++ /* Delete the page table mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Mmu->os, Mmu->pageTableMutex));
++
++ /* Mark the gckMMU object as unknown. */
++ Mmu->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckMMU object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, Mmu));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++** _AdjustIndex
++**
++** Adjust the index from which we search for a usable node to make sure
++** the index allocated is greater than Start.
++*/
++gceSTATUS
++_AdjustIndex(
++ IN gckMMU Mmu,
++ IN gctUINT32 Index,
++ IN gctUINT32 PageCount,
++ IN gctUINT32 Start,
++ OUT gctUINT32 * IndexAdjusted
++ )
++{
++ gceSTATUS status;
++ gctUINT32 index = Index;
++ gctUINT32_PTR map = Mmu->pageTableLogical;
++
++ gcmkHEADER();
++
++ for (; index < Mmu->pageTableEntries;)
++ {
++ gctUINT32 result = 0;
++ gctUINT32 nodeSize = 0;
++
++ if (index >= Start)
++ {
++ break;
++ }
++
++ switch (gcmENTRY_TYPE(map[index]))
++ {
++ case gcvMMU_SINGLE:
++ nodeSize = 1;
++ break;
++
++ case gcvMMU_FREE:
++ nodeSize = map[index] >> 8;
++ break;
++
++ default:
++ gcmkFATAL("MMU table correcupted at index %u!", index);
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ if (nodeSize > PageCount)
++ {
++ result = index + (nodeSize - PageCount);
++
++ if (result >= Start)
++ {
++ break;
++ }
++ }
++
++ switch (gcmENTRY_TYPE(map[index]))
++ {
++ case gcvMMU_SINGLE:
++ index = map[index] >> 8;
++ break;
++
++ case gcvMMU_FREE:
++ index = map[index + 1];
++ break;
++
++ default:
++ gcmkFATAL("MMU table correcupted at index %u!", index);
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++
++ *IndexAdjusted = index;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckMMU_Construct(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckMMU * Mmu
++ )
++{
++#if gcdSHARED_PAGETABLE
++ gceSTATUS status;
++ gctPOINTER pointer;
++
++ gcmkHEADER_ARG("Kernel=0x%08x", Kernel);
++
++ if (sharedPageTable == gcvNULL)
++ {
++ gcmkONERROR(
++ gckOS_Allocate(Kernel->os,
++ sizeof(struct _gcsSharedPageTable),
++ &pointer));
++ sharedPageTable = pointer;
++
++ gcmkONERROR(
++ gckOS_ZeroMemory(sharedPageTable,
++ sizeof(struct _gcsSharedPageTable)));
++
++ gcmkONERROR(_Construct(Kernel, MmuSize, &sharedPageTable->mmu));
++ }
++ else if (Kernel->hardware->mmuVersion == 0)
++ {
++ /* Set page table address. */
++ gcmkONERROR(
++ gckHARDWARE_SetMMU(Kernel->hardware, (gctPOINTER) sharedPageTable->mmu->pageTableLogical));
++ }
++
++ *Mmu = sharedPageTable->mmu;
++
++ sharedPageTable->hardwares[sharedPageTable->reference] = Kernel->hardware;
++
++ sharedPageTable->reference++;
++
++ gcmkFOOTER_ARG("sharedPageTable->reference=%lu", sharedPageTable->reference);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (sharedPageTable)
++ {
++ if (sharedPageTable->mmu)
++ {
++ gcmkVERIFY_OK(gckMMU_Destroy(sharedPageTable->mmu));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, sharedPageTable));
++ }
++
++ gcmkFOOTER();
++ return status;
++#elif gcdMIRROR_PAGETABLE
++ gceSTATUS status;
++ gctPOINTER pointer;
++
++ gcmkHEADER_ARG("Kernel=0x%08x", Kernel);
++
++ if (mirrorPageTable == gcvNULL)
++ {
++ gcmkONERROR(
++ gckOS_Allocate(Kernel->os,
++ sizeof(struct _gcsMirrorPageTable),
++ &pointer));
++ mirrorPageTable = pointer;
++
++ gcmkONERROR(
++ gckOS_ZeroMemory(mirrorPageTable,
++ sizeof(struct _gcsMirrorPageTable)));
++
++ gcmkONERROR(
++ gckOS_CreateMutex(Kernel->os, &mirrorPageTableMutex));
++ }
++
++ gcmkONERROR(_Construct(Kernel, MmuSize, Mmu));
++
++ mirrorPageTable->mmus[mirrorPageTable->reference] = *Mmu;
++
++ mirrorPageTable->hardwares[mirrorPageTable->reference] = Kernel->hardware;
++
++ mirrorPageTable->reference++;
++
++ gcmkFOOTER_ARG("mirrorPageTable->reference=%lu", mirrorPageTable->reference);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mirrorPageTable && mirrorPageTable->reference == 0)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, mirrorPageTable));
++ }
++
++ gcmkFOOTER();
++ return status;
++#else
++ return _Construct(Kernel, MmuSize, Mmu);
++#endif
++}
++
++gceSTATUS
++gckMMU_Destroy(
++ IN gckMMU Mmu
++ )
++{
++#if gcdSHARED_PAGETABLE
++ sharedPageTable->reference--;
++
++ if (sharedPageTable->reference == 0)
++ {
++ if (sharedPageTable->mmu)
++ {
++ gcmkVERIFY_OK(_Destroy(Mmu));
++ }
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, sharedPageTable));
++ }
++
++ return gcvSTATUS_OK;
++#elif gcdMIRROR_PAGETABLE
++ mirrorPageTable->reference--;
++
++ if (mirrorPageTable->reference == 0)
++ {
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, mirrorPageTable));
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, mirrorPageTableMutex));
++ }
++
++ return _Destroy(Mmu);
++#else
++ return _Destroy(Mmu);
++#endif
++}
++
++/*******************************************************************************
++**
++** _AllocatePages
++**
++** Allocate pages inside the page table.
++**
++** INPUT:
++**
++** gckMMU Mmu
++** Pointer to a gckMMU object.
++**
++** gctSIZE_T PageCount
++** Number of pages to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * PageTable
++** Pointer to a variable that receives the base address of the page
++** table.
++**
++** gctUINT32 * Address
++** Pointer to a variable that receives the hardware specific address.
++*/
++gceSTATUS
++_AllocatePages(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ IN gceSURF_TYPE Type,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gctBOOL mutex = gcvFALSE;
++ gctUINT32 index = 0, previous = ~0U, left;
++ gctUINT32_PTR pageTable;
++ gctBOOL gotIt;
++ gctUINT32 address;
++
++ gcmkHEADER_ARG("Mmu=0x%x PageCount=%lu", Mmu, PageCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++
++ if (PageCount > Mmu->pageTableEntries)
++ {
++ gcmkPRINT("[galcore]: %s(%d): Run out of free page entry.",
++ __FUNCTION__, __LINE__);
++
++ /* Not enough pages available. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++ mutex = gcvTRUE;
++
++ /* Cast pointer to page table. */
++ for (pageTable = Mmu->pageTableLogical, gotIt = gcvFALSE; !gotIt;)
++ {
++ index = Mmu->heapList;
++
++ if ((Mmu->hardware->mmuVersion == 0) && (Type == gcvSURF_VERTEX))
++ {
++ gcmkONERROR(_AdjustIndex(
++ Mmu,
++ index,
++ PageCount,
++ gcdVERTEX_START / gcmSIZEOF(gctUINT32),
++ &index
++ ));
++ }
++
++ /* Walk the heap list. */
++ for (; !gotIt && (index < Mmu->pageTableEntries);)
++ {
++ /* Check the node type. */
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&pageTable[index])))
++ {
++ case gcvMMU_SINGLE:
++ /* Single nodes are valid if we only need 1 page. */
++ if (PageCount == 1)
++ {
++ gotIt = gcvTRUE;
++ }
++ else
++ {
++ /* Move to next node. */
++ previous = index;
++ index = _ReadPageEntry(&pageTable[index]) >> 8;
++ }
++ break;
++
++ case gcvMMU_FREE:
++ /* Test if the node has enough space. */
++ if (PageCount <= (_ReadPageEntry(&pageTable[index]) >> 8))
++ {
++ gotIt = gcvTRUE;
++ }
++ else
++ {
++ /* Move to next node. */
++ previous = index;
++ index = _ReadPageEntry(&pageTable[index + 1]);
++ }
++ break;
++
++ default:
++ gcmkFATAL("MMU table correcupted at index %u!", index);
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++
++ /* Test if we are out of memory. */
++ if (index >= Mmu->pageTableEntries)
++ {
++ if (Mmu->freeNodes)
++ {
++ /* Time to move out the trash! */
++ gcmkONERROR(_Collect(Mmu));
++ }
++ else
++ {
++ gcmkPRINT("[galcore]: %s(%d): Run out of free page entry.",
++ __FUNCTION__, __LINE__);
++
++ /* Out of resources. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++ }
++
++ switch (gcmENTRY_TYPE(_ReadPageEntry(&pageTable[index])))
++ {
++ case gcvMMU_SINGLE:
++ /* Unlink single node from free list. */
++ gcmkONERROR(
++ _Link(Mmu, previous, _ReadPageEntry(&pageTable[index]) >> 8));
++ break;
++
++ case gcvMMU_FREE:
++ /* Check how many pages will be left. */
++ left = (_ReadPageEntry(&pageTable[index]) >> 8) - PageCount;
++ switch (left)
++ {
++ case 0:
++ /* The entire node is consumed, just unlink it. */
++ gcmkONERROR(
++ _Link(Mmu, previous, _ReadPageEntry(&pageTable[index + 1])));
++ break;
++
++ case 1:
++ /* One page will remain. Convert the node to a single node and
++ ** advance the index. */
++ _WritePageEntry(&pageTable[index], (_ReadPageEntry(&pageTable[index + 1]) << 8) | gcvMMU_SINGLE);
++ index ++;
++ break;
++
++ default:
++ /* Enough pages remain for a new node. However, we will just adjust
++ ** the size of the current node and advance the index. */
++ _WritePageEntry(&pageTable[index], (left << 8) | gcvMMU_FREE);
++ index += left;
++ break;
++ }
++ break;
++ }
++
++ /* Mark node as used. */
++ gcmkONERROR(_FillPageTable(&pageTable[index], PageCount, gcvMMU_USED));
++
++ /* Return pointer to page table. */
++ *PageTable = &pageTable[index];
++
++ /* Build virtual address. */
++ if (Mmu->hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(
++ gckHARDWARE_BuildVirtualAddress(Mmu->hardware, index, 0, &address));
++ }
++ else
++ {
++ gctUINT32 masterOffset = index / gcdMMU_STLB_4K_ENTRY_NUM
++ + Mmu->dynamicMappingStart;
++ gctUINT32 slaveOffset = index % gcdMMU_STLB_4K_ENTRY_NUM;
++
++ address = (masterOffset << gcdMMU_MTLB_SHIFT)
++ | (slaveOffset << gcdMMU_STLB_4K_SHIFT);
++ }
++
++ if (Address != gcvNULL)
++ {
++ *Address = address;
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*PageTable=0x%x *Address=%08x",
++ *PageTable, gcmOPT_VALUE(Address));
++ return gcvSTATUS_OK;
++
++OnError:
++
++ if (mutex)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
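++
++/* Address composition note for the MMU v2 path above: the returned GPU
++** address packs the MTLB slot and the STLB slot together. Assuming the
++** usual shift values (gcdMMU_MTLB_SHIFT = 22, gcdMMU_STLB_4K_SHIFT = 12,
++** i.e. 4 MB per MTLB entry holding 1024 4 KB pages), page table index 1030
++** with dynamicMappingStart == 0 yields:
++**
++** masterOffset = 1030 / 1024 = 1; slaveOffset = 1030 % 1024 = 6;
++** address = (1 << 22) | (6 << 12) = 0x00406000
++*/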
++
++/*******************************************************************************
++**
++** _FreePages
++**
++** Free pages inside the page table.
++**
++** INPUT:
++**
++** gckMMU Mmu
++** Pointer to a gckMMU object.
++**
++** gctPOINTER PageTable
++** Base address of the page table to free.
++**
++** gctSIZE_T PageCount
++** Number of pages to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++_FreePages(
++ IN gckMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ )
++{
++ gctUINT32_PTR pageTable;
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Mmu=0x%x PageTable=0x%x PageCount=%lu",
++ Mmu, PageTable, PageCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++
++ /* Convert the pointer. */
++ pageTable = (gctUINT32_PTR) PageTable;
++
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++#if gcdMMU_CLEAR_VALUE
++ if (Mmu->hardware->mmuVersion == 0)
++ {
++ _FillPageTable(pageTable, PageCount, gcdMMU_CLEAR_VALUE);
++ }
++#endif
++
++ if (PageCount == 1)
++ {
++ /* Single page node. */
++ _WritePageEntry(pageTable,
++ (~((1U<<8)-1)) | gcvMMU_SINGLE
++#if gcdUSE_MMU_EXCEPTION
++ /* Enable exception */
++ | 1 << 1
++#endif
++ );
++ }
++ else
++ {
++ /* Mark the node as free. */
++ _WritePageEntry(pageTable,
++ (PageCount << 8) | gcvMMU_FREE
++#if gcdUSE_MMU_EXCEPTION
++ /* Enable exception */
++ | 1 << 1
++#endif
++ );
++ _WritePageEntry(pageTable + 1, ~0U);
++
++#if gcdUSE_MMU_EXCEPTION
++ /* Enable exception */
++ gcmkVERIFY_OK(_FillPageTable(pageTable + 2, PageCount - 2, 1 << 1));
++#endif
++ }
++
++ /* We have free nodes. */
++ Mmu->freeNodes = gcvTRUE;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckMMU_AllocatePages(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ )
++{
++ return gckMMU_AllocatePagesEx(
++ Mmu, PageCount, gcvSURF_UNKNOWN, PageTable, Address);
++}
++
++gceSTATUS
++gckMMU_AllocatePagesEx(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ IN gceSURF_TYPE Type,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ )
++{
++#if gcdMIRROR_PAGETABLE
++ gceSTATUS status;
++ gctPOINTER pageTable;
++ gctUINT32 address;
++ gctINT i;
++ gckMMU mmu;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL allocated = gcvFALSE;
++
++ gckOS_AcquireMutex(Mmu->os, mirrorPageTableMutex, gcvINFINITE);
++ acquired = gcvTRUE;
++
++ /* Allocate page table for current MMU. */
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ if (Mmu == mirrorPageTable->mmus[i])
++ {
++ gcmkONERROR(_AllocatePages(Mmu, PageCount, Type, PageTable, Address));
++ allocated = gcvTRUE;
++ }
++ }
++
++ /* Allocate page table for other MMUs. */
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ mmu = mirrorPageTable->mmus[i];
++
++ if (Mmu != mmu)
++ {
++ gcmkONERROR(_AllocatePages(mmu, PageCount, Type, &pageTable, &address));
++ gcmkASSERT(address == *Address);
++ }
++ }
++
++ gckOS_ReleaseMutex(Mmu->os, mirrorPageTableMutex);
++ acquired = gcvFALSE;
++
++ return gcvSTATUS_OK;
++OnError:
++
++ if (allocated)
++ {
++        /* Page tables for multiple GPUs are always kept identical, so it is
++         * impossible for the first one to allocate successfully while the others fail.
++ */
++ gcmkASSERT(0);
++ }
++
++ if (acquired)
++ {
++ gckOS_ReleaseMutex(Mmu->os, mirrorPageTableMutex);
++ }
++
++ return status;
++#else
++ return _AllocatePages(Mmu, PageCount, Type, PageTable, Address);
++#endif
++}
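++/* Minimal usage sketch for the two entry points above (illustrative only;
++** "mmu" and the page count are assumptions, not part of this driver):
++**
++**     gctPOINTER pageTable;
++**     gctUINT32 address;
++**     gcmkONERROR(gckMMU_AllocatePagesEx(mmu, 16, gcvSURF_UNKNOWN,
++**                                        &pageTable, &address));
++**     ...
++**     gcmkONERROR(gckMMU_FreePages(mmu, pageTable, 16));
++*/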
++
++gceSTATUS
++gckMMU_FreePages(
++ IN gckMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ )
++{
++#if gcdMIRROR_PAGETABLE
++ gctINT i;
++ gctUINT32 offset;
++ gckMMU mmu;
++
++ gckOS_AcquireMutex(Mmu->os, mirrorPageTableMutex, gcvINFINITE);
++
++ gcmkVERIFY_OK(_FreePages(Mmu, PageTable, PageCount));
++
++ offset = (gctUINT32)PageTable - (gctUINT32)Mmu->pageTableLogical;
++
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ mmu = mirrorPageTable->mmus[i];
++
++ if (mmu != Mmu)
++ {
++ gcmkVERIFY_OK(_FreePages(mmu, mmu->pageTableLogical + offset/4, PageCount));
++ }
++ }
++
++ gckOS_ReleaseMutex(Mmu->os, mirrorPageTableMutex);
++
++ return gcvSTATUS_OK;
++#else
++ return _FreePages(Mmu, PageTable, PageCount);
++#endif
++}
++
++gceSTATUS
++gckMMU_Enable(
++ IN gckMMU Mmu,
++ IN gctUINT32 PhysBaseAddr,
++ IN gctUINT32 PhysSize
++ )
++{
++ gceSTATUS status;
++#if gcdSHARED_PAGETABLE
++ gckHARDWARE hardware;
++ gctINT i;
++#endif
++
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++#if gcdSHARED_PAGETABLE
++ if (Mmu->enabled)
++ {
++ gcmkFOOTER_ARG("Status=%d", gcvSTATUS_SKIP);
++ return gcvSTATUS_SKIP;
++ }
++#endif
++
++ if (Mmu->hardware->mmuVersion == 0)
++ {
++ /* Success. */
++ gcmkFOOTER_ARG("Status=%d", gcvSTATUS_SKIP);
++ return gcvSTATUS_SKIP;
++ }
++ else
++ {
++ if (PhysSize != 0)
++ {
++ gcmkONERROR(_FillFlatMapping(
++ Mmu,
++ PhysBaseAddr,
++ PhysSize
++ ));
++ }
++
++ gcmkONERROR(_SetupDynamicSpace(Mmu));
++
++#if gcdSHARED_PAGETABLE
++ for(i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ hardware = sharedPageTable->hardwares[i];
++ if (hardware != gcvNULL)
++ {
++ gcmkONERROR(
++ gckHARDWARE_SetMMUv2(
++ hardware,
++ gcvTRUE,
++ Mmu->mtlbLogical,
++ gcvMMU_MODE_4K,
++ (gctUINT8_PTR)Mmu->mtlbLogical + gcdMMU_MTLB_SIZE,
++ gcvFALSE
++ ));
++ }
++ }
++#else
++ gcmkONERROR(
++ gckHARDWARE_SetMMUv2(
++ Mmu->hardware,
++ gcvTRUE,
++ Mmu->mtlbLogical,
++ gcvMMU_MODE_4K,
++ (gctUINT8_PTR)Mmu->mtlbLogical + gcdMMU_MTLB_SIZE,
++ gcvFALSE
++ ));
++#endif
++
++ Mmu->enabled = gcvTRUE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
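++/* The enable sequence above, in order: (1) optionally flat-map the physical
++** window [PhysBaseAddr, PhysBaseAddr + PhysSize), (2) set up the dynamic
++** mapping space, (3) program the MTLB in 4K mode via gckHARDWARE_SetMMUv2.
++** This is a summary of the code above, not additional behavior. */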
++
++gceSTATUS
++gckMMU_SetPage(
++ IN gckMMU Mmu,
++ IN gctUINT32 PageAddress,
++ IN gctUINT32 *PageEntry
++ )
++{
++#if gcdMIRROR_PAGETABLE
++ gctUINT32_PTR pageEntry;
++ gctINT i;
++ gckMMU mmu;
++ gctUINT32 offset = (gctUINT32)PageEntry - (gctUINT32)Mmu->pageTableLogical;
++#endif
++
++ gctUINT32 data;
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageEntry != gcvNULL);
++ gcmkVERIFY_ARGUMENT(!(PageAddress & 0xFFF));
++
++ if (Mmu->hardware->mmuVersion == 0)
++ {
++ data = PageAddress;
++ }
++ else
++ {
++ data = _SetPage(PageAddress);
++ }
++
++ _WritePageEntry(PageEntry, data);
++
++#if gcdMIRROR_PAGETABLE
++ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
++ {
++ mmu = mirrorPageTable->mmus[i];
++
++ if (mmu != Mmu)
++ {
++ pageEntry = mmu->pageTableLogical + offset / 4;
++
++ if (mmu->hardware->mmuVersion == 0)
++ {
++ _WritePageEntry(pageEntry, PageAddress);
++ }
++ else
++ {
++ _WritePageEntry(pageEntry, _SetPage(PageAddress));
++ }
++ }
++
++ }
++#endif
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#ifdef __QNXNTO__
++gceSTATUS
++gckMMU_InsertNode(
++ IN gckMMU Mmu,
++ IN gcuVIDMEM_NODE_PTR Node)
++{
++ gceSTATUS status;
++ gctBOOL mutex = gcvFALSE;
++
++ gcmkHEADER_ARG("Mmu=0x%x Node=0x%x", Mmu, Node);
++
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->nodeMutex, gcvINFINITE));
++ mutex = gcvTRUE;
++
++ Node->Virtual.next = Mmu->nodeList;
++ Mmu->nodeList = Node;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
++
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mutex)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckMMU_RemoveNode(
++ IN gckMMU Mmu,
++ IN gcuVIDMEM_NODE_PTR Node)
++{
++ gceSTATUS status;
++ gctBOOL mutex = gcvFALSE;
++ gcuVIDMEM_NODE_PTR *iter;
++
++ gcmkHEADER_ARG("Mmu=0x%x Node=0x%x", Mmu, Node);
++
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->nodeMutex, gcvINFINITE));
++ mutex = gcvTRUE;
++
++ for (iter = &Mmu->nodeList; *iter; iter = &(*iter)->Virtual.next)
++ {
++ if (*iter == Node)
++ {
++ *iter = Node->Virtual.next;
++ break;
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
++
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mutex)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckMMU_FreeHandleMemory(
++ IN gckKERNEL Kernel,
++ IN gckMMU Mmu,
++ IN gctUINT32 Pid
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gcuVIDMEM_NODE_PTR curr, next;
++
++ gcmkHEADER_ARG("Kernel=0x%x, Mmu=0x%x Pid=%u", Kernel, Mmu, Pid);
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->nodeMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ for (curr = Mmu->nodeList; curr != gcvNULL; curr = next)
++ {
++ next = curr->Virtual.next;
++
++ if (curr->Virtual.processID == Pid)
++ {
++ while (curr->Virtual.unlockPendings[Kernel->core] == 0 && curr->Virtual.lockeds[Kernel->core] > 0)
++ {
++ gcmkONERROR(gckVIDMEM_Unlock(Kernel, curr, gcvSURF_TYPE_UNKNOWN, gcvNULL));
++ }
++
++ gcmkVERIFY_OK(gckVIDMEM_Free(curr));
++ }
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
++
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++gceSTATUS
++gckMMU_Flush(
++ IN gckMMU Mmu
++ )
++{
++ gckHARDWARE hardware;
++#if gcdSHARED_PAGETABLE
++ gctINT i;
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ continue;
++ }
++#endif
++ hardware = sharedPageTable->hardwares[i];
++ if (hardware)
++ {
++ /* Notify cores who use this page table. */
++ gcmkVERIFY_OK(
++ gckOS_AtomSet(hardware->os, hardware->pageTableDirty, 1));
++ }
++ }
++#elif gcdMIRROR_PAGETABLE
++ gctINT i;
++ for (i = 0; i < mirrorPageTable->reference; i++)
++ {
++ hardware = mirrorPageTable->hardwares[i];
++
++ /* Notify cores who use this page table. */
++ gcmkVERIFY_OK(
++ gckOS_AtomSet(hardware->os, hardware->pageTableDirty, 1));
++ }
++#else
++ hardware = Mmu->hardware;
++ gcmkVERIFY_OK(
++ gckOS_AtomSet(hardware->os, hardware->pageTableDirty, 1));
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckMMU_DumpPageTableEntry(
++ IN gckMMU Mmu,
++ IN gctUINT32 Address
++ )
++{
++ gctUINT32_PTR pageTable;
++ gctUINT32 index;
++ gctUINT32 mtlb, stlb;
++
++ gcmkHEADER_ARG("Mmu=0x%08X Address=0x%08X", Mmu, Address);
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ gcmkASSERT(Mmu->hardware->mmuVersion > 0);
++
++ mtlb = (Address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT;
++ stlb = (Address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT;
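++    /* Illustration of the split (shift values assumed to be the usual
++    ** 4MB-per-MTLB-entry / 4K-per-page layout): mtlb = Address >> 22,
++    ** stlb = (Address >> 12) & 0x3FF. */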
++
++ if (Address >= 0x80000000)
++ {
++ pageTable = Mmu->pageTableLogical;
++
++ index = (mtlb - Mmu->dynamicMappingStart)
++ * gcdMMU_STLB_4K_ENTRY_NUM
++ + stlb;
++
++ gcmkPRINT(" Page table entry = 0x%08X", _ReadPageEntry(pageTable + index));
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/******************************************************************************
++****************************** T E S T C O D E ******************************
++******************************************************************************/
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_mmu_vg.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_mmu_vg.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_mmu_vg.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_mmu_vg.c 2015-07-27 23:13:06.194879670 +0200
+@@ -0,0 +1,522 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#if gcdENABLE_VG
++
++#define _GC_OBJ_ZONE gcvZONE_MMU
++
++/*******************************************************************************
++**
++** gckVGMMU_Construct
++**
++** Construct a new gckVGMMU object.
++**
++** INPUT:
++**
++** gckVGKERNEL Kernel
++** Pointer to a gckVGKERNEL object.
++**
++** gctSIZE_T MmuSize
++** Number of bytes for the page table.
++**
++** OUTPUT:
++**
++** gckVGMMU * Mmu
++** Pointer to a variable that receives the gckVGMMU object pointer.
++*/
++gceSTATUS gckVGMMU_Construct(
++ IN gckVGKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckVGMMU * Mmu
++ )
++{
++ gckOS os;
++ gckVGHARDWARE hardware;
++ gceSTATUS status;
++ gckVGMMU mmu;
++ gctUINT32 * pageTable;
++ gctUINT32 i;
++
++ gcmkHEADER_ARG("Kernel=0x%x MmuSize=0x%x Mmu=0x%x", Kernel, MmuSize, Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(MmuSize > 0);
++ gcmkVERIFY_ARGUMENT(Mmu != gcvNULL);
++
++ /* Extract the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Extract the gckVGHARDWARE object pointer. */
++ hardware = Kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Allocate memory for the gckVGMMU object. */
++ status = gckOS_Allocate(os, sizeof(struct _gckVGMMU), (gctPOINTER *) &mmu);
++
++ if (status < 0)
++ {
++ /* Error. */
++ gcmkFATAL(
++ "%s(%d): could not allocate gckVGMMU object.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ /* Initialize the gckVGMMU object. */
++ mmu->object.type = gcvOBJ_MMU;
++ mmu->os = os;
++ mmu->hardware = hardware;
++
++ /* Create the mutex. */
++ status = gckOS_CreateMutex(os, &mmu->mutex);
++
++ if (status < 0)
++ {
++ /* Roll back. */
++ mmu->object.type = gcvOBJ_UNKNOWN;
++ gcmkVERIFY_OK(gckOS_Free(os, mmu));
++
++ gcmkFOOTER();
++ /* Error. */
++ return status;
++ }
++
++ /* Allocate the page table. */
++ mmu->pageTableSize = MmuSize;
++ status = gckOS_AllocateContiguous(os,
++ gcvFALSE,
++ &mmu->pageTableSize,
++ &mmu->pageTablePhysical,
++ &mmu->pageTableLogical);
++
++ if (status < 0)
++ {
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, mmu->mutex));
++
++ mmu->object.type = gcvOBJ_UNKNOWN;
++ gcmkVERIFY_OK(gckOS_Free(os, mmu));
++
++ /* Error. */
++ gcmkFATAL(
++ "%s(%d): could not allocate page table.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ /* Compute number of entries in page table. */
++ mmu->entryCount = mmu->pageTableSize / sizeof(gctUINT32);
++ mmu->entry = 0;
++
++ /* Mark the entire page table as available. */
++ pageTable = (gctUINT32 *) mmu->pageTableLogical;
++ for (i = 0; i < mmu->entryCount; i++)
++ {
++ pageTable[i] = (gctUINT32)~0;
++ }
++
++ /* Set page table address. */
++ status = gckVGHARDWARE_SetMMU(hardware, mmu->pageTableLogical);
++
++ if (status < 0)
++ {
++ /* Free the page table. */
++ gcmkVERIFY_OK(gckOS_FreeContiguous(mmu->os,
++ mmu->pageTablePhysical,
++ mmu->pageTableLogical,
++ mmu->pageTableSize));
++
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(os, mmu->mutex));
++
++ mmu->object.type = gcvOBJ_UNKNOWN;
++ gcmkVERIFY_OK(gckOS_Free(os, mmu));
++
++ /* Error. */
++ gcmkFATAL(
++ "%s(%d): could not program page table.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER();
++ return status;
++ }
++
++ /* Return the gckVGMMU object pointer. */
++ *Mmu = mmu;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_MMU,
++ "%s(%d): %u entries at %p.(0x%08X)\n",
++ __FUNCTION__, __LINE__,
++ mmu->entryCount,
++ mmu->pageTableLogical,
++ mmu->pageTablePhysical
++ );
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
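++/* Construction sketch, mirroring the call made from gckVGKERNEL_Construct
++** later in this patch (the 32KB size is the one used there):
++**
++**     gckVGMMU mmu;
++**     gcmkONERROR(gckVGMMU_Construct(kernel, gcmKB2BYTES(32), &mmu));
++**     ...
++**     gcmkVERIFY_OK(gckVGMMU_Destroy(mmu));
++*/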
++
++/*******************************************************************************
++**
++** gckVGMMU_Destroy
++**
++** Destroy a gckVGMMU object.
++**
++** INPUT:
++**
++** gckVGMMU Mmu
++** Pointer to a gckVGMMU object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGMMU_Destroy(
++ IN gckVGMMU Mmu
++ )
++{
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++
++ /* Free the page table. */
++ gcmkVERIFY_OK(gckOS_FreeContiguous(Mmu->os,
++ Mmu->pageTablePhysical,
++ Mmu->pageTableLogical,
++ Mmu->pageTableSize));
++
++ /* Roll back. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Mmu->os, Mmu->mutex));
++
++ /* Mark the gckVGMMU object as unknown. */
++ Mmu->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckVGMMU object. */
++ gcmkVERIFY_OK(gckOS_Free(Mmu->os, Mmu));
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVGMMU_AllocatePages
++**
++** Allocate pages inside the page table.
++**
++** INPUT:
++**
++** gckVGMMU Mmu
++** Pointer to a gckVGMMU object.
++**
++** gctSIZE_T PageCount
++** Number of pages to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * PageTable
++** Pointer to a variable that receives the base address of the page
++** table.
++**
++** gctUINT32 * Address
++** Pointer to a variable that receives the hardware specific address.
++*/
++gceSTATUS gckVGMMU_AllocatePages(
++ IN gckVGMMU Mmu,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gctUINT32 tail, index, i;
++ gctUINT32 * table;
++ gctBOOL allocated = gcvFALSE;
++
++ gcmkHEADER_ARG("Mmu=0x%x PageCount=0x%x PageTable=0x%x Address=0x%x",
++ Mmu, PageCount, PageTable, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_MMU,
++ "%s(%d): %u pages.\n",
++ __FUNCTION__, __LINE__,
++ PageCount
++ );
++
++ if (PageCount > Mmu->entryCount)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_MMU,
++ "%s(%d): page table too small for %u pages.\n",
++ __FUNCTION__, __LINE__,
++ PageCount
++ );
++
++ gcmkFOOTER_NO();
++        /* Not enough pages available. */
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ /* Grab the mutex. */
++ status = gckOS_AcquireMutex(Mmu->os, Mmu->mutex, gcvINFINITE);
++
++ if (status < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_MMU,
++ "%s(%d): could not acquire mutex.\n"
++ ,__FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER();
++ /* Error. */
++ return status;
++ }
++
++ /* Compute the tail for this allocation. */
++ tail = Mmu->entryCount - PageCount;
++
++ /* Walk all entries until we find enough slots. */
++ for (index = Mmu->entry; index <= tail;)
++ {
++ /* Access page table. */
++ table = (gctUINT32 *) Mmu->pageTableLogical + index;
++
++ /* See if all slots are available. */
++ for (i = 0; i < PageCount; i++, table++)
++ {
++ if (*table != ~0)
++ {
++ /* Start from next slot. */
++ index += i + 1;
++ break;
++ }
++ }
++
++ if (i == PageCount)
++ {
++ /* Bail out if we have enough page entries. */
++ allocated = gcvTRUE;
++ break;
++ }
++ }
++
++ if (!allocated)
++ {
++ if (status >= 0)
++ {
++ /* Walk all entries until we find enough slots. */
++ for (index = 0; index <= tail;)
++ {
++ /* Access page table. */
++ table = (gctUINT32 *) Mmu->pageTableLogical + index;
++
++ /* See if all slots are available. */
++ for (i = 0; i < PageCount; i++, table++)
++ {
++ if (*table != ~0)
++ {
++ /* Start from next slot. */
++ index += i + 1;
++ break;
++ }
++ }
++
++ if (i == PageCount)
++ {
++ /* Bail out if we have enough page entries. */
++ allocated = gcvTRUE;
++ break;
++ }
++ }
++ }
++ }
++
++ if (!allocated && (status >= 0))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_MMU,
++ "%s(%d): not enough free pages for %u pages.\n",
++ __FUNCTION__, __LINE__,
++ PageCount
++ );
++
++ /* Not enough empty slots available. */
++ status = gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ if (status >= 0)
++ {
++ /* Build virtual address. */
++ status = gckVGHARDWARE_BuildVirtualAddress(Mmu->hardware,
++ index,
++ 0,
++ Address);
++
++ if (status >= 0)
++ {
++ /* Update current entry into page table. */
++ Mmu->entry = index + PageCount;
++
++ /* Return pointer to page table. */
++ *PageTable = (gctUINT32 *) Mmu->pageTableLogical + index;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_MMU,
++ "%s(%d): allocated %u pages at index %u (0x%08X) @ %p.\n",
++ __FUNCTION__, __LINE__,
++ PageCount,
++ index,
++ *Address,
++ *PageTable
++ );
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->mutex));
++ gcmkFOOTER();
++
++ /* Return status. */
++ return status;
++}
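++/* The allocator above is a first-fit scan over a flat table whose free
++** slots hold ~0: it searches from Mmu->entry to the end, then wraps once
++** from index 0.  Illustration (values assumed): with entryCount = 8,
++** entry = 5 and PageCount = 3, the first pass tries the window 5..7 and,
++** if any of those slots is in use, the wrap-around pass retries from 0. */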
++
++/*******************************************************************************
++**
++** gckVGMMU_FreePages
++**
++** Free pages inside the page table.
++**
++** INPUT:
++**
++** gckVGMMU Mmu
++** Pointer to a gckVGMMU object.
++**
++** gctPOINTER PageTable
++** Base address of the page table to free.
++**
++** gctSIZE_T PageCount
++** Number of pages to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGMMU_FreePages(
++ IN gckVGMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ )
++{
++ gctUINT32 * table;
++
++ gcmkHEADER_ARG("Mmu=0x%x PageTable=0x%x PageCount=0x%x",
++ Mmu, PageTable, PageCount);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_MMU,
++ "%s(%d): freeing %u pages at index %u @ %p.\n",
++ __FUNCTION__, __LINE__,
++ PageCount,
++ ((gctUINT32 *) PageTable - (gctUINT32 *) Mmu->pageTableLogical),
++ PageTable
++ );
++
++ /* Convert pointer. */
++ table = (gctUINT32 *) PageTable;
++
++ /* Mark the page table entries as available. */
++ while (PageCount-- > 0)
++ {
++ *table++ = (gctUINT32)~0;
++ }
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGMMU_SetPage(
++ IN gckVGMMU Mmu,
++ IN gctUINT32 PageAddress,
++ IN gctUINT32 *PageEntry
++ )
++{
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
++ gcmkVERIFY_ARGUMENT(PageEntry != gcvNULL);
++ gcmkVERIFY_ARGUMENT(!(PageAddress & 0xFFF));
++
++ *PageEntry = PageAddress;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckVGMMU_Flush(
++ IN gckVGMMU Mmu
++ )
++{
++ gckVGHARDWARE hardware;
++
++ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
++
++ hardware = Mmu->hardware;
++ gcmkVERIFY_OK(
++ gckOS_AtomSet(hardware->os, hardware->pageTableDirty, 1));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#endif /* gcdENABLE_VG */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_power.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_power.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_power.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_power.c 2015-07-27 23:13:06.194879670 +0200
+@@ -0,0 +1,347 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_POWER
++
++/******************************************************************************\
++************************ Dynamic Voltage Frequency Setting *********************
++\******************************************************************************/
++#if gcdDVFS
++static gctUINT32
++_GetLoadHistory(
++ IN gckDVFS Dvfs,
++ IN gctUINT32 Select,
++ IN gctUINT32 Index
++)
++{
++ return Dvfs->loads[Index];
++}
++
++static void
++_IncreaseScale(
++ IN gckDVFS Dvfs,
++ IN gctUINT32 Load,
++ OUT gctUINT8 *Scale
++ )
++{
++ if (Dvfs->currentScale < 32)
++ {
++ *Scale = Dvfs->currentScale + 8;
++ }
++ else
++ {
++ *Scale = Dvfs->currentScale + 8;
++ *Scale = gcmMIN(64, *Scale);
++ }
++}
++
++static void
++_RecordFrequencyHistory(
++ gckDVFS Dvfs,
++ gctUINT32 Frequency
++ )
++{
++ gctUINT32 i = 0;
++
++ struct _FrequencyHistory *history = Dvfs->frequencyHistory;
++
++ for (i = 0; i < 16; i++)
++ {
++ if (history->frequency == Frequency)
++ {
++ break;
++ }
++
++ if (history->frequency == 0)
++ {
++ history->frequency = Frequency;
++ break;
++ }
++
++ history++;
++ }
++
++ if (i < 16)
++ {
++ history->count++;
++ }
++}
++
++static gctUINT32
++_GetFrequencyHistory(
++ gckDVFS Dvfs,
++ gctUINT32 Frequency
++ )
++{
++ gctUINT32 i = 0;
++
++ struct _FrequencyHistory * history = Dvfs->frequencyHistory;
++
++ for (i = 0; i < 16; i++)
++ {
++ if (history->frequency == Frequency)
++ {
++ break;
++ }
++
++ history++;
++ }
++
++ if (i < 16)
++ {
++ return history->count;
++ }
++
++ return 0;
++}
++
++static void
++_Policy(
++ IN gckDVFS Dvfs,
++ IN gctUINT32 Load,
++ OUT gctUINT8 *Scale
++ )
++{
++ gctUINT8 load[4], nextLoad;
++ gctUINT8 scale;
++
++ /* Last 4 history. */
++ load[0] = (Load & 0xFF);
++ load[1] = (Load & 0xFF00) >> 8;
++ load[2] = (Load & 0xFF0000) >> 16;
++ load[3] = (Load & 0xFF000000) >> 24;
++
++ /* Determine target scale. */
++ if (load[0] > 54)
++ {
++ _IncreaseScale(Dvfs, Load, &scale);
++ }
++ else
++ {
++ nextLoad = (load[0] + load[1] + load[2] + load[3])/4;
++
++ scale = Dvfs->currentScale * (nextLoad) / 54;
++
++ scale = gcmMAX(1, scale);
++ scale = gcmMIN(64, scale);
++ }
++
++ Dvfs->totalConfig++;
++
++ Dvfs->loads[(load[0]-1)/8]++;
++
++ *Scale = scale;
++
++
++ if (Dvfs->totalConfig % 100 == 0)
++ {
++ gcmkPRINT("=======================================================");
++ gcmkPRINT("GPU Load: %-8d %-8d %-8d %-8d %-8d %-8d %-8d %-8d",
++ 8, 16, 24, 32, 40, 48, 56, 64);
++ gcmkPRINT(" %-8d %-8d %-8d %-8d %-8d %-8d %-8d %-8d",
++ _GetLoadHistory(Dvfs,2, 0),
++ _GetLoadHistory(Dvfs,2, 1),
++ _GetLoadHistory(Dvfs,2, 2),
++ _GetLoadHistory(Dvfs,2, 3),
++ _GetLoadHistory(Dvfs,2, 4),
++ _GetLoadHistory(Dvfs,2, 5),
++ _GetLoadHistory(Dvfs,2, 6),
++ _GetLoadHistory(Dvfs,2, 7)
++ );
++
++ gcmkPRINT("Frequency(MHz) %-8d %-8d %-8d %-8d %-8d",
++ 58, 120, 240, 360, 480);
++ gcmkPRINT(" %-8d %-8d %-8d %-8d %-8d",
++ _GetFrequencyHistory(Dvfs, 58),
++ _GetFrequencyHistory(Dvfs,120),
++ _GetFrequencyHistory(Dvfs,240),
++ _GetFrequencyHistory(Dvfs,360),
++ _GetFrequencyHistory(Dvfs,480)
++ );
++ }
++}
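++/* Worked example of the policy above (numbers assumed): if the most recent
++** load sample is 40 (<= 54) and the four samples average to 36, the next
++** scale is currentScale * 36 / 54, clamped to [1, 64]; a sample above 54
++** instead raises the scale by 8 via _IncreaseScale. */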
++
++static void
++_TimerFunction(
++ gctPOINTER Data
++ )
++{
++ gceSTATUS status;
++ gckDVFS dvfs = (gckDVFS) Data;
++ gckHARDWARE hardware = dvfs->hardware;
++ gctUINT32 value;
++ gctUINT32 frequency;
++ gctUINT8 scale;
++ gctUINT32 t1, t2, consumed;
++
++ gckOS_GetTicks(&t1);
++
++ gcmkONERROR(gckHARDWARE_QueryLoad(hardware, &value));
++
++    /* Determine the target scale. */
++ _Policy(dvfs, value, &scale);
++
++ /* Set frequency and voltage. */
++ gcmkONERROR(gckOS_SetGPUFrequency(hardware->os, hardware->core, scale));
++
++ /* Query real frequency. */
++ gcmkONERROR(
++ gckOS_QueryGPUFrequency(hardware->os,
++ hardware->core,
++ &frequency,
++ &dvfs->currentScale));
++
++ _RecordFrequencyHistory(dvfs, frequency);
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_POWER,
++ "Current frequency = %d",
++ frequency);
++
++ /* Set period. */
++ gcmkONERROR(gckHARDWARE_SetDVFSPeroid(hardware, frequency));
++
++OnError:
++ /* Determine next querying time. */
++ gckOS_GetTicks(&t2);
++
++ consumed = gcmMIN(((long)t2 - (long)t1), 5);
++
++ if (dvfs->stop == gcvFALSE)
++ {
++ gcmkVERIFY_OK(gckOS_StartTimer(hardware->os,
++ dvfs->timer,
++ dvfs->pollingTime - consumed));
++ }
++
++ return;
++}
++
++gceSTATUS
++gckDVFS_Construct(
++ IN gckHARDWARE Hardware,
++ OUT gckDVFS * Dvfs
++ )
++{
++ gceSTATUS status;
++ gctPOINTER pointer;
++ gckDVFS dvfs = gcvNULL;
++ gckOS os = Hardware->os;
++
++ gcmkHEADER_ARG("Hardware=0x%X", Hardware);
++
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++ gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);
++
++ /* Allocate a gckDVFS manager. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(struct _gckDVFS), &pointer));
++
++ gckOS_ZeroMemory(pointer, gcmSIZEOF(struct _gckDVFS));
++
++ dvfs = pointer;
++
++ /* Initialization. */
++ dvfs->hardware = Hardware;
++ dvfs->pollingTime = gcdDVFS_POLLING_TIME;
++ dvfs->os = Hardware->os;
++ dvfs->currentScale = 64;
++
++ /* Create a polling timer. */
++ gcmkONERROR(gckOS_CreateTimer(os, _TimerFunction, pointer, &dvfs->timer));
++
++ /* Initialize frequency and voltage adjustment helper. */
++ gcmkONERROR(gckOS_PrepareGPUFrequency(os, Hardware->core));
++
++ /* Return result. */
++ *Dvfs = dvfs;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (dvfs)
++ {
++ if (dvfs->timer)
++ {
++ gcmkVERIFY_OK(gckOS_DestroyTimer(os, dvfs->timer));
++ }
++
++ gcmkOS_SAFE_FREE(os, dvfs);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckDVFS_Destroy(
++ IN gckDVFS Dvfs
++ )
++{
++ gcmkHEADER_ARG("Dvfs=0x%X", Dvfs);
++ gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);
++
++    /* Deinitialize the helper function. */
++ gcmkVERIFY_OK(gckOS_FinishGPUFrequency(Dvfs->os, Dvfs->hardware->core));
++
++    /* Destroy the timer. */
++ gcmkVERIFY_OK(gckOS_DestroyTimer(Dvfs->os, Dvfs->timer));
++
++ gcmkOS_SAFE_FREE(Dvfs->os, Dvfs);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckDVFS_Start(
++ IN gckDVFS Dvfs
++ )
++{
++ gcmkHEADER_ARG("Dvfs=0x%X", Dvfs);
++ gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);
++
++ gckHARDWARE_InitDVFS(Dvfs->hardware);
++
++ Dvfs->stop = gcvFALSE;
++
++ gckOS_StartTimer(Dvfs->os, Dvfs->timer, Dvfs->pollingTime);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckDVFS_Stop(
++ IN gckDVFS Dvfs
++ )
++{
++ gcmkHEADER_ARG("Dvfs=0x%X", Dvfs);
++ gcmkVERIFY_ARGUMENT(Dvfs != gcvNULL);
++
++ Dvfs->stop = gcvTRUE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
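++/* DVFS lifecycle sketch (illustrative; "hardware" is an assumed gckHARDWARE
++** handle and error handling is omitted):
++**
++**     gckDVFS dvfs;
++**     gcmkONERROR(gckDVFS_Construct(hardware, &dvfs));
++**     gcmkONERROR(gckDVFS_Start(dvfs));
++**     ...
++**     gcmkONERROR(gckDVFS_Stop(dvfs));
++**     gcmkVERIFY_OK(gckDVFS_Destroy(dvfs));
++*/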
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_precomp.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_precomp.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_precomp.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_precomp.h 2015-07-27 23:13:06.194879670 +0200
+@@ -0,0 +1,29 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_precomp_h_
++#define __gc_hal_kernel_precomp_h_
++
++#include "gc_hal.h"
++#include "gc_hal_driver.h"
++#include "gc_hal_kernel.h"
++
++#endif /* __gc_hal_kernel_precomp_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_security.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_security.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_security.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_security.c 2015-07-27 23:13:06.194879670 +0200
+@@ -0,0 +1,239 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++
++
++
++#define _GC_OBJ_ZONE gcvZONE_KERNEL
++
++#if gcdSECURITY
++
++/*
++** Open a security service channel.
++*/
++gceSTATUS
++gckKERNEL_SecurityOpen(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 GPU,
++ OUT gctUINT32 *Channel
++ )
++{
++ gceSTATUS status;
++
++ gcmkONERROR(gckOS_OpenSecurityChannel(Kernel->os, Kernel->core, Channel));
++ gcmkONERROR(gckOS_InitSecurityChannel(*Channel));
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++/*
++** Close a security service channel
++*/
++gceSTATUS
++gckKERNEL_SecurityClose(
++ IN gctUINT32 Channel
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*
++** Security service interface.
++*/
++gceSTATUS
++gckKERNEL_SecurityCallService(
++ IN gctUINT32 Channel,
++ IN OUT gcsTA_INTERFACE * Interface
++)
++{
++ gceSTATUS status;
++ gcmkHEADER();
++
++ gcmkVERIFY_ARGUMENT(Interface != gcvNULL);
++
++ gckOS_CallSecurityService(Channel, Interface);
++
++ status = Interface->result;
++
++ gcmkONERROR(status);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_SecurityStartCommand(
++ IN gckKERNEL Kernel
++ )
++{
++ gceSTATUS status;
++ gcsTA_INTERFACE iface;
++
++ gcmkHEADER();
++
++ iface.command = KERNEL_START_COMMAND;
++ iface.u.StartCommand.gpu = Kernel->core;
++
++ gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_SecurityAllocateSecurityMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 Bytes,
++ OUT gctUINT32 * Handle
++ )
++{
++ gceSTATUS status;
++ gcsTA_INTERFACE iface;
++
++ gcmkHEADER();
++
++ iface.command = KERNEL_ALLOCATE_SECRUE_MEMORY;
++ iface.u.AllocateSecurityMemory.bytes = Bytes;
++
++ gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface));
++
++ *Handle = iface.u.AllocateSecurityMemory.memory_handle;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_SecurityExecute(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Buffer,
++ IN gctUINT32 Bytes
++ )
++{
++ gceSTATUS status;
++ gcsTA_INTERFACE iface;
++
++ gcmkHEADER();
++
++ iface.command = KERNEL_EXECUTE;
++ iface.u.Execute.command_buffer = (gctUINT32 *)Buffer;
++ iface.u.Execute.gpu = Kernel->core;
++ iface.u.Execute.command_buffer_length = Bytes;
++
++#if defined(LINUX)
++ gcmkONERROR(gckOS_GetPhysicalAddress(Kernel->os, Buffer,
++ (gctUINT32 *)&iface.u.Execute.command_buffer));
++#endif
++
++ gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface));
++
++ /* Update queue tail pointer. */
++ gcmkONERROR(gckHARDWARE_UpdateQueueTail(
++ Kernel->hardware, 0, 0
++ ));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_SecurityMapMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 *PhysicalArray,
++ IN gctUINT32 PageCount,
++ OUT gctUINT32 * GPUAddress
++ )
++{
++ gceSTATUS status;
++ gcsTA_INTERFACE iface;
++
++ gcmkHEADER();
++
++ iface.command = KERNEL_MAP_MEMORY;
++
++#if defined(LINUX)
++ gcmkONERROR(gckOS_GetPhysicalAddress(Kernel->os, PhysicalArray,
++ (gctUINT32 *)&iface.u.MapMemory.physicals));
++#endif
++
++ iface.u.MapMemory.pageCount = PageCount;
++
++ gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface));
++
++ *GPUAddress = iface.u.MapMemory.gpuAddress;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_SecurityUnmapMemory(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 GPUAddress,
++ IN gctUINT32 PageCount
++ )
++{
++ gceSTATUS status;
++ gcsTA_INTERFACE iface;
++
++ gcmkHEADER();
++
++ iface.command = KERNEL_UNMAP_MEMORY;
++
++ iface.u.UnmapMemory.gpuAddress = GPUAddress;
++ iface.u.UnmapMemory.pageCount = PageCount;
++
++ gcmkONERROR(gckKERNEL_SecurityCallService(Kernel->securityChannel, &iface));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++#endif
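++/* Security channel sketch (illustrative; the GPU index 0 is an assumption
++** and error paths are omitted):
++**
++**     gctUINT32 channel;
++**     gcmkONERROR(gckKERNEL_SecurityOpen(kernel, 0, &channel));
++**     gcmkONERROR(gckKERNEL_SecurityStartCommand(kernel));
++**     ...
++**     gcmkVERIFY_OK(gckKERNEL_SecurityClose(channel));
++*/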
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_vg.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_vg.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_vg.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_vg.c 2015-07-27 23:13:06.194879670 +0200
+@@ -0,0 +1,896 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#if gcdENABLE_VG
++
++#define ENABLE_VG_TRY_VIRTUAL_MEMORY 0
++
++#define _GC_OBJ_ZONE gcvZONE_VG
++
++/******************************************************************************\
++******************************* gckKERNEL API Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckKERNEL_Construct
++**
++** Construct a new gckKERNEL object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** IN gctPOINTER Context
++** Pointer to a driver defined context.
++**
++** OUTPUT:
++**
++** gckKERNEL * Kernel
++** Pointer to a variable that will hold the pointer to the gckKERNEL
++** object.
++*/
++gceSTATUS gckVGKERNEL_Construct(
++ IN gckOS Os,
++ IN gctPOINTER Context,
++ IN gckKERNEL inKernel,
++ OUT gckVGKERNEL * Kernel
++ )
++{
++ gceSTATUS status;
++ gckVGKERNEL kernel = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x Context=0x%x", Os, Context);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Kernel != gcvNULL);
++
++ do
++ {
++ /* Allocate the gckKERNEL object. */
++ gcmkERR_BREAK(gckOS_Allocate(
++ Os,
++ sizeof(struct _gckVGKERNEL),
++ (gctPOINTER *) &kernel
++ ));
++
++ /* Initialize the gckKERNEL object. */
++ kernel->object.type = gcvOBJ_KERNEL;
++ kernel->os = Os;
++ kernel->context = Context;
++ kernel->hardware = gcvNULL;
++ kernel->interrupt = gcvNULL;
++ kernel->command = gcvNULL;
++ kernel->mmu = gcvNULL;
++ kernel->kernel = inKernel;
++
++ /* Construct the gckVGHARDWARE object. */
++ gcmkERR_BREAK(gckVGHARDWARE_Construct(
++ Os, &kernel->hardware
++ ));
++
++ /* Set pointer to gckKERNEL object in gckVGHARDWARE object. */
++ kernel->hardware->kernel = kernel;
++
++ /* Construct the gckVGINTERRUPT object. */
++ gcmkERR_BREAK(gckVGINTERRUPT_Construct(
++ kernel, &kernel->interrupt
++ ));
++
++ /* Construct the gckVGCOMMAND object. */
++ gcmkERR_BREAK(gckVGCOMMAND_Construct(
++ kernel, gcmKB2BYTES(8), gcmKB2BYTES(2), &kernel->command
++ ));
++
++ /* Construct the gckVGMMU object. */
++ gcmkERR_BREAK(gckVGMMU_Construct(
++ kernel, gcmKB2BYTES(32), &kernel->mmu
++ ));
++
++ /* Return pointer to the gckKERNEL object. */
++ *Kernel = kernel;
++
++ gcmkFOOTER_ARG("*Kernel=0x%x", *Kernel);
++ /* Success. */
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Roll back. */
++ if (kernel != gcvNULL)
++ {
++ if (kernel->mmu != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckVGMMU_Destroy(kernel->mmu));
++ }
++
++ if (kernel->command != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckVGCOMMAND_Destroy(kernel->command));
++ }
++
++ if (kernel->interrupt != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckVGINTERRUPT_Destroy(kernel->interrupt));
++ }
++
++ if (kernel->hardware != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckVGHARDWARE_Destroy(kernel->hardware));
++ }
++
++ gcmkVERIFY_OK(gckOS_Free(Os, kernel));
++ }
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Destroy
++**
++** Destroy a gckKERNEL object.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckVGKERNEL_Destroy(
++ IN gckVGKERNEL Kernel
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Kernel=0x%x", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ do
++ {
++ /* Destroy the gckVGMMU object. */
++ if (Kernel->mmu != gcvNULL)
++ {
++ gcmkERR_BREAK(gckVGMMU_Destroy(Kernel->mmu));
++ Kernel->mmu = gcvNULL;
++ }
++
++ /* Destroy the gckVGCOMMAND object. */
++ if (Kernel->command != gcvNULL)
++ {
++ gcmkERR_BREAK(gckVGCOMMAND_Destroy(Kernel->command));
++ Kernel->command = gcvNULL;
++ }
++
++ /* Destroy the gckVGINTERRUPT object. */
++ if (Kernel->interrupt != gcvNULL)
++ {
++ gcmkERR_BREAK(gckVGINTERRUPT_Destroy(Kernel->interrupt));
++ Kernel->interrupt = gcvNULL;
++ }
++
++ /* Destroy the gckVGHARDWARE object. */
++ if (Kernel->hardware != gcvNULL)
++ {
++ gcmkERR_BREAK(gckVGHARDWARE_Destroy(Kernel->hardware));
++ Kernel->hardware = gcvNULL;
++ }
++
++ /* Mark the gckKERNEL object as unknown. */
++ Kernel->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckKERNEL object. */
++ gcmkERR_BREAK(gckOS_Free(Kernel->os, Kernel));
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++
++ /* Return status. */
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_AllocateLinearMemory
++**
++** Function walks all required memory pools and allocates the requested
++** amount of video memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcePOOL * Pool
++** Pointer to the desired memory pool.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** gctSIZE_T Alignment
++** Required buffer alignment.
++**
++** gceSURF_TYPE Type
++** Surface type.
++**
++** OUTPUT:
++**
++** gcePOOL * Pool
++** Pointer to the actual pool where the memory was allocated.
++**
++** gcuVIDMEM_NODE_PTR * Node
++** Allocated node.
++*/
++gceSTATUS
++gckKERNEL_AllocateLinearMemory(
++ IN gckKERNEL Kernel,
++ IN OUT gcePOOL * Pool,
++ IN gctSIZE_T Bytes,
++ IN gctSIZE_T Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gcePOOL pool;
++ gceSTATUS status;
++ gckVIDMEM videoMemory;
++
++ /* Get initial pool. */
++ switch (pool = *Pool)
++ {
++ case gcvPOOL_DEFAULT:
++ case gcvPOOL_LOCAL:
++ pool = gcvPOOL_LOCAL_INTERNAL;
++ break;
++
++ case gcvPOOL_UNIFIED:
++ pool = gcvPOOL_SYSTEM;
++ break;
++
++ default:
++ break;
++ }
++
++ do
++ {
++ /* Verify the number of bytes to allocate. */
++ if (Bytes == 0)
++ {
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ break;
++ }
++
++ if (pool == gcvPOOL_VIRTUAL)
++ {
++ /* Create a gcuVIDMEM_NODE for virtual memory. */
++ gcmkERR_BREAK(gckVIDMEM_ConstructVirtual(Kernel, gcvFALSE, Bytes, Node));
++
++ /* Success. */
++ break;
++ }
++
++ else
++ {
++ /* Get pointer to gckVIDMEM object for pool. */
++ status = gckKERNEL_GetVideoMemoryPool(Kernel, pool, &videoMemory);
++
++ if (status == gcvSTATUS_OK)
++ {
++                if (*Pool == gcvPOOL_SYSTEM)
++                {
++                    Type |= gcvSURF_VG;
++                }
++ /* Allocate memory. */
++ status = gckVIDMEM_AllocateLinear(Kernel,
++ videoMemory,
++ Bytes,
++ Alignment,
++ Type,
++ Node);
++
++ if (status == gcvSTATUS_OK)
++ {
++ /* Memory allocated. */
++ break;
++ }
++ }
++ }
++
++ if (pool == gcvPOOL_LOCAL_INTERNAL)
++ {
++ /* Advance to external memory. */
++ pool = gcvPOOL_LOCAL_EXTERNAL;
++ }
++ else if (pool == gcvPOOL_LOCAL_EXTERNAL)
++ {
++ /* Advance to contiguous system memory. */
++ pool = gcvPOOL_SYSTEM;
++ }
++ else if (pool == gcvPOOL_SYSTEM)
++ {
++ /* Advance to virtual memory. */
++#if ENABLE_VG_TRY_VIRTUAL_MEMORY
++ pool = gcvPOOL_VIRTUAL;
++#else
++            /* VG non-contiguous memory support is not ready yet; disable it temporarily. */
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ break;
++#endif
++ }
++ else
++ {
++ /* Out of pools. */
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ break;
++ }
++ }
++ /* Loop only for multiple selection pools. */
++ while ((*Pool == gcvPOOL_DEFAULT)
++ || (*Pool == gcvPOOL_LOCAL)
++ || (*Pool == gcvPOOL_UNIFIED)
++ );
++
++ if (gcmIS_SUCCESS(status))
++ {
++ /* Return pool used for allocation. */
++ *Pool = pool;
++ }
++
++ /* Return status. */
++ return status;
++}
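++/* Pool fallback implemented above: LOCAL_INTERNAL -> LOCAL_EXTERNAL ->
++** SYSTEM (and on to VIRTUAL only when ENABLE_VG_TRY_VIRTUAL_MEMORY is set).
++** A gcvPOOL_DEFAULT request that cannot be served locally therefore lands
++** in the contiguous system pool, and the pool actually used is written
++** back through *Pool. */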
++
++/*******************************************************************************
++**
++** gckKERNEL_Dispatch
++**
++** Dispatch a command received from the user HAL layer.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that defines the command to
++** be dispatched.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++** Pointer to a gcsHAL_INTERFACE structure that receives any data to be
++** returned.
++*/
++gceSTATUS gckVGKERNEL_Dispatch(
++ IN gckKERNEL Kernel,
++ IN gctBOOL FromUser,
++ IN OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE * kernelInterface = Interface;
++ gcuVIDMEM_NODE_PTR node;
++ gctUINT32 processID;
++ gckKERNEL kernel = Kernel;
++ gctPOINTER info = gcvNULL;
++ gctPHYS_ADDR physical = gcvNULL;
++ gctPOINTER logical = gcvNULL;
++ gctSIZE_T bytes = 0;
++
++ gcmkHEADER_ARG("Kernel=0x%x Interface=0x%x ", Kernel, Interface);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Interface != gcvNULL);
++
++ gcmkONERROR(gckOS_GetProcessID(&processID));
++
++ /* Dispatch on command. */
++ switch (Interface->command)
++ {
++ case gcvHAL_QUERY_VIDEO_MEMORY:
++ /* Query video memory size. */
++ gcmkERR_BREAK(gckKERNEL_QueryVideoMemory(
++ Kernel, kernelInterface
++ ));
++ break;
++
++ case gcvHAL_QUERY_CHIP_IDENTITY:
++ /* Query chip identity. */
++ gcmkERR_BREAK(gckVGHARDWARE_QueryChipIdentity(
++ Kernel->vg->hardware,
++ &kernelInterface->u.QueryChipIdentity.chipModel,
++ &kernelInterface->u.QueryChipIdentity.chipRevision,
++ &kernelInterface->u.QueryChipIdentity.chipFeatures,
++ &kernelInterface->u.QueryChipIdentity.chipMinorFeatures,
++ &kernelInterface->u.QueryChipIdentity.chipMinorFeatures2
++ ));
++ break;
++
++ case gcvHAL_QUERY_COMMAND_BUFFER:
++ /* Query command buffer information. */
++ gcmkERR_BREAK(gckKERNEL_QueryCommandBuffer(
++ Kernel,
++ &kernelInterface->u.QueryCommandBuffer.information
++ ));
++ break;
++ case gcvHAL_ALLOCATE_NON_PAGED_MEMORY:
++ bytes = (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes;
++ /* Allocate non-paged memory. */
++ gcmkERR_BREAK(gckOS_AllocateContiguous(
++ Kernel->os,
++ gcvTRUE,
++ &bytes,
++ &physical,
++ &logical
++ ));
++
++ kernelInterface->u.AllocateNonPagedMemory.bytes = bytes;
++ kernelInterface->u.AllocateNonPagedMemory.logical = gcmPTR_TO_UINT64(logical);
++ kernelInterface->u.AllocateNonPagedMemory.physical = gcmPTR_TO_NAME(physical);
++ break;
++
++ case gcvHAL_FREE_NON_PAGED_MEMORY:
++ physical = gcmNAME_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.physical);
++
++ /* Unmap user logical out of physical memory first. */
++ gcmkERR_BREAK(gckOS_UnmapUserLogical(
++ Kernel->os,
++ physical,
++ (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes,
++ gcmUINT64_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.logical)
++ ));
++
++ /* Free non-paged memory. */
++ gcmkERR_BREAK(gckOS_FreeNonPagedMemory(
++ Kernel->os,
++ (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes,
++ physical,
++ gcmUINT64_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.logical)
++ ));
++
++ gcmRELEASE_NAME(kernelInterface->u.AllocateNonPagedMemory.physical);
++ break;
++
++ case gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY:
++ bytes = (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes;
++ /* Allocate contiguous memory. */
++ gcmkERR_BREAK(gckOS_AllocateContiguous(
++ Kernel->os,
++ gcvTRUE,
++ &bytes,
++ &physical,
++ &logical
++ ));
++
++ kernelInterface->u.AllocateNonPagedMemory.bytes = bytes;
++ kernelInterface->u.AllocateNonPagedMemory.logical = gcmPTR_TO_UINT64(logical);
++ kernelInterface->u.AllocateNonPagedMemory.physical = gcmPTR_TO_NAME(physical);
++ break;
++
++ case gcvHAL_FREE_CONTIGUOUS_MEMORY:
++ physical = gcmNAME_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.physical);
++ /* Unmap user logical out of physical memory first. */
++ gcmkERR_BREAK(gckOS_UnmapUserLogical(
++ Kernel->os,
++ physical,
++ (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes,
++ gcmUINT64_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.logical)
++ ));
++
++ /* Free contiguous memory. */
++ gcmkERR_BREAK(gckOS_FreeContiguous(
++ Kernel->os,
++ physical,
++ gcmUINT64_TO_PTR(kernelInterface->u.AllocateNonPagedMemory.logical),
++ (gctSIZE_T) kernelInterface->u.AllocateNonPagedMemory.bytes
++ ));
++
++ gcmRELEASE_NAME(kernelInterface->u.AllocateNonPagedMemory.physical);
++ break;
++
++ case gcvHAL_ALLOCATE_VIDEO_MEMORY:
++ {
++ gctSIZE_T bytes;
++ gctUINT32 bitsPerPixel;
++ gctUINT32 bits;
++
++ /* Align width and height to tiles. */
++ gcmkERR_BREAK(gckVGHARDWARE_AlignToTile(
++ Kernel->vg->hardware,
++ kernelInterface->u.AllocateVideoMemory.type,
++ &kernelInterface->u.AllocateVideoMemory.width,
++ &kernelInterface->u.AllocateVideoMemory.height
++ ));
++
++ /* Convert format into bytes per pixel and bytes per tile. */
++ gcmkERR_BREAK(gckVGHARDWARE_ConvertFormat(
++ Kernel->vg->hardware,
++ kernelInterface->u.AllocateVideoMemory.format,
++ &bitsPerPixel,
++ gcvNULL
++ ));
++
++ /* Compute number of bits for the allocation. */
++ bits
++ = kernelInterface->u.AllocateVideoMemory.width
++ * kernelInterface->u.AllocateVideoMemory.height
++ * kernelInterface->u.AllocateVideoMemory.depth
++ * bitsPerPixel;
++
++ /* Compute number of bytes for the allocation. */
++ bytes = gcmALIGN(bits, 8) / 8;
++
++ /* Allocate memory. */
++ gcmkERR_BREAK(gckKERNEL_AllocateLinearMemory(
++ Kernel,
++ &kernelInterface->u.AllocateVideoMemory.pool,
++ bytes,
++ 64,
++ kernelInterface->u.AllocateVideoMemory.type,
++ &node
++ ));
++
++ kernelInterface->u.AllocateVideoMemory.node = gcmPTR_TO_UINT64(node);
++ }
++ break;
++
++ case gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY:
++ /* Allocate memory. */
++ gcmkERR_BREAK(gckKERNEL_AllocateLinearMemory(
++ Kernel,
++ &kernelInterface->u.AllocateLinearVideoMemory.pool,
++ kernelInterface->u.AllocateLinearVideoMemory.bytes,
++ kernelInterface->u.AllocateLinearVideoMemory.alignment,
++ kernelInterface->u.AllocateLinearVideoMemory.type,
++ &node
++ ));
++
++ gcmkERR_BREAK(gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY,
++ node,
++ gcvNULL,
++ kernelInterface->u.AllocateLinearVideoMemory.bytes
++ ));
++
++ kernelInterface->u.AllocateLinearVideoMemory.node = gcmPTR_TO_UINT64(node);
++ break;
++
++ case gcvHAL_FREE_VIDEO_MEMORY:
++ node = gcmUINT64_TO_PTR(Interface->u.FreeVideoMemory.node);
++#ifdef __QNXNTO__
++ /* Unmap the video memory */
++
++ if ((node->VidMem.memory->object.type == gcvOBJ_VIDMEM) &&
++ (node->VidMem.logical != gcvNULL))
++ {
++ gckKERNEL_UnmapVideoMemory(Kernel,
++ node->VidMem.logical,
++ processID,
++ node->VidMem.bytes);
++ node->VidMem.logical = gcvNULL;
++ }
++#endif /* __QNXNTO__ */
++
++ /* Free video memory. */
++ gcmkERR_BREAK(gckVIDMEM_Free(Kernel,
++ node
++ ));
++
++ gcmkERR_BREAK(gckKERNEL_RemoveProcessDB(
++ Kernel,
++ processID, gcvDB_VIDEO_MEMORY,
++ node
++ ));
++
++ break;
++
++ case gcvHAL_MAP_MEMORY:
++ /* Map memory. */
++ gcmkERR_BREAK(gckKERNEL_MapMemory(
++ Kernel,
++ gcmINT2PTR(kernelInterface->u.MapMemory.physical),
++ (gctSIZE_T) kernelInterface->u.MapMemory.bytes,
++ &logical
++ ));
++ kernelInterface->u.MapMemory.logical = gcmPTR_TO_UINT64(logical);
++ break;
++
++ case gcvHAL_UNMAP_MEMORY:
++ /* Unmap memory. */
++ gcmkERR_BREAK(gckKERNEL_UnmapMemory(
++ Kernel,
++ gcmINT2PTR(kernelInterface->u.MapMemory.physical),
++ (gctSIZE_T) kernelInterface->u.MapMemory.bytes,
++ gcmUINT64_TO_PTR(kernelInterface->u.MapMemory.logical)
++ ));
++ break;
++
++ case gcvHAL_MAP_USER_MEMORY:
++ /* Map user memory to DMA. */
++ gcmkERR_BREAK(gckOS_MapUserMemory(
++ Kernel->os,
++ gcvCORE_VG,
++ gcmUINT64_TO_PTR(kernelInterface->u.MapUserMemory.memory),
++ kernelInterface->u.MapUserMemory.physical,
++ (gctSIZE_T) kernelInterface->u.MapUserMemory.size,
++ &info,
++ &kernelInterface->u.MapUserMemory.address
++ ));
++
++ kernelInterface->u.MapUserMemory.info = gcmPTR_TO_NAME(info);
++ break;
++
++ case gcvHAL_UNMAP_USER_MEMORY:
++ /* Unmap user memory. */
++ gcmkERR_BREAK(gckOS_UnmapUserMemory(
++ Kernel->os,
++ gcvCORE_VG,
++ gcmUINT64_TO_PTR(kernelInterface->u.UnmapUserMemory.memory),
++ (gctSIZE_T) kernelInterface->u.UnmapUserMemory.size,
++ gcmNAME_TO_PTR(kernelInterface->u.UnmapUserMemory.info),
++ kernelInterface->u.UnmapUserMemory.address
++ ));
++ gcmRELEASE_NAME(kernelInterface->u.UnmapUserMemory.info);
++ break;
++ case gcvHAL_LOCK_VIDEO_MEMORY:
++ node = gcmUINT64_TO_PTR(Interface->u.LockVideoMemory.node);
++
++ /* Lock video memory. */
++ gcmkERR_BREAK(
++ gckVIDMEM_Lock(Kernel,
++ node,
++ gcvFALSE,
++ &Interface->u.LockVideoMemory.address));
++
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ /* Map video memory address into user space. */
++#ifdef __QNXNTO__
++ if (node->VidMem.logical == gcvNULL)
++ {
++ gcmkONERROR(
++ gckKERNEL_MapVideoMemory(Kernel,
++ FromUser,
++ Interface->u.LockVideoMemory.address,
++ processID,
++ node->VidMem.bytes,
++ &node->VidMem.logical));
++ }
++
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(node->VidMem.logical);
++#else
++ gcmkERR_BREAK(
++ gckKERNEL_MapVideoMemoryEx(Kernel,
++ gcvCORE_VG,
++ FromUser,
++ Interface->u.LockVideoMemory.address,
++ &logical));
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(logical);
++#endif
++ }
++ else
++ {
++ Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(node->Virtual.logical);
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++
++#if gcdSECURE_USER
++ /* Return logical address as physical address. */
++ Interface->u.LockVideoMemory.address =
++ (gctUINT32)(Interface->u.LockVideoMemory.memory);
++#endif
++ gcmkERR_BREAK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_LOCKED,
++ node,
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvHAL_UNLOCK_VIDEO_MEMORY:
++ /* Unlock video memory. */
++ node = gcmUINT64_TO_PTR(Interface->u.UnlockVideoMemory.node);
++
++#if gcdSECURE_USER
++ /* Save node information before it disappears. */
++ if (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ logical = gcvNULL;
++ bytes = 0;
++ }
++ else
++ {
++ logical = node->Virtual.logical;
++ bytes = node->Virtual.bytes;
++ }
++#endif
++
++ /* Unlock video memory. */
++ gcmkERR_BREAK(
++ gckVIDMEM_Unlock(Kernel,
++ node,
++ Interface->u.UnlockVideoMemory.type,
++ &Interface->u.UnlockVideoMemory.asynchroneous));
++
++#if gcdSECURE_USER
++ /* Flush the translation cache for virtual surfaces. */
++ if (logical != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckKERNEL_FlushTranslationCache(Kernel,
++ cache,
++ logical,
++ bytes));
++ }
++#endif
++
++ if (Interface->u.UnlockVideoMemory.asynchroneous == gcvFALSE)
++ {
++            /* There is no event to unlock this node, so remove the record now. */
++ gcmkERR_BREAK(
++ gckKERNEL_RemoveProcessDB(Kernel,
++ processID, gcvDB_VIDEO_MEMORY_LOCKED,
++ node));
++ }
++
++ break;
++ case gcvHAL_USER_SIGNAL:
++#if !USE_NEW_LINUX_SIGNAL
++ /* Dispatch depends on the user signal subcommands. */
++ switch(Interface->u.UserSignal.command)
++ {
++ case gcvUSER_SIGNAL_CREATE:
++ /* Create a signal used in the user space. */
++ gcmkERR_BREAK(
++ gckOS_CreateUserSignal(Kernel->os,
++ Interface->u.UserSignal.manualReset,
++ &Interface->u.UserSignal.id));
++
++ gcmkVERIFY_OK(
++ gckKERNEL_AddProcessDB(Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id),
++ gcvNULL,
++ 0));
++ break;
++
++ case gcvUSER_SIGNAL_DESTROY:
++ /* Destroy the signal. */
++ gcmkERR_BREAK(
++ gckOS_DestroyUserSignal(Kernel->os,
++ Interface->u.UserSignal.id));
++
++ gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
++ Kernel,
++ processID, gcvDB_SIGNAL,
++ gcmINT2PTR(Interface->u.UserSignal.id)));
++ break;
++
++ case gcvUSER_SIGNAL_SIGNAL:
++ /* Signal the signal. */
++ gcmkERR_BREAK(
++ gckOS_SignalUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ Interface->u.UserSignal.state));
++ break;
++
++ case gcvUSER_SIGNAL_WAIT:
++ /* Wait on the signal. */
++ status = gckOS_WaitUserSignal(Kernel->os,
++ Interface->u.UserSignal.id,
++ Interface->u.UserSignal.wait);
++ break;
++
++ default:
++ /* Invalid user signal command. */
++ gcmkERR_BREAK(gcvSTATUS_INVALID_ARGUMENT);
++ }
++#endif
++ break;
++
++ case gcvHAL_COMMIT:
++ /* Commit a command and context buffer. */
++ gcmkERR_BREAK(gckVGCOMMAND_Commit(
++ Kernel->vg->command,
++ gcmUINT64_TO_PTR(kernelInterface->u.VGCommit.context),
++ gcmUINT64_TO_PTR(kernelInterface->u.VGCommit.queue),
++ kernelInterface->u.VGCommit.entryCount,
++ gcmUINT64_TO_PTR(kernelInterface->u.VGCommit.taskTable)
++ ));
++ break;
++ case gcvHAL_VERSION:
++ kernelInterface->u.Version.major = gcvVERSION_MAJOR;
++ kernelInterface->u.Version.minor = gcvVERSION_MINOR;
++ kernelInterface->u.Version.patch = gcvVERSION_PATCH;
++ kernelInterface->u.Version.build = gcvVERSION_BUILD;
++ status = gcvSTATUS_OK;
++ break;
++
++ case gcvHAL_GET_BASE_ADDRESS:
++ /* Get base address. */
++ gcmkERR_BREAK(
++ gckOS_GetBaseAddress(Kernel->os,
++ &kernelInterface->u.GetBaseAddress.baseAddress));
++ break;
++ default:
++ /* Invalid command. */
++ status = gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++OnError:
++ /* Save status. */
++ kernelInterface->status = status;
++
++ gcmkFOOTER();
++
++ /* Return the status. */
++ return status;
++}
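++
++/* Illustration (not part of the driver): gcvHAL_MAP_USER_MEMORY hands the
++** kernel-side 'info' pointer back to user space as an opaque name via
++** gcmPTR_TO_NAME; gcvHAL_UNMAP_USER_MEMORY later resolves that name with
++** gcmNAME_TO_PTR and drops it with gcmRELEASE_NAME, so raw kernel pointers
++** never cross the user/kernel boundary.
++*/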
++
++/*******************************************************************************
++**
++** gckKERNEL_QueryCommandBuffer
++**
++** Query command buffer attributes.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to an gckKERNEL object.
++**
++** OUTPUT:
++**
++** gcsCOMMAND_BUFFER_INFO_PTR Information
++** Pointer to the information structure to receive buffer attributes.
++*/
++gceSTATUS
++gckKERNEL_QueryCommandBuffer(
++ IN gckKERNEL Kernel,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ )
++{
++ gceSTATUS status;
++
++    gcmkHEADER_ARG("Kernel=0x%x Information=0x%x",
++ Kernel, Information);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++ /* Get the information. */
++ status = gckVGCOMMAND_QueryCommandBuffer(Kernel->vg->command, Information);
++
++ gcmkFOOTER();
++ /* Return status. */
++ return status;
++}
++
++#endif /* gcdENABLE_VG */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_vg.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_vg.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_vg.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_vg.h 2015-07-27 23:13:06.194879670 +0200
+@@ -0,0 +1,85 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_vg_h_
++#define __gc_hal_kernel_vg_h_
++
++#include "gc_hal.h"
++#include "gc_hal_driver.h"
++#include "gc_hal_kernel_hardware.h"
++
++/******************************************************************************\
++********************************** Structures **********************************
++\******************************************************************************/
++
++/* gckKERNEL object. */
++struct _gckVGKERNEL
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckVGHARDWARE hardware;
++
++ /* Pointer to gckINTERRUPT object. */
++ gckVGINTERRUPT interrupt;
++
++ /* Pointer to gckCOMMAND object. */
++ gckVGCOMMAND command;
++
++ /* Pointer to context. */
++ gctPOINTER context;
++
++ /* Pointer to gckMMU object. */
++ gckVGMMU mmu;
++
++ gckKERNEL kernel;
++};
++
++/* gckMMU object. */
++struct _gckVGMMU
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Pointer to gckOS object. */
++ gckOS os;
++
++ /* Pointer to gckHARDWARE object. */
++ gckVGHARDWARE hardware;
++
++ /* The page table mutex. */
++ gctPOINTER mutex;
++
++ /* Page table information. */
++ gctSIZE_T pageTableSize;
++ gctPHYS_ADDR pageTablePhysical;
++ gctPOINTER pageTableLogical;
++
++ /* Allocation index. */
++ gctUINT32 entryCount;
++ gctUINT32 entry;
++};
++
++#endif /* __gc_hal_kernel_vg_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_video_memory.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_video_memory.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_video_memory.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/gc_hal_kernel_video_memory.c 2015-07-27 23:13:06.194879670 +0200
+@@ -0,0 +1,2229 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_precomp.h"
++
++#define _GC_OBJ_ZONE gcvZONE_VIDMEM
++
++/******************************************************************************\
++******************************* Private Functions ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** _Split
++**
++** Split a node on the required byte boundary.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to the node to split.
++**
++** gctSIZE_T Bytes
++** Number of bytes to keep in the node.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** gctBOOL
++** gcvTRUE if the node was split successfully, or gcvFALSE if there is an
++** error.
++**
++*/
++static gctBOOL
++_Split(
++ IN gckOS Os,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcuVIDMEM_NODE_PTR node;
++ gctPOINTER pointer = gcvNULL;
++
++ /* Make sure the byte boundary makes sense. */
++ if ((Bytes <= 0) || (Bytes > Node->VidMem.bytes))
++ {
++ return gcvFALSE;
++ }
++
++ /* Allocate a new gcuVIDMEM_NODE object. */
++ if (gcmIS_ERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(gcuVIDMEM_NODE),
++ &pointer)))
++ {
++ /* Error. */
++ return gcvFALSE;
++ }
++
++ node = pointer;
++
++ /* Initialize gcuVIDMEM_NODE structure. */
++ node->VidMem.offset = Node->VidMem.offset + Bytes;
++ node->VidMem.bytes = Node->VidMem.bytes - Bytes;
++ node->VidMem.alignment = 0;
++ node->VidMem.locked = 0;
++ node->VidMem.memory = Node->VidMem.memory;
++ node->VidMem.pool = Node->VidMem.pool;
++ node->VidMem.physical = Node->VidMem.physical;
++#ifdef __QNXNTO__
++#if gcdUSE_VIDMEM_PER_PID
++ gcmkASSERT(Node->VidMem.physical != 0);
++ gcmkASSERT(Node->VidMem.logical != gcvNULL);
++ node->VidMem.processID = Node->VidMem.processID;
++ node->VidMem.physical = Node->VidMem.physical + Bytes;
++ node->VidMem.logical = Node->VidMem.logical + Bytes;
++#else
++ node->VidMem.processID = 0;
++ node->VidMem.logical = gcvNULL;
++#endif
++#endif
++
++ /* Insert node behind specified node. */
++ node->VidMem.next = Node->VidMem.next;
++ node->VidMem.prev = Node;
++ Node->VidMem.next = node->VidMem.next->VidMem.prev = node;
++
++ /* Insert free node behind specified node. */
++ node->VidMem.nextFree = Node->VidMem.nextFree;
++ node->VidMem.prevFree = Node;
++ Node->VidMem.nextFree = node->VidMem.nextFree->VidMem.prevFree = node;
++
++ /* Adjust size of specified node. */
++ Node->VidMem.bytes = Bytes;
++
++ /* Success. */
++ return gcvTRUE;
++}
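++
++/* Illustration (not part of the driver): given a free 64 KB node and
++** Bytes = 16 KB, _Split keeps [offset, offset + 16K) in Node and creates a
++** new node covering the remaining 48 KB; both the full list and the free
++** list stay doubly linked:
++**
++**     before:  ... <-> Node(64K) <-> next ...
++**     after:   ... <-> Node(16K) <-> node(48K) <-> next ...
++*/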
++
++/*******************************************************************************
++**
++** _Merge
++**
++** Merge two adjacent nodes together.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to the first of the two nodes to merge.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++*/
++static gceSTATUS
++_Merge(
++ IN gckOS Os,
++ IN gcuVIDMEM_NODE_PTR Node
++ )
++{
++ gcuVIDMEM_NODE_PTR node;
++ gceSTATUS status;
++
++ /* Save pointer to next node. */
++ node = Node->VidMem.next;
++#if gcdUSE_VIDMEM_PER_PID
++ /* Check if the nodes are adjacent physically. */
++ if ( ((Node->VidMem.physical + Node->VidMem.bytes) != node->VidMem.physical) ||
++ ((Node->VidMem.logical + Node->VidMem.bytes) != node->VidMem.logical) )
++ {
++ /* Can't merge. */
++ return gcvSTATUS_OK;
++ }
++#else
++
++ /* This is a good time to make sure the heap is not corrupted. */
++ if (Node->VidMem.offset + Node->VidMem.bytes != node->VidMem.offset)
++ {
++ /* Corrupted heap. */
++ gcmkASSERT(
++ Node->VidMem.offset + Node->VidMem.bytes == node->VidMem.offset);
++ return gcvSTATUS_HEAP_CORRUPTED;
++ }
++#endif
++
++ /* Adjust byte count. */
++ Node->VidMem.bytes += node->VidMem.bytes;
++
++ /* Unlink next node from linked list. */
++ Node->VidMem.next = node->VidMem.next;
++ Node->VidMem.nextFree = node->VidMem.nextFree;
++
++ Node->VidMem.next->VidMem.prev =
++ Node->VidMem.nextFree->VidMem.prevFree = Node;
++
++ /* Free next node. */
++ status = gcmkOS_SAFE_FREE(Os, node);
++ return status;
++}
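++
++/* Illustration (not part of the driver): _Merge expects Node and
++** Node->VidMem.next to be free and byte-adjacent, e.g.
++**
++**     before:  ... <-> Node(16K @ offset 0x1000) <-> node(48K @ 0x5000) <-> ...
++**     after:   ... <-> Node(64K @ offset 0x1000) <-> ...
++**
++** The adjacency check (offset + bytes == next->offset) doubles as a cheap
++** heap-corruption assertion.
++*/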
++
++/******************************************************************************\
++******************************* gckVIDMEM API Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckVIDMEM_ConstructVirtual
++**
++** Construct a new gcuVIDMEM_NODE union for virtual memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gctSIZE_T Bytes
++**          Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gcuVIDMEM_NODE_PTR * Node
++** Pointer to a variable that receives the gcuVIDMEM_NODE union pointer.
++*/
++gceSTATUS
++gckVIDMEM_ConstructVirtual(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Contiguous,
++ IN gctSIZE_T Bytes,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gckOS os;
++ gceSTATUS status;
++ gcuVIDMEM_NODE_PTR node = gcvNULL;
++ gctPOINTER pointer = gcvNULL;
++ gctINT i;
++
++ gcmkHEADER_ARG("Kernel=0x%x Contiguous=%d Bytes=%lu", Kernel, Contiguous, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++
++ /* Extract the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ /* Allocate an gcuVIDMEM_NODE union. */
++ gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcuVIDMEM_NODE), &pointer));
++
++ node = pointer;
++
++ /* Initialize gcuVIDMEM_NODE union for virtual memory. */
++ node->Virtual.kernel = Kernel;
++ node->Virtual.contiguous = Contiguous;
++ node->Virtual.logical = gcvNULL;
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ node->Virtual.lockeds[i] = 0;
++ node->Virtual.pageTables[i] = gcvNULL;
++ node->Virtual.lockKernels[i] = gcvNULL;
++ }
++
++ gcmkONERROR(gckOS_GetProcessID(&node->Virtual.processID));
++
++#ifdef __QNXNTO__
++ node->Virtual.next = gcvNULL;
++ node->Virtual.freePending = gcvFALSE;
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ node->Virtual.unlockPendings[i] = gcvFALSE;
++ }
++#endif
++
++ node->Virtual.freed = gcvFALSE;
++
++ gcmkONERROR(gckOS_ZeroMemory(&node->Virtual.sharedInfo, gcmSIZEOF(gcsVIDMEM_NODE_SHARED_INFO)));
++
++ /* Allocate the virtual memory. */
++ gcmkONERROR(
++ gckOS_AllocatePagedMemoryEx(os,
++ node->Virtual.contiguous,
++ node->Virtual.bytes = Bytes,
++ &node->Virtual.physical));
++
++#ifdef __QNXNTO__
++ /* Register. */
++#if gcdENABLE_VG
++ if (Kernel->core != gcvCORE_VG)
++#endif
++ {
++ gckMMU_InsertNode(Kernel->mmu, node);
++ }
++#endif
++
++ /* Return pointer to the gcuVIDMEM_NODE union. */
++ *Node = node;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Created virtual node 0x%x for %u bytes @ 0x%x",
++ node, Bytes, node->Virtual.physical);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Node=0x%x", *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (node != gcvNULL)
++ {
++ /* Free the structure. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, node));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_DestroyVirtual
++**
++** Destroy an gcuVIDMEM_NODE union for virtual memory.
++**
++** INPUT:
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE union.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVIDMEM_DestroyVirtual(
++ IN gcuVIDMEM_NODE_PTR Node
++ )
++{
++ gckOS os;
++ gctINT i;
++
++ gcmkHEADER_ARG("Node=0x%x", Node);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL);
++
++    /* Extract the gckOS object pointer. */
++ os = Node->Virtual.kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++#ifdef __QNXNTO__
++ /* Unregister. */
++#if gcdENABLE_VG
++ if (Node->Virtual.kernel->core != gcvCORE_VG)
++#endif
++ {
++ gcmkVERIFY_OK(
++ gckMMU_RemoveNode(Node->Virtual.kernel->mmu, Node));
++ }
++#endif
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (Node->Virtual.pageTables[i] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ /* Free the pages. */
++ gcmkVERIFY_OK(gckVGMMU_FreePages(Node->Virtual.lockKernels[i]->vg->mmu,
++ Node->Virtual.pageTables[i],
++ Node->Virtual.pageCount));
++ }
++ else
++#endif
++ {
++ /* Free the pages. */
++ gcmkVERIFY_OK(gckMMU_FreePages(Node->Virtual.lockKernels[i]->mmu,
++ Node->Virtual.pageTables[i],
++ Node->Virtual.pageCount));
++ }
++ }
++ }
++
++ /* Delete the gcuVIDMEM_NODE union. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, Node));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_Construct
++**
++** Construct a new gckVIDMEM object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 BaseAddress
++** Base address for the video memory heap.
++**
++** gctSIZE_T Bytes
++** Number of bytes in the video memory heap.
++**
++** gctSIZE_T Threshold
++**          Minimum number of bytes beyond an allocation before the node is
++** split. Can be used as a minimum alignment requirement.
++**
++** gctSIZE_T BankSize
++** Number of bytes per physical memory bank. Used by bank
++** optimization.
++**
++** OUTPUT:
++**
++** gckVIDMEM * Memory
++** Pointer to a variable that will hold the pointer to the gckVIDMEM
++** object.
++*/
++gceSTATUS
++gckVIDMEM_Construct(
++ IN gckOS Os,
++ IN gctUINT32 BaseAddress,
++ IN gctSIZE_T Bytes,
++ IN gctSIZE_T Threshold,
++ IN gctSIZE_T BankSize,
++ OUT gckVIDMEM * Memory
++ )
++{
++ gckVIDMEM memory = gcvNULL;
++ gceSTATUS status;
++ gcuVIDMEM_NODE_PTR node;
++ gctINT i, banks = 0;
++ gctPOINTER pointer = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%x BaseAddress=%08x Bytes=%lu Threshold=%lu "
++ "BankSize=%lu",
++ Os, BaseAddress, Bytes, Threshold, BankSize);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Allocate the gckVIDMEM object. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(struct _gckVIDMEM), &pointer));
++
++ memory = pointer;
++
++ /* Initialize the gckVIDMEM object. */
++ memory->object.type = gcvOBJ_VIDMEM;
++ memory->os = Os;
++
++ /* Set video memory heap information. */
++ memory->baseAddress = BaseAddress;
++ memory->bytes = Bytes;
++ memory->freeBytes = Bytes;
++ memory->threshold = Threshold;
++#if gcdUSE_VIDMEM_PER_PID
++ gcmkONERROR(gckOS_GetProcessID(&memory->pid));
++#endif
++
++ BaseAddress = 0;
++
++ /* Walk all possible banks. */
++ for (i = 0; i < gcmCOUNTOF(memory->sentinel); ++i)
++ {
++ gctSIZE_T bytes;
++
++ if (BankSize == 0)
++ {
++ /* Use all bytes for the first bank. */
++ bytes = Bytes;
++ }
++ else
++ {
++ /* Compute number of bytes for this bank. */
++ bytes = gcmALIGN(BaseAddress + 1, BankSize) - BaseAddress;
++
++ if (bytes > Bytes)
++ {
++ /* Make sure we don't exceed the total number of bytes. */
++ bytes = Bytes;
++ }
++ }
++
++ if (bytes == 0)
++ {
++            /* Mark the heap as not used. */
++ memory->sentinel[i].VidMem.next =
++ memory->sentinel[i].VidMem.prev =
++ memory->sentinel[i].VidMem.nextFree =
++ memory->sentinel[i].VidMem.prevFree = gcvNULL;
++ continue;
++ }
++
++ /* Allocate one gcuVIDMEM_NODE union. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcuVIDMEM_NODE), &pointer));
++
++ node = pointer;
++
++ /* Initialize gcuVIDMEM_NODE union. */
++ node->VidMem.memory = memory;
++
++ node->VidMem.next =
++ node->VidMem.prev =
++ node->VidMem.nextFree =
++ node->VidMem.prevFree = &memory->sentinel[i];
++
++ node->VidMem.offset = BaseAddress;
++ node->VidMem.bytes = bytes;
++ node->VidMem.alignment = 0;
++ node->VidMem.physical = 0;
++ node->VidMem.pool = gcvPOOL_UNKNOWN;
++
++ node->VidMem.locked = 0;
++
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
++ node->VidMem.kernelVirtual = gcvNULL;
++#endif
++
++ gcmkONERROR(gckOS_ZeroMemory(&node->VidMem.sharedInfo, gcmSIZEOF(gcsVIDMEM_NODE_SHARED_INFO)));
++
++#ifdef __QNXNTO__
++#if gcdUSE_VIDMEM_PER_PID
++ node->VidMem.processID = memory->pid;
++ node->VidMem.physical = memory->baseAddress + BaseAddress;
++ gcmkONERROR(gckOS_GetLogicalAddressProcess(Os,
++ node->VidMem.processID,
++ node->VidMem.physical,
++ &node->VidMem.logical));
++#else
++ node->VidMem.processID = 0;
++ node->VidMem.logical = gcvNULL;
++#endif
++#endif
++
++ /* Initialize the linked list of nodes. */
++ memory->sentinel[i].VidMem.next =
++ memory->sentinel[i].VidMem.prev =
++ memory->sentinel[i].VidMem.nextFree =
++ memory->sentinel[i].VidMem.prevFree = node;
++
++ /* Mark sentinel. */
++ memory->sentinel[i].VidMem.bytes = 0;
++
++ /* Adjust address for next bank. */
++ BaseAddress += bytes;
++ Bytes -= bytes;
++ banks ++;
++ }
++
++ /* Assign all the bank mappings. */
++ memory->mapping[gcvSURF_RENDER_TARGET] = banks - 1;
++ memory->mapping[gcvSURF_BITMAP] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_DEPTH] = banks - 1;
++ memory->mapping[gcvSURF_HIERARCHICAL_DEPTH] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_TEXTURE] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_VERTEX] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_INDEX] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_TILE_STATUS] = banks - 1;
++ if (banks > 1) --banks;
++ memory->mapping[gcvSURF_TYPE_UNKNOWN] = 0;
++
++#if gcdENABLE_VG
++ memory->mapping[gcvSURF_IMAGE] = 0;
++ memory->mapping[gcvSURF_MASK] = 0;
++ memory->mapping[gcvSURF_SCISSOR] = 0;
++#endif
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] INDEX: bank %d",
++ memory->mapping[gcvSURF_INDEX]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] VERTEX: bank %d",
++ memory->mapping[gcvSURF_VERTEX]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] TEXTURE: bank %d",
++ memory->mapping[gcvSURF_TEXTURE]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] RENDER_TARGET: bank %d",
++ memory->mapping[gcvSURF_RENDER_TARGET]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] DEPTH: bank %d",
++ memory->mapping[gcvSURF_DEPTH]);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "[GALCORE] TILE_STATUS: bank %d",
++ memory->mapping[gcvSURF_TILE_STATUS]);
++
++ /* Return pointer to the gckVIDMEM object. */
++ *Memory = memory;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Memory=0x%x", *Memory);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ if (memory != gcvNULL)
++ {
++ for (i = 0; i < banks; ++i)
++ {
++ /* Free the heap. */
++ gcmkASSERT(memory->sentinel[i].VidMem.next != gcvNULL);
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, memory->sentinel[i].VidMem.next));
++ }
++
++ /* Free the object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, memory));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
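++
++/* Illustration (not part of the driver): with a BankSize that yields four
++** banks, the mapping above resolves to
++**
++**     RENDER_TARGET / BITMAP        -> bank 3
++**     DEPTH / HIERARCHICAL_DEPTH    -> bank 2
++**     TEXTURE                       -> bank 1
++**     VERTEX / INDEX / TILE_STATUS  -> bank 0
++**
++** so surface types that are streamed together tend to land in different
++** memory banks.
++*/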
++
++/*******************************************************************************
++**
++** gckVIDMEM_Destroy
++**
++** Destroy an gckVIDMEM object.
++**
++** INPUT:
++**
++** gckVIDMEM Memory
++** Pointer to an gckVIDMEM object to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVIDMEM_Destroy(
++ IN gckVIDMEM Memory
++ )
++{
++ gcuVIDMEM_NODE_PTR node, next;
++ gctINT i;
++
++ gcmkHEADER_ARG("Memory=0x%x", Memory);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
++
++ /* Walk all sentinels. */
++ for (i = 0; i < gcmCOUNTOF(Memory->sentinel); ++i)
++ {
++        /* Bail out if the heap is not used. */
++ if (Memory->sentinel[i].VidMem.next == gcvNULL)
++ {
++ break;
++ }
++
++ /* Walk all the nodes until we reach the sentinel. */
++ for (node = Memory->sentinel[i].VidMem.next;
++ node->VidMem.bytes != 0;
++ node = next)
++ {
++ /* Save pointer to the next node. */
++ next = node->VidMem.next;
++
++ /* Free the node. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Memory->os, node));
++ }
++ }
++
++ /* Mark the object as unknown. */
++ Memory->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckVIDMEM object. */
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Memory->os, Memory));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckVIDMEM_Allocate
++**
++** Allocate rectangular memory from the gckVIDMEM object.
++**
++** INPUT:
++**
++** gckVIDMEM Memory
++** Pointer to an gckVIDMEM object.
++**
++** gctUINT Width
++** Width of rectangle to allocate. Make sure the width is properly
++** aligned.
++**
++** gctUINT Height
++** Height of rectangle to allocate. Make sure the height is properly
++** aligned.
++**
++** gctUINT Depth
++**          Depth of rectangle to allocate. This equals the number of
++** rectangles to allocate contiguously (i.e., for cubic maps and volume
++** textures).
++**
++** gctUINT BytesPerPixel
++** Number of bytes per pixel.
++**
++** gctUINT32 Alignment
++** Byte alignment for allocation.
++**
++** gceSURF_TYPE Type
++**          Type of surface to allocate (used by bank optimization).
++**
++** OUTPUT:
++**
++** gcuVIDMEM_NODE_PTR * Node
++** Pointer to a variable that will hold the allocated memory node.
++*/
++gceSTATUS
++gckVIDMEM_Allocate(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM Memory,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Depth,
++ IN gctUINT BytesPerPixel,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gctSIZE_T bytes;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Memory=0x%x Width=%u Height=%u Depth=%u BytesPerPixel=%u "
++ "Alignment=%u Type=%d",
++ Memory, Width, Height, Depth, BytesPerPixel, Alignment,
++ Type);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
++ gcmkVERIFY_ARGUMENT(Width > 0);
++ gcmkVERIFY_ARGUMENT(Height > 0);
++ gcmkVERIFY_ARGUMENT(Depth > 0);
++ gcmkVERIFY_ARGUMENT(BytesPerPixel > 0);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++
++ /* Compute linear size. */
++ bytes = Width * Height * Depth * BytesPerPixel;
++
++ /* Allocate through linear function. */
++ gcmkONERROR(
++ gckVIDMEM_AllocateLinear(Kernel, Memory, bytes, Alignment, Type, Node));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Node=0x%x", *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdENABLE_BANK_ALIGNMENT
++
++#if !gcdBANK_BIT_START
++#error gcdBANK_BIT_START not defined.
++#endif
++
++#if !gcdBANK_BIT_END
++#error gcdBANK_BIT_END not defined.
++#endif
++/*******************************************************************************
++** _GetSurfaceBankAlignment
++**
++**  Return the offset alignment required to make BaseAddress
++**  properly aligned.
++**
++** INPUT:
++**
++** gceSURF_TYPE Type
++** Type of allocation.
++**
++** gctUINT32 BaseAddress
++** Base address of current video memory node.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR AlignmentOffset
++** Pointer to a variable that will hold the number of bytes to skip in
++**          the current video memory node in order to make the allocation bank
++** aligned.
++*/
++static gceSTATUS
++_GetSurfaceBankAlignment(
++ IN gceSURF_TYPE Type,
++ IN gctUINT32 BaseAddress,
++ OUT gctUINT32_PTR AlignmentOffset
++ )
++{
++ gctUINT32 bank;
++ /* To retrieve the bank. */
++ static const gctUINT32 bankMask = (0xFFFFFFFF << gcdBANK_BIT_START)
++ ^ (0xFFFFFFFF << (gcdBANK_BIT_END + 1));
++
++ /* To retrieve the bank and all the lower bytes. */
++ static const gctUINT32 byteMask = ~(0xFFFFFFFF << (gcdBANK_BIT_END + 1));
++
++ gcmkHEADER_ARG("Type=%d BaseAddress=0x%x ", Type, BaseAddress);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(AlignmentOffset != gcvNULL);
++
++ switch (Type)
++ {
++ case gcvSURF_RENDER_TARGET:
++ bank = (BaseAddress & bankMask) >> (gcdBANK_BIT_START);
++
++ /* Align to the first bank. */
++ *AlignmentOffset = (bank == 0) ?
++ 0 :
++ ((1 << (gcdBANK_BIT_END + 1)) + 0) - (BaseAddress & byteMask);
++ break;
++
++ case gcvSURF_DEPTH:
++ bank = (BaseAddress & bankMask) >> (gcdBANK_BIT_START);
++
++ /* Align to the third bank. */
++ *AlignmentOffset = (bank == 2) ?
++ 0 :
++ ((1 << (gcdBANK_BIT_END + 1)) + (2 << gcdBANK_BIT_START)) - (BaseAddress & byteMask);
++
++ /* Add a channel offset at the channel bit. */
++ *AlignmentOffset += (1 << gcdBANK_CHANNEL_BIT);
++ break;
++
++ default:
++ /* no alignment needed. */
++ *AlignmentOffset = 0;
++ }
++
++ /* Return the status. */
++ gcmkFOOTER_ARG("*AlignmentOffset=%u", *AlignmentOffset);
++ return gcvSTATUS_OK;
++}
++#endif
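++
++/* Worked example (illustrative values, not the shipped configuration):
++** assume gcdBANK_BIT_START = 12 and gcdBANK_BIT_END = 14, so
++**
++**     bankMask = 0x00007000    (bits 14..12)
++**     byteMask = 0x00007FFF    (bits 14..0)
++**
++** For a render target at BaseAddress = 0x00012345:
++**     bank             = (0x12345 & 0x7000) >> 12 = 2
++**     *AlignmentOffset = (1 << 15) - (0x12345 & 0x7FFF) = 0x5CBB
++** and 0x12345 + 0x5CBB = 0x18000, which sits at the start of bank 0 again.
++*/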
++
++static gcuVIDMEM_NODE_PTR
++_FindNode(
++ IN gckVIDMEM Memory,
++ IN gctINT Bank,
++ IN gctSIZE_T Bytes,
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32_PTR Alignment
++ )
++{
++ gcuVIDMEM_NODE_PTR node;
++ gctUINT32 alignment;
++
++#if gcdENABLE_BANK_ALIGNMENT
++ gctUINT32 bankAlignment;
++ gceSTATUS status;
++#endif
++
++ if (Memory->sentinel[Bank].VidMem.nextFree == gcvNULL)
++ {
++ /* No free nodes left. */
++ return gcvNULL;
++ }
++
++#if gcdENABLE_BANK_ALIGNMENT
++ /* Walk all free nodes until we have one that is big enough or we have
++ ** reached the sentinel. */
++ for (node = Memory->sentinel[Bank].VidMem.nextFree;
++ node->VidMem.bytes != 0;
++ node = node->VidMem.nextFree)
++ {
++ gcmkONERROR(_GetSurfaceBankAlignment(
++ Type,
++ node->VidMem.memory->baseAddress + node->VidMem.offset,
++ &bankAlignment));
++
++ bankAlignment = gcmALIGN(bankAlignment, *Alignment);
++
++ /* Compute number of bytes to skip for alignment. */
++ alignment = (*Alignment == 0)
++ ? 0
++ : (*Alignment - (node->VidMem.offset % *Alignment));
++
++ if (alignment == *Alignment)
++ {
++ /* Node is already aligned. */
++ alignment = 0;
++ }
++
++ if (node->VidMem.bytes >= Bytes + alignment + bankAlignment)
++ {
++ /* This node is big enough. */
++ *Alignment = alignment + bankAlignment;
++ return node;
++ }
++ }
++#endif
++
++ /* Walk all free nodes until we have one that is big enough or we have
++ reached the sentinel. */
++ for (node = Memory->sentinel[Bank].VidMem.nextFree;
++ node->VidMem.bytes != 0;
++ node = node->VidMem.nextFree)
++ {
++
++ gctINT modulo = gckMATH_ModuloInt(node->VidMem.offset, *Alignment);
++
++ /* Compute number of bytes to skip for alignment. */
++ alignment = (*Alignment == 0) ? 0 : (*Alignment - modulo);
++
++ if (alignment == *Alignment)
++ {
++ /* Node is already aligned. */
++ alignment = 0;
++ }
++
++ if (node->VidMem.bytes >= Bytes + alignment)
++ {
++ /* This node is big enough. */
++ *Alignment = alignment;
++ return node;
++ }
++ }
++
++#if gcdENABLE_BANK_ALIGNMENT
++OnError:
++#endif
++ /* Not enough memory. */
++ return gcvNULL;
++}
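++
++/* Illustration (not part of the driver): for a free node at offset 0x1040
++** and *Alignment = 0x100, the skip is 0x100 - (0x1040 % 0x100) = 0xC0, so
++** the node only qualifies if it holds Bytes + 0xC0; an already aligned
++** offset produces a skip equal to *Alignment, which is folded back to 0.
++*/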
++
++/*******************************************************************************
++**
++** gckVIDMEM_AllocateLinear
++**
++** Allocate linear memory from the gckVIDMEM object.
++**
++** INPUT:
++**
++** gckVIDMEM Memory
++** Pointer to an gckVIDMEM object.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** gctUINT32 Alignment
++** Byte alignment for allocation.
++**
++** gceSURF_TYPE Type
++**          Type of surface to allocate (used by bank optimization).
++**
++** OUTPUT:
++**
++** gcuVIDMEM_NODE_PTR * Node
++** Pointer to a variable that will hold the allocated memory node.
++*/
++gceSTATUS
++gckVIDMEM_AllocateLinear(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM Memory,
++ IN gctSIZE_T Bytes,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ )
++{
++ gceSTATUS status;
++ gcuVIDMEM_NODE_PTR node;
++ gctUINT32 alignment;
++ gctINT bank, i;
++ gctBOOL acquired = gcvFALSE;
++#if gcdSMALL_BLOCK_SIZE
++ gctBOOL force_allocate = (Type == gcvSURF_TILE_STATUS) || (Type & gcvSURF_VG);
++#endif
++
++ gcmkHEADER_ARG("Memory=0x%x Bytes=%lu Alignment=%u Type=%d",
++ Memory, Bytes, Alignment, Type);
++
++ Type &= ~gcvSURF_VG;
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Type < gcvSURF_NUM_TYPES);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Memory->os, Kernel->vidmemMutex, gcvINFINITE));
++
++ acquired = gcvTRUE;
++#if !gcdUSE_VIDMEM_PER_PID
++
++ if (Bytes > Memory->freeBytes)
++ {
++ /* Not enough memory. */
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ goto OnError;
++ }
++#endif
++
++#if gcdSMALL_BLOCK_SIZE
++ if ((!force_allocate) && (Memory->freeBytes < (Memory->bytes/gcdRATIO_FOR_SMALL_MEMORY))
++ && (Bytes >= gcdSMALL_BLOCK_SIZE)
++ )
++ {
++        /* The remaining memory is reserved for small allocations. */
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ goto OnError;
++ }
++#endif
++
++ /* Find the default bank for this surface type. */
++ gcmkASSERT((gctINT) Type < gcmCOUNTOF(Memory->mapping));
++ bank = Memory->mapping[Type];
++ alignment = Alignment;
++
++#if gcdUSE_VIDMEM_PER_PID
++ if (Bytes <= Memory->freeBytes)
++ {
++#endif
++ /* Find a free node in the default bank. */
++ node = _FindNode(Memory, bank, Bytes, Type, &alignment);
++
++ /* Out of memory? */
++ if (node == gcvNULL)
++ {
++ /* Walk all lower banks. */
++ for (i = bank - 1; i >= 0; --i)
++ {
++ /* Find a free node inside the current bank. */
++ node = _FindNode(Memory, i, Bytes, Type, &alignment);
++ if (node != gcvNULL)
++ {
++ break;
++ }
++ }
++ }
++
++ if (node == gcvNULL)
++ {
++ /* Walk all upper banks. */
++ for (i = bank + 1; i < gcmCOUNTOF(Memory->sentinel); ++i)
++ {
++ if (Memory->sentinel[i].VidMem.nextFree == gcvNULL)
++ {
++ /* Abort when we reach unused banks. */
++ break;
++ }
++
++ /* Find a free node inside the current bank. */
++ node = _FindNode(Memory, i, Bytes, Type, &alignment);
++ if (node != gcvNULL)
++ {
++ break;
++ }
++ }
++ }
++#if gcdUSE_VIDMEM_PER_PID
++ }
++#endif
++
++ if (node == gcvNULL)
++ {
++ /* Out of memory. */
++#if gcdUSE_VIDMEM_PER_PID
++ /* Allocate more memory from shared pool. */
++ gctSIZE_T bytes;
++ gctPHYS_ADDR physical_temp;
++ gctUINT32 physical;
++ gctPOINTER logical;
++
++ bytes = gcmALIGN(Bytes, gcdUSE_VIDMEM_PER_PID_SIZE);
++
++ gcmkONERROR(gckOS_AllocateContiguous(Memory->os,
++ gcvTRUE,
++ &bytes,
++ &physical_temp,
++ &logical));
++
++        /* Physical address is returned as 0 for user space; workaround. */
++ if (physical_temp == gcvNULL)
++ {
++ gcmkONERROR(gckOS_GetPhysicalAddress(Memory->os, logical, &physical));
++ }
++
++ /* Allocate one gcuVIDMEM_NODE union. */
++ gcmkONERROR(
++ gckOS_Allocate(Memory->os,
++ gcmSIZEOF(gcuVIDMEM_NODE),
++ (gctPOINTER *) &node));
++
++ /* Initialize gcuVIDMEM_NODE union. */
++ node->VidMem.memory = Memory;
++
++ node->VidMem.offset = 0;
++ node->VidMem.bytes = bytes;
++ node->VidMem.alignment = 0;
++ node->VidMem.physical = physical;
++ node->VidMem.pool = gcvPOOL_UNKNOWN;
++
++ node->VidMem.locked = 0;
++
++#ifdef __QNXNTO__
++ gcmkONERROR(gckOS_GetProcessID(&node->VidMem.processID));
++ node->VidMem.logical = logical;
++ gcmkASSERT(logical != gcvNULL);
++#endif
++
++ /* Insert node behind sentinel node. */
++ node->VidMem.next = Memory->sentinel[bank].VidMem.next;
++ node->VidMem.prev = &Memory->sentinel[bank];
++ Memory->sentinel[bank].VidMem.next = node->VidMem.next->VidMem.prev = node;
++
++ /* Insert free node behind sentinel node. */
++ node->VidMem.nextFree = Memory->sentinel[bank].VidMem.nextFree;
++ node->VidMem.prevFree = &Memory->sentinel[bank];
++ Memory->sentinel[bank].VidMem.nextFree = node->VidMem.nextFree->VidMem.prevFree = node;
++
++ Memory->freeBytes += bytes;
++#else
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ goto OnError;
++#endif
++ }
++
++ /* Do we have an alignment? */
++ if (alignment > 0)
++ {
++ /* Split the node so it is aligned. */
++ if (_Split(Memory->os, node, alignment))
++ {
++ /* Successful split, move to aligned node. */
++ node = node->VidMem.next;
++
++ /* Remove alignment. */
++ alignment = 0;
++ }
++ }
++
++ /* Do we have enough memory after the allocation to split it? */
++ if (node->VidMem.bytes - Bytes > Memory->threshold)
++ {
++ /* Adjust the node size. */
++ _Split(Memory->os, node, Bytes);
++ }
++
++ /* Remove the node from the free list. */
++ node->VidMem.prevFree->VidMem.nextFree = node->VidMem.nextFree;
++ node->VidMem.nextFree->VidMem.prevFree = node->VidMem.prevFree;
++ node->VidMem.nextFree =
++ node->VidMem.prevFree = gcvNULL;
++
++ /* Fill in the information. */
++ node->VidMem.alignment = alignment;
++ node->VidMem.memory = Memory;
++#ifdef __QNXNTO__
++#if !gcdUSE_VIDMEM_PER_PID
++ node->VidMem.logical = gcvNULL;
++ gcmkONERROR(gckOS_GetProcessID(&node->VidMem.processID));
++#else
++ gcmkASSERT(node->VidMem.logical != gcvNULL);
++#endif
++#endif
++
++ /* Adjust the number of free bytes. */
++ Memory->freeBytes -= node->VidMem.bytes;
++
++ node->VidMem.freePending = gcvFALSE;
++
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
++ node->VidMem.kernelVirtual = gcvNULL;
++#endif
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Kernel->vidmemMutex));
++
++ /* Return the pointer to the node. */
++ *Node = node;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Allocated %u bytes @ 0x%x [0x%08X]",
++ node->VidMem.bytes, node, node->VidMem.offset);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Node=0x%x", *Node);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Kernel->vidmemMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
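++
++/* Minimal usage sketch (illustrative only; 'kernel' and 'memory' stand for
++** previously constructed objects, error paths elided):
++**
++**     gcuVIDMEM_NODE_PTR node;
++**     gctUINT32 address;
++**
++**     gcmkONERROR(gckVIDMEM_AllocateLinear(kernel, memory,
++**                                          4096, 64, gcvSURF_VERTEX, &node));
++**     gcmkONERROR(gckVIDMEM_Lock(kernel, node, gcvFALSE, &address));
++**     ... program the hardware with 'address' ...
++**     gcmkONERROR(gckVIDMEM_Unlock(kernel, node, gcvSURF_VERTEX, gcvNULL));
++**     gcmkONERROR(gckVIDMEM_Free(kernel, node));
++*/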
++
++/*******************************************************************************
++**
++** gckVIDMEM_Free
++**
++** Free an allocated video memory node.
++**
++** INPUT:
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVIDMEM_Free(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node
++ )
++{
++ gceSTATUS status;
++ gckKERNEL kernel = gcvNULL;
++ gckVIDMEM memory = gcvNULL;
++ gcuVIDMEM_NODE_PTR node;
++ gckOS os = gcvNULL;
++ gctBOOL acquired = gcvFALSE;
++ gctINT32 i, totalLocked;
++
++ gcmkHEADER_ARG("Node=0x%x", Node);
++
++ /* Acquire the mutex. */
++ gcmkONERROR(
++ gckOS_AcquireMutex(Kernel->os, Kernel->vidmemMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ /* Verify the arguments. */
++ if ((Node == gcvNULL)
++ || (Node->VidMem.memory == gcvNULL)
++ )
++ {
++ /* Invalid object. */
++ gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
++ }
++
++ /**************************** Video Memory ********************************/
++
++ if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ if (Node->VidMem.locked > 0)
++ {
++            /* Client still has a lock; defer the free until the lock count reaches 0. */
++ Node->VidMem.freePending = gcvTRUE;
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->vidmemMutex));
++ acquired = gcvFALSE;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Node 0x%x is locked (%d)... deferring free.",
++ Node, Node->VidMem.locked);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Extract pointer to gckVIDMEM object owning the node. */
++ memory = Node->VidMem.memory;
++
++#ifdef __QNXNTO__
++#if !gcdUSE_VIDMEM_PER_PID
++ /* Reset. */
++ Node->VidMem.processID = 0;
++ Node->VidMem.logical = gcvNULL;
++#endif
++
++ /* Don't try to re-free an already freed node. */
++ if ((Node->VidMem.nextFree == gcvNULL)
++ && (Node->VidMem.prevFree == gcvNULL)
++ )
++#endif
++ {
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
++ if (Node->VidMem.kernelVirtual)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "%s(%d) Unmap %x from kernel space.",
++ __FUNCTION__, __LINE__,
++ Node->VidMem.kernelVirtual);
++
++ gcmkVERIFY_OK(
++ gckOS_UnmapPhysical(memory->os,
++ Node->VidMem.kernelVirtual,
++ Node->VidMem.bytes));
++
++ Node->VidMem.kernelVirtual = gcvNULL;
++ }
++#endif
++
++ /* Check if Node is already freed. */
++ if (Node->VidMem.nextFree)
++ {
++                /* Node is already freed. */
++ gcmkONERROR(gcvSTATUS_INVALID_DATA);
++ }
++
++ /* Update the number of free bytes. */
++ memory->freeBytes += Node->VidMem.bytes;
++
++ /* Find the next free node. */
++ for (node = Node->VidMem.next;
++ node != gcvNULL && node->VidMem.nextFree == gcvNULL;
++ node = node->VidMem.next) ;
++
++ /* Insert this node in the free list. */
++ Node->VidMem.nextFree = node;
++ Node->VidMem.prevFree = node->VidMem.prevFree;
++
++ Node->VidMem.prevFree->VidMem.nextFree =
++ node->VidMem.prevFree = Node;
++
++ /* Is the next node a free node and not the sentinel? */
++ if ((Node->VidMem.next == Node->VidMem.nextFree)
++ && (Node->VidMem.next->VidMem.bytes != 0)
++ )
++ {
++ /* Merge this node with the next node. */
++ gcmkONERROR(_Merge(memory->os, node = Node));
++ gcmkASSERT(node->VidMem.nextFree != node);
++ gcmkASSERT(node->VidMem.prevFree != node);
++ }
++
++ /* Is the previous node a free node and not the sentinel? */
++ if ((Node->VidMem.prev == Node->VidMem.prevFree)
++ && (Node->VidMem.prev->VidMem.bytes != 0)
++ )
++ {
++ /* Merge this node with the previous node. */
++ gcmkONERROR(_Merge(memory->os, node = Node->VidMem.prev));
++ gcmkASSERT(node->VidMem.nextFree != node);
++ gcmkASSERT(node->VidMem.prevFree != node);
++ }
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->vidmemMutex));
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Node 0x%x is freed.",
++ Node);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /*************************** Virtual Memory *******************************/
++
++ /* Get gckKERNEL object. */
++ kernel = Node->Virtual.kernel;
++
++ /* Verify the gckKERNEL object pointer. */
++ gcmkVERIFY_OBJECT(kernel, gcvOBJ_KERNEL);
++
++ /* Get the gckOS object pointer. */
++ os = kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ for (i = 0, totalLocked = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ totalLocked += Node->Virtual.lockeds[i];
++ }
++
++ if (totalLocked > 0)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_VIDMEM,
++ "gckVIDMEM_Free: Virtual node 0x%x is locked (%d)",
++ Node, totalLocked);
++
++ /* Set Flag */
++ Node->Virtual.freed = gcvTRUE;
++ }
++ else
++ {
++ /* Free the virtual memory. */
++ gcmkVERIFY_OK(gckOS_FreePagedMemory(kernel->os,
++ Node->Virtual.physical,
++ Node->Virtual.bytes));
++
++ /* Destroy the gcuVIDMEM_NODE union. */
++ gcmkVERIFY_OK(gckVIDMEM_DestroyVirtual(Node));
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->vidmemMutex));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->vidmemMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++
++#ifdef __QNXNTO__
++/*******************************************************************************
++**
++** gcoVIDMEM_FreeHandleMemory
++**
++**  gckVIDMEM_FreeHandleMemory
++**
++** INPUT:
++**
++**      gckVIDMEM Memory
++**          Pointer to an gckVIDMEM object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckVIDMEM_FreeHandleMemory(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM Memory,
++ IN gctUINT32 Pid
++ )
++{
++ gceSTATUS status;
++ gctBOOL mutex = gcvFALSE;
++ gcuVIDMEM_NODE_PTR node;
++ gctINT i;
++ gctUINT32 nodeCount = 0, byteCount = 0;
++ gctBOOL again;
++
++ gcmkHEADER_ARG("Kernel=0x%x, Memory=0x%x Pid=0x%u", Kernel, Memory, Pid);
++
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
++
++ gcmkONERROR(gckOS_AcquireMutex(Memory->os, Memory->mutex, gcvINFINITE));
++ mutex = gcvTRUE;
++
++ /* Walk all sentinels. */
++ for (i = 0; i < gcmCOUNTOF(Memory->sentinel); ++i)
++ {
++ /* Bail out of the heap if it is not used. */
++ if (Memory->sentinel[i].VidMem.next == gcvNULL)
++ {
++ break;
++ }
++
++ do
++ {
++ again = gcvFALSE;
++
++ /* Walk all the nodes until we reach the sentinel. */
++ for (node = Memory->sentinel[i].VidMem.next;
++ node->VidMem.bytes != 0;
++ node = node->VidMem.next)
++ {
++                /* Free the node if it was allocated by the given Pid. */
++ if (node->VidMem.processID == Pid)
++ {
++ /* Unlock video memory. */
++ while (node->VidMem.locked > 0)
++ {
++ gckVIDMEM_Unlock(Kernel, node, gcvSURF_TYPE_UNKNOWN, gcvNULL);
++ }
++
++ nodeCount++;
++ byteCount += node->VidMem.bytes;
++
++ /* Free video memory. */
++                    gcmkVERIFY_OK(gckVIDMEM_Free(Kernel, node));
++
++ /*
++ * Freeing may cause a merge which will invalidate our iteration.
++ * Don't be clever, just restart.
++ */
++ again = gcvTRUE;
++
++ break;
++ }
++#if gcdUSE_VIDMEM_PER_PID
++ else
++ {
++ gcmkASSERT(node->VidMem.processID == Pid);
++ }
++#endif
++ }
++ }
++ while (again);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mutex)
++ {
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** _NeedVirtualMapping
++**
++**  Determine whether a GPU page table must be set up for the video node.
++**
++** INPUT:
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE union.
++**
++** gceCORE Core
++** Id of current GPU.
++**
++** OUTPUT:
++** gctBOOL * NeedMapping
++**          Pointer to a variable that receives whether Node needs mapping.
++*/
++static gceSTATUS
++_NeedVirtualMapping(
++ IN gckKERNEL Kernel,
++ IN gceCORE Core,
++ IN gcuVIDMEM_NODE_PTR Node,
++ OUT gctBOOL * NeedMapping
++)
++{
++ gceSTATUS status;
++ gctUINT32 phys;
++ gctUINT32 end;
++ gcePOOL pool;
++ gctUINT32 offset;
++ gctUINT32 baseAddress;
++
++ gcmkHEADER_ARG("Node=0x%X", Node);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Kernel != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Node != gcvNULL);
++ gcmkVERIFY_ARGUMENT(NeedMapping != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Core < gcdMAX_GPU_COUNT);
++
++ if (Node->Virtual.contiguous)
++ {
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ *NeedMapping = gcvFALSE;
++ }
++ else
++#endif
++ {
++ /* Convert logical address into a physical address. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddress(Kernel->os, Node->Virtual.logical, &phys));
++
++ gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &baseAddress));
++
++ gcmkASSERT(phys >= baseAddress);
++
++ /* Subtract baseAddress to get a GPU address used for programming. */
++ phys -= baseAddress;
++
++            /* If any part of the region belongs to gcvPOOL_VIRTUAL,
++            ** the whole region has to be mapped. */
++ end = phys + Node->Virtual.bytes - 1;
++
++ gcmkONERROR(gckHARDWARE_SplitMemory(
++ Kernel->hardware, end, &pool, &offset
++ ));
++
++ *NeedMapping = (pool == gcvPOOL_VIRTUAL);
++ }
++ }
++ else
++ {
++ *NeedMapping = gcvTRUE;
++ }
++
++ gcmkFOOTER_ARG("*NeedMapping=%d", *NeedMapping);
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
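++
++/* Illustration (not part of the driver): a contiguous buffer that lies
++** entirely inside a directly addressable pool can be programmed with its
++** GPU physical address and needs no page table; once any byte of the range
++** falls into gcvPOOL_VIRTUAL, or the allocation is non-contiguous, the
++** whole range must be mapped through the (VG)MMU.
++*/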
++
++/*******************************************************************************
++**
++** gckVIDMEM_Lock
++**
++** Lock a video memory node and return its hardware specific address.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a gcuVIDMEM_NODE union.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++** Pointer to a variable that will hold the hardware specific address.
++*/
++gceSTATUS
++gckVIDMEM_Lock(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gctBOOL Cacheable,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL locked = gcvFALSE;
++ gckOS os = gcvNULL;
++ gctBOOL needMapping;
++ gctUINT32 baseAddress;
++
++ gcmkHEADER_ARG("Node=0x%x", Node);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->vidmemMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ if ((Node == gcvNULL)
++ || (Node->VidMem.memory == gcvNULL)
++ )
++ {
++ /* Invalid object. */
++ gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
++ }
++
++ /**************************** Video Memory ********************************/
++
++ if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ if (Cacheable == gcvTRUE)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
++ }
++
++ /* Increment the lock count. */
++ Node->VidMem.locked ++;
++
++ /* Return the physical address of the node. */
++#if !gcdUSE_VIDMEM_PER_PID
++ *Address = Node->VidMem.memory->baseAddress
++ + Node->VidMem.offset
++ + Node->VidMem.alignment;
++#else
++ *Address = Node->VidMem.physical;
++#endif
++
++ /* Get hardware specific address. */
++#if gcdENABLE_VG
++ if (Kernel->vg == gcvNULL)
++#endif
++ {
++ if (Kernel->hardware->mmuVersion == 0)
++ {
++ /* Convert physical to GPU address for old mmu. */
++ gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &baseAddress));
++ gcmkASSERT(*Address > baseAddress);
++ *Address -= baseAddress;
++ }
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Locked node 0x%x (%d) @ 0x%08X",
++ Node,
++ Node->VidMem.locked,
++ *Address);
++ }
++
++ /*************************** Virtual Memory *******************************/
++
++ else
++ {
++ /* Verify the gckKERNEL object pointer. */
++ gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL);
++
++ /* Extract the gckOS object pointer. */
++ os = Node->Virtual.kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++#if gcdPAGED_MEMORY_CACHEABLE
++ /* Force video memory cacheable. */
++ Cacheable = gcvTRUE;
++#endif
++
++ gcmkONERROR(
++ gckOS_LockPages(os,
++ Node->Virtual.physical,
++ Node->Virtual.bytes,
++ Cacheable,
++ &Node->Virtual.logical,
++ &Node->Virtual.pageCount));
++
++ /* Increment the lock count. */
++ if (Node->Virtual.lockeds[Kernel->core] ++ == 0)
++ {
++ /* Is this node pending for a final unlock? */
++#ifdef __QNXNTO__
++ if (!Node->Virtual.contiguous && Node->Virtual.unlockPendings[Kernel->core])
++ {
++ /* Make sure we have a page table. */
++ gcmkASSERT(Node->Virtual.pageTables[Kernel->core] != gcvNULL);
++
++ /* Remove pending unlock. */
++ Node->Virtual.unlockPendings[Kernel->core] = gcvFALSE;
++ }
++
++ /* First lock - create a page table. */
++ gcmkASSERT(Node->Virtual.pageTables[Kernel->core] == gcvNULL);
++
++ /* Make sure we mark our node as not flushed. */
++ Node->Virtual.unlockPendings[Kernel->core] = gcvFALSE;
++#endif
++
++ locked = gcvTRUE;
++
++ gcmkONERROR(_NeedVirtualMapping(Kernel, Kernel->core, Node, &needMapping));
++
++ if (needMapping == gcvFALSE)
++ {
++ /* Get hardware specific address. */
++#if gcdENABLE_VG
++ if (Kernel->vg != gcvNULL)
++ {
++ gcmkONERROR(gckVGHARDWARE_ConvertLogical(Kernel->vg->hardware,
++ Node->Virtual.logical,
++ &Node->Virtual.addresses[Kernel->core]));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(gckHARDWARE_ConvertLogical(Kernel->hardware,
++ Node->Virtual.logical,
++ &Node->Virtual.addresses[Kernel->core]));
++ }
++ }
++ else
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg != gcvNULL)
++ {
++ /* Allocate pages inside the MMU. */
++ gcmkONERROR(
++ gckVGMMU_AllocatePages(Kernel->vg->mmu,
++ Node->Virtual.pageCount,
++ &Node->Virtual.pageTables[Kernel->core],
++ &Node->Virtual.addresses[Kernel->core]));
++ }
++ else
++#endif
++ {
++ /* Allocate pages inside the MMU. */
++ gcmkONERROR(
++ gckMMU_AllocatePagesEx(Kernel->mmu,
++ Node->Virtual.pageCount,
++ Node->Virtual.type,
++ &Node->Virtual.pageTables[Kernel->core],
++ &Node->Virtual.addresses[Kernel->core]));
++ }
++
++ Node->Virtual.lockKernels[Kernel->core] = Kernel;
++
++ /* Map the pages. */
++#ifdef __QNXNTO__
++ gcmkONERROR(
++ gckOS_MapPagesEx(os,
++ Kernel->core,
++ Node->Virtual.physical,
++ Node->Virtual.logical,
++ Node->Virtual.pageCount,
++ Node->Virtual.pageTables[Kernel->core]));
++#else
++ gcmkONERROR(
++ gckOS_MapPagesEx(os,
++ Kernel->core,
++ Node->Virtual.physical,
++ Node->Virtual.pageCount,
++ Node->Virtual.pageTables[Kernel->core]));
++#endif
++
++#if gcdENABLE_VG
++ if (Kernel->core == gcvCORE_VG)
++ {
++ gcmkONERROR(gckVGMMU_Flush(Kernel->vg->mmu));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(gckMMU_Flush(Kernel->mmu));
++ }
++ }
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Mapped virtual node 0x%x to 0x%08X",
++ Node,
++ Node->Virtual.addresses[Kernel->core]);
++ }
++
++ /* Return hardware address. */
++ *Address = Node->Virtual.addresses[Kernel->core];
++ }
++
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->vidmemMutex));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (locked)
++ {
++ if (Node->Virtual.pageTables[Kernel->core] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg != gcvNULL)
++ {
++ /* Free the pages from the MMU. */
++ gcmkVERIFY_OK(
++ gckVGMMU_FreePages(Kernel->vg->mmu,
++ Node->Virtual.pageTables[Kernel->core],
++ Node->Virtual.pageCount));
++ }
++ else
++#endif
++ {
++ /* Free the pages from the MMU. */
++ gcmkVERIFY_OK(
++ gckMMU_FreePages(Kernel->mmu,
++ Node->Virtual.pageTables[Kernel->core],
++ Node->Virtual.pageCount));
++ }
++ Node->Virtual.pageTables[Kernel->core] = gcvNULL;
++ Node->Virtual.lockKernels[Kernel->core] = gcvNULL;
++ }
++
++ /* Unlock the pages. */
++ gcmkVERIFY_OK(
++ gckOS_UnlockPages(os,
++ Node->Virtual.physical,
++ Node->Virtual.bytes,
++ Node->Virtual.logical
++ ));
++
++ Node->Virtual.lockeds[Kernel->core]--;
++ }
++
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->vidmemMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
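++
++/* Illustration (not part of the driver): the per-core lock count makes the
++** expensive mapping work happen only on the first lock, e.g.
++**
++**     gckVIDMEM_Lock()    lockeds[core] 0 -> 1: map pages, flush the MMU
++**     gckVIDMEM_Lock()    lockeds[core] 1 -> 2: return the cached address
++**     gckVIDMEM_Unlock()  lockeds[core] 2 -> 1: count down only
++**     gckVIDMEM_Unlock()  lockeds[core] 1 -> 0: free the page table entries
++*/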
++
++/*******************************************************************************
++**
++** gckVIDMEM_Unlock
++**
++** Unlock a video memory node.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to an gckKERNEL object.
++**
++** gcuVIDMEM_NODE_PTR Node
++** Pointer to a locked gcuVIDMEM_NODE union.
++**
++** gceSURF_TYPE Type
++** Type of surface to unlock.
++**
++** gctBOOL * Asynchroneous
++** Pointer to a variable specifying whether the surface should be
++** unlocked asynchroneously or not.
++**
++** OUTPUT:
++**
++** gctBOOL * Asynchroneous
++**          Pointer to a variable that receives gcvTRUE if the surface must
++**          still be unlocked asynchroneously through an event, or gcvFALSE
++**          if the unlock completed here.
++*/
++gceSTATUS
++gckVIDMEM_Unlock(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gceSURF_TYPE Type,
++ IN OUT gctBOOL * Asynchroneous
++ )
++{
++ gceSTATUS status;
++ gckHARDWARE hardware;
++ gctPOINTER buffer;
++ gctSIZE_T requested, bufferSize;
++ gckCOMMAND command = gcvNULL;
++ gceKERNEL_FLUSH flush;
++ gckOS os = gcvNULL;
++ gctBOOL acquired = gcvFALSE;
++ gctBOOL commitEntered = gcvFALSE;
++ gctINT32 i, totalLocked;
++
++ gcmkHEADER_ARG("Node=0x%x Type=%d *Asynchroneous=%d",
++ Node, Type, gcmOPT_VALUE(Asynchroneous));
++
++ /* Grab the mutex. */
++ gcmkONERROR(gckOS_AcquireMutex(Kernel->os, Kernel->vidmemMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++
++ /* Verify the arguments. */
++ if ((Node == gcvNULL)
++ || (Node->VidMem.memory == gcvNULL)
++ )
++ {
++ /* Invalid object. */
++ gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
++ }
++
++ /**************************** Video Memory ********************************/
++
++ if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ {
++ if (Node->VidMem.locked <= 0)
++ {
++ /* The surface was not locked. */
++ status = gcvSTATUS_MEMORY_UNLOCKED;
++ goto OnError;
++ }
++
++ /* Decrement the lock count. */
++ Node->VidMem.locked --;
++
++ if (Asynchroneous != gcvNULL)
++ {
++ /* No need for any events. */
++ *Asynchroneous = gcvFALSE;
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Unlocked node 0x%x (%d)",
++ Node,
++ Node->VidMem.locked);
++
++#ifdef __QNXNTO__
++ /* Unmap the video memory */
++ if ((Node->VidMem.locked == 0) && (Node->VidMem.logical != gcvNULL))
++ {
++ if (Kernel->core == gcvCORE_VG)
++ {
++ gckKERNEL_UnmapVideoMemory(Kernel,
++ Node->VidMem.logical,
++ Node->VidMem.processID,
++ Node->VidMem.bytes);
++ Node->VidMem.logical = gcvNULL;
++ }
++ }
++#endif /* __QNXNTO__ */
++
++ if (Node->VidMem.freePending && (Node->VidMem.locked == 0))
++ {
++            /* Client has unlocked a node whose free was previously deferred; free it now. */
++ Node->VidMem.freePending = gcvFALSE;
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Deferred-freeing Node 0x%x.",
++ Node);
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->vidmemMutex));
++ acquired = gcvFALSE;
++
++ gcmkONERROR(gckVIDMEM_Free(Kernel, Node));
++ }
++ }
++
++ /*************************** Virtual Memory *******************************/
++
++ else
++ {
++ /* Verify the gckHARDWARE object pointer. */
++ hardware = Kernel->hardware;
++ gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
++
++ /* Verify the gckCOMMAND object pointer. */
++ command = Kernel->command;
++ gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
++
++ /* Get the gckOS object pointer. */
++ os = Kernel->os;
++ gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
++
++ if (Asynchroneous == gcvNULL)
++ {
++ if (Node->Virtual.lockeds[Kernel->core] == 0)
++ {
++ status = gcvSTATUS_MEMORY_UNLOCKED;
++ goto OnError;
++ }
++
++ /* Decrement lock count. */
++ -- Node->Virtual.lockeds[Kernel->core];
++
++ /* See if we can unlock the resources. */
++ if (Node->Virtual.lockeds[Kernel->core] == 0)
++ {
++ /* Free the page table. */
++ if (Node->Virtual.pageTables[Kernel->core] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (Kernel->vg != gcvNULL)
++ {
++ gcmkONERROR(
++ gckVGMMU_FreePages(Kernel->vg->mmu,
++ Node->Virtual.pageTables[Kernel->core],
++ Node->Virtual.pageCount));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(
++ gckMMU_FreePages(Kernel->mmu,
++ Node->Virtual.pageTables[Kernel->core],
++ Node->Virtual.pageCount));
++ }
++ /* Mark page table as freed. */
++ Node->Virtual.pageTables[Kernel->core] = gcvNULL;
++ Node->Virtual.lockKernels[Kernel->core] = gcvNULL;
++ }
++
++#ifdef __QNXNTO__
++ /* Mark node as unlocked. */
++ Node->Virtual.unlockPendings[Kernel->core] = gcvFALSE;
++#endif
++ }
++
++ for (i = 0, totalLocked = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ totalLocked += Node->Virtual.lockeds[i];
++ }
++
++ if (totalLocked == 0)
++ {
++                    /* The owner has already freed this node
++                    ** and we are the last one to unlock, so
++                    ** do the real free. */
++ if (Node->Virtual.freed)
++ {
++ /* Free the virtual memory. */
++ gcmkVERIFY_OK(gckOS_FreePagedMemory(Kernel->os,
++ Node->Virtual.physical,
++ Node->Virtual.bytes));
++
++ /* Destroy the gcuVIDMEM_NODE union. */
++ gcmkVERIFY_OK(gckVIDMEM_DestroyVirtual(Node));
++
++ /* Release mutex before node is destroyed */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->vidmemMutex));
++
++ acquired = gcvFALSE;
++
++ /* Node has been destroyed, so we should not touch it any more */
++ gcmkFOOTER();
++ return gcvSTATUS_OK;
++ }
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Unmapped virtual node 0x%x from 0x%08X",
++ Node, Node->Virtual.addresses[Kernel->core]);
++
++ }
++
++ else
++ {
++            /* If we need to unlock a node from virtual memory we have to be
++            ** very careful. If the node is still inside the caches we
++            ** might get a bus error later if the cache line needs to be
++            ** replaced. So we have to flush the caches before we do
++            ** anything. */
++
++            /* gckCOMMAND_EnterCommit() can't be called in an interrupt handler
++            ** because of a deadlock situation: a process calls gckCOMMAND_Commit()
++            ** and acquires Command->mutexQueue in gckCOMMAND_EnterCommit(). It then
++            ** waits for a signal which depends on the interrupt handler to be
++            ** generated; if the interrupt handler entered gckCOMMAND_EnterCommit(),
++            ** the process would never get that signal. */
++
++            /* So, flush the cache while we are still in process context, and then
++            ** ask the caller to schedule an event. */
++
++ gcmkONERROR(
++ gckOS_UnlockPages(os,
++ Node->Virtual.physical,
++ Node->Virtual.bytes,
++ Node->Virtual.logical));
++
++ if (!Node->Virtual.contiguous
++ && (Node->Virtual.lockeds[Kernel->core] == 1)
++#if gcdENABLE_VG
++ && (Kernel->vg == gcvNULL)
++#endif
++ )
++ {
++ if (Type == gcvSURF_BITMAP)
++ {
++ /* Flush 2D cache. */
++ flush = gcvFLUSH_2D;
++ }
++ else if (Type == gcvSURF_RENDER_TARGET)
++ {
++ /* Flush color cache. */
++ flush = gcvFLUSH_COLOR;
++ }
++ else if (Type == gcvSURF_DEPTH)
++ {
++ /* Flush depth cache. */
++ flush = gcvFLUSH_DEPTH;
++ }
++ else
++ {
++ /* No flush required. */
++ flush = (gceKERNEL_FLUSH) 0;
++ }
++ if(hardware)
++ {
++ gcmkONERROR(
++ gckHARDWARE_Flush(hardware, flush, gcvNULL, &requested));
++
++ if (requested != 0)
++ {
++ /* Acquire the command queue. */
++ gcmkONERROR(gckCOMMAND_EnterCommit(command, gcvFALSE));
++ commitEntered = gcvTRUE;
++
++ gcmkONERROR(gckCOMMAND_Reserve(
++ command, requested, &buffer, &bufferSize
++ ));
++
++ gcmkONERROR(gckHARDWARE_Flush(
++ hardware, flush, buffer, &bufferSize
++ ));
++
++ /* Mark node as pending. */
++#ifdef __QNXNTO__
++ Node->Virtual.unlockPendings[Kernel->core] = gcvTRUE;
++#endif
++
++ gcmkONERROR(gckCOMMAND_Execute(command, requested));
++
++ /* Release the command queue. */
++ gcmkONERROR(gckCOMMAND_ExitCommit(command, gcvFALSE));
++ commitEntered = gcvFALSE;
++ }
++ }
++ else
++ {
++                    gckOS_Print("Hardware is already freed.\n");
++ }
++ }
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
++ "Scheduled unlock for virtual node 0x%x",
++ Node);
++
++ /* Schedule the surface to be unlocked. */
++ *Asynchroneous = gcvTRUE;
++ }
++ }
++
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->vidmemMutex));
++ acquired = gcvFALSE;
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Asynchroneous=%d", gcmOPT_VALUE(Asynchroneous));
++ return gcvSTATUS_OK;
++
++OnError:
++ if (commitEntered)
++ {
++ /* Release the command queue mutex. */
++ gcmkVERIFY_OK(gckCOMMAND_ExitCommit(command, gcvFALSE));
++ }
++
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Kernel->vidmemMutex));
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
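To make the two-phase contract above concrete, here is a minimal, hypothetical caller sketch (not part of the patch); it assumes only the declarations in this file plus the driver's gcmIS_ERROR macro. The first call flushes the GPU caches and reports whether deferral is needed; the second call, passing gcvNULL for Asynchroneous, performs the real unlock (in the driver that call is issued from the scheduled event):

    static gceSTATUS
    sketch_unlock(gckKERNEL Kernel, gcuVIDMEM_NODE_PTR Node, gceSURF_TYPE Type)
    {
        gctBOOL async = gcvFALSE;
        gceSTATUS status;

        /* Phase 1: for virtual memory this flushes the caches and asks,
           via 'async', whether an event must be scheduled. */
        status = gckVIDMEM_Unlock(Kernel, Node, Type, &async);
        if (gcmIS_ERROR(status))
        {
            return status;
        }

        if (async)
        {
            /* Phase 2: in the real driver the event handler makes this call
               once the flush has landed; it is inlined here only to show
               the gcvNULL convention that triggers the real unlock. */
            status = gckVIDMEM_Unlock(Kernel, Node, Type, gcvNULL);
        }

        return status;
    }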
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_base.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_base.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_base.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_base.h 2015-07-27 23:13:06.210822785 +0200
+@@ -0,0 +1,3896 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_base_h_
++#define __gc_hal_base_h_
++
++#include "gc_hal_enum.h"
++#include "gc_hal_types.h"
++
++#include "gc_hal_dump.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++****************************** Object Declarations *****************************
++\******************************************************************************/
++
++typedef struct _gckOS * gckOS;
++typedef struct _gcoHAL * gcoHAL;
++typedef struct _gcoOS * gcoOS;
++typedef struct _gco2D * gco2D;
++
++#ifndef VIVANTE_NO_3D
++typedef struct _gco3D * gco3D;
++#endif
++
++typedef struct _gcoSURF * gcoSURF;
++typedef struct _gcsSURF_INFO * gcsSURF_INFO_PTR;
++typedef struct _gcsSURF_NODE * gcsSURF_NODE_PTR;
++typedef struct _gcsSURF_FORMAT_INFO * gcsSURF_FORMAT_INFO_PTR;
++typedef struct _gcsPOINT * gcsPOINT_PTR;
++typedef struct _gcsSIZE * gcsSIZE_PTR;
++typedef struct _gcsRECT * gcsRECT_PTR;
++typedef struct _gcsBOUNDARY * gcsBOUNDARY_PTR;
++typedef struct _gcoDUMP * gcoDUMP;
++typedef struct _gcoHARDWARE * gcoHARDWARE;
++typedef union _gcuVIDMEM_NODE * gcuVIDMEM_NODE_PTR;
++
++typedef struct gcsATOM * gcsATOM_PTR;
++
++#if gcdENABLE_VG
++typedef struct _gcoVG * gcoVG;
++typedef struct _gcsCOMPLETION_SIGNAL * gcsCOMPLETION_SIGNAL_PTR;
++typedef struct _gcsCONTEXT_MAP * gcsCONTEXT_MAP_PTR;
++#else
++typedef void * gcoVG;
++#endif
++
++#if gcdSYNC
++typedef struct _gcoFENCE * gcoFENCE;
++typedef struct _gcsSYNC_CONTEXT * gcsSYNC_CONTEXT_PTR;
++#endif
++
++typedef struct _gcoOS_SymbolsList gcoOS_SymbolsList;
++
++/******************************************************************************\
++******************************* Process local storage *************************
++\******************************************************************************/
++typedef struct _gcsPLS * gcsPLS_PTR;
++
++typedef void (* gctPLS_DESTRUCTOR) (
++ gcsPLS_PTR
++ );
++
++typedef struct _gcsPLS
++{
++ /* Global objects. */
++ gcoOS os;
++ gcoHAL hal;
++
++ /* Internal memory pool. */
++ gctSIZE_T internalSize;
++ gctPHYS_ADDR internalPhysical;
++ gctPOINTER internalLogical;
++
++ /* External memory pool. */
++ gctSIZE_T externalSize;
++ gctPHYS_ADDR externalPhysical;
++ gctPOINTER externalLogical;
++
++ /* Contiguous memory pool. */
++ gctSIZE_T contiguousSize;
++ gctPHYS_ADDR contiguousPhysical;
++ gctPOINTER contiguousLogical;
++
++ /* EGL-specific process-wide objects. */
++ gctPOINTER eglDisplayInfo;
++ gctPOINTER eglSurfaceInfo;
++ gceSURF_FORMAT eglConfigFormat;
++
++    /* ProcessID of the constructor process. */
++ gctUINT32 processID;
++#if gcdFORCE_GAL_LOAD_TWICE
++    /* ThreadID of the constructor process. */
++ gctSIZE_T threadID;
++ /* Flag for calling module destructor. */
++ gctBOOL exiting;
++#endif
++
++ /* Reference count for destructor. */
++ gcsATOM_PTR reference;
++ gctBOOL bKFS;
++#if gcdUSE_NPOT_PATCH
++ gctBOOL bNeedSupportNP2Texture;
++#endif
++
++ /* Destructor for eglDisplayInfo. */
++ gctPLS_DESTRUCTOR destructor;
++}
++gcsPLS;
++
++extern gcsPLS gcPLS;
++
++/******************************************************************************\
++******************************* Thread local storage *************************
++\******************************************************************************/
++
++typedef struct _gcsTLS * gcsTLS_PTR;
++
++typedef void (* gctTLS_DESTRUCTOR) (
++ gcsTLS_PTR
++ );
++
++typedef struct _gcsTLS
++{
++ gceHARDWARE_TYPE currentType;
++ gcoHARDWARE hardware;
++ /* Only for separated 3D and 2D */
++ gcoHARDWARE hardware2D;
++#if gcdENABLE_VG
++ gcoVGHARDWARE vg;
++ gcoVG engineVG;
++#endif /* gcdENABLE_VG */
++ gctPOINTER context;
++ gctTLS_DESTRUCTOR destructor;
++ gctBOOL ProcessExiting;
++
++#ifndef VIVANTE_NO_3D
++ gco3D engine3D;
++#endif
++#if gcdSYNC
++ gctBOOL fenceEnable;
++#endif
++ gco2D engine2D;
++ gctBOOL copied;
++
++#if gcdFORCE_GAL_LOAD_TWICE
++ /* libGAL.so handle */
++ gctHANDLE handle;
++#endif
++}
++gcsTLS;
++
++/******************************************************************************\
++********************************* Enumerations *********************************
++\******************************************************************************/
++
++typedef enum _gcePLS_VALUE
++{
++ gcePLS_VALUE_EGL_DISPLAY_INFO,
++ gcePLS_VALUE_EGL_SURFACE_INFO,
++ gcePLS_VALUE_EGL_CONFIG_FORMAT_INFO,
++ gcePLS_VALUE_EGL_DESTRUCTOR_INFO,
++}
++gcePLS_VALUE;
++
++/* Video memory pool type. */
++typedef enum _gcePOOL
++{
++ gcvPOOL_UNKNOWN = 0,
++ gcvPOOL_DEFAULT,
++ gcvPOOL_LOCAL,
++ gcvPOOL_LOCAL_INTERNAL,
++ gcvPOOL_LOCAL_EXTERNAL,
++ gcvPOOL_UNIFIED,
++ gcvPOOL_SYSTEM,
++ gcvPOOL_VIRTUAL,
++ gcvPOOL_USER,
++ gcvPOOL_CONTIGUOUS,
++ gcvPOOL_DEFAULT_FORCE_CONTIGUOUS,
++ gcvPOOL_DEFAULT_FORCE_CONTIGUOUS_CACHEABLE,
++
++ gcvPOOL_NUMBER_OF_POOLS
++}
++gcePOOL;
++
++#ifndef VIVANTE_NO_3D
++/* Blending functions. */
++typedef enum _gceBLEND_FUNCTION
++{
++ gcvBLEND_ZERO,
++ gcvBLEND_ONE,
++ gcvBLEND_SOURCE_COLOR,
++ gcvBLEND_INV_SOURCE_COLOR,
++ gcvBLEND_SOURCE_ALPHA,
++ gcvBLEND_INV_SOURCE_ALPHA,
++ gcvBLEND_TARGET_COLOR,
++ gcvBLEND_INV_TARGET_COLOR,
++ gcvBLEND_TARGET_ALPHA,
++ gcvBLEND_INV_TARGET_ALPHA,
++ gcvBLEND_SOURCE_ALPHA_SATURATE,
++ gcvBLEND_CONST_COLOR,
++ gcvBLEND_INV_CONST_COLOR,
++ gcvBLEND_CONST_ALPHA,
++ gcvBLEND_INV_CONST_ALPHA,
++}
++gceBLEND_FUNCTION;
++
++/* Blending modes. */
++typedef enum _gceBLEND_MODE
++{
++ gcvBLEND_ADD,
++ gcvBLEND_SUBTRACT,
++ gcvBLEND_REVERSE_SUBTRACT,
++ gcvBLEND_MIN,
++ gcvBLEND_MAX,
++}
++gceBLEND_MODE;
++
++/* API flags. */
++typedef enum _gceAPI
++{
++ gcvAPI_D3D = 0x1,
++ gcvAPI_OPENGL = 0x2,
++ gcvAPI_OPENVG = 0x3,
++ gcvAPI_OPENCL = 0x4,
++}
++gceAPI;
++
++/* Depth modes. */
++typedef enum _gceDEPTH_MODE
++{
++ gcvDEPTH_NONE,
++ gcvDEPTH_Z,
++ gcvDEPTH_W,
++}
++gceDEPTH_MODE;
++#endif /* VIVANTE_NO_3D */
++
++typedef enum _gceWHERE
++{
++ gcvWHERE_COMMAND,
++ gcvWHERE_RASTER,
++ gcvWHERE_PIXEL,
++}
++gceWHERE;
++
++typedef enum _gceHOW
++{
++ gcvHOW_SEMAPHORE = 0x1,
++ gcvHOW_STALL = 0x2,
++ gcvHOW_SEMAPHORE_STALL = 0x3,
++}
++gceHOW;
++
++typedef enum _gceSignalHandlerType
++{
++ gcvHANDLE_SIGFPE_WHEN_SIGNAL_CODE_IS_0 = 0x1,
++}
++gceSignalHandlerType;
++
++
++#if gcdENABLE_VG
++/* gcsHAL_Limits*/
++typedef struct _gcsHAL_LIMITS
++{
++ /* chip info */
++ gceCHIPMODEL chipModel;
++ gctUINT32 chipRevision;
++ gctUINT32 featureCount;
++ gctUINT32 *chipFeatures;
++
++ /* target caps */
++ gctUINT32 maxWidth;
++ gctUINT32 maxHeight;
++ gctUINT32 multiTargetCount;
++ gctUINT32 maxSamples;
++
++}gcsHAL_LIMITS;
++#endif
++
++/******************************************************************************\
++*********** Generic Memory Allocation Optimization Using Containers ************
++\******************************************************************************/
++
++/* Generic container definition. */
++typedef struct _gcsCONTAINER_LINK * gcsCONTAINER_LINK_PTR;
++typedef struct _gcsCONTAINER_LINK
++{
++ /* Points to the next container. */
++ gcsCONTAINER_LINK_PTR next;
++}
++gcsCONTAINER_LINK;
++
++typedef struct _gcsCONTAINER_RECORD * gcsCONTAINER_RECORD_PTR;
++typedef struct _gcsCONTAINER_RECORD
++{
++ gcsCONTAINER_RECORD_PTR prev;
++ gcsCONTAINER_RECORD_PTR next;
++}
++gcsCONTAINER_RECORD;
++
++typedef struct _gcsCONTAINER * gcsCONTAINER_PTR;
++typedef struct _gcsCONTAINER
++{
++ gctUINT containerSize;
++ gctUINT recordSize;
++ gctUINT recordCount;
++ gcsCONTAINER_LINK_PTR containers;
++ gcsCONTAINER_RECORD freeList;
++ gcsCONTAINER_RECORD allocList;
++}
++gcsCONTAINER;
++
++gceSTATUS
++gcsCONTAINER_Construct(
++ IN gcsCONTAINER_PTR Container,
++ gctUINT RecordsPerContainer,
++ gctUINT RecordSize
++ );
++
++gceSTATUS
++gcsCONTAINER_Destroy(
++ IN gcsCONTAINER_PTR Container
++ );
++
++gceSTATUS
++gcsCONTAINER_AllocateRecord(
++ IN gcsCONTAINER_PTR Container,
++ OUT gctPOINTER * Record
++ );
++
++gceSTATUS
++gcsCONTAINER_FreeRecord(
++ IN gcsCONTAINER_PTR Container,
++ IN gctPOINTER Record
++ );
++
++gceSTATUS
++gcsCONTAINER_FreeAll(
++ IN gcsCONTAINER_PTR Container
++ );
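As a usage illustration of this fixed-size record allocator, a hedged sketch follows; the record type and function are hypothetical, and only the gcsCONTAINER API above plus the driver's gcmIS_ERROR/gcmIS_SUCCESS macros are assumed:

    typedef struct _sketchRECORD { gctUINT id; } sketchRECORD;

    static gceSTATUS
    sketch_container_use(void)
    {
        gcsCONTAINER container;
        sketchRECORD * record = gcvNULL;
        gceSTATUS status;

        /* 32 fixed-size records per backing allocation. */
        status = gcsCONTAINER_Construct(&container, 32, sizeof(sketchRECORD));
        if (gcmIS_ERROR(status)) return status;

        /* Records are served from the free list; the heap is only hit when
           a fresh gcsCONTAINER_LINK block has to be chained in. */
        status = gcsCONTAINER_AllocateRecord(&container, (gctPOINTER *) &record);
        if (gcmIS_SUCCESS(status))
        {
            record->id = 1;
            gcsCONTAINER_FreeRecord(&container, record);
        }

        return gcsCONTAINER_Destroy(&container);
    }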
++
++/******************************************************************************\
++********************************* gcoHAL Object *********************************
++\******************************************************************************/
++
++/* Construct a new gcoHAL object. */
++gceSTATUS
++gcoHAL_Construct(
++ IN gctPOINTER Context,
++ IN gcoOS Os,
++ OUT gcoHAL * Hal
++ );
++
++/* Destroy an gcoHAL object. */
++gceSTATUS
++gcoHAL_Destroy(
++ IN gcoHAL Hal
++ );
++
++/* Get pointer to gco2D object. */
++gceSTATUS
++gcoHAL_Get2DEngine(
++ IN gcoHAL Hal,
++ OUT gco2D * Engine
++ );
++
++gceSTATUS
++gcoHAL_SetFscaleValue(
++ IN gctUINT FscaleValue
++ );
++
++gceSTATUS
++gcoHAL_GetFscaleValue(
++ OUT gctUINT * FscaleValue,
++ OUT gctUINT * MinFscaleValue,
++ OUT gctUINT * MaxFscaleValue
++ );
++
++gceSTATUS
++gcoHAL_SetBltNP2Texture(
++ gctBOOL enable
++ );
++
++#ifndef VIVANTE_NO_3D
++/* Get pointer to gco3D object. */
++gceSTATUS
++gcoHAL_Get3DEngine(
++ IN gcoHAL Hal,
++ OUT gco3D * Engine
++ );
++
++gceSTATUS
++gcoHAL_Query3DEngine(
++ IN gcoHAL Hal,
++ OUT gco3D * Engine
++ );
++
++gceSTATUS
++gcoHAL_Set3DEngine(
++ IN gcoHAL Hal,
++ IN gco3D Engine
++ );
++
++gceSTATUS
++gcoHAL_Get3DHardware(
++ IN gcoHAL Hal,
++ OUT gcoHARDWARE * Hardware
++ );
++
++gceSTATUS
++gcoHAL_Set3DHardware(
++ IN gcoHAL Hal,
++ IN gcoHARDWARE Hardware
++ );
++
++
++#endif /* VIVANTE_NO_3D */
++
++/* Verify whether the specified feature is available in hardware. */
++gceSTATUS
++gcoHAL_IsFeatureAvailable(
++ IN gcoHAL Hal,
++ IN gceFEATURE Feature
++ );
++
++/* Query the identity of the hardware. */
++gceSTATUS
++gcoHAL_QueryChipIdentity(
++ IN gcoHAL Hal,
++ OUT gceCHIPMODEL* ChipModel,
++ OUT gctUINT32* ChipRevision,
++ OUT gctUINT32* ChipFeatures,
++ OUT gctUINT32* ChipMinorFeatures
++ );
++
++/* Query the minor features of the hardware. */
++gceSTATUS gcoHAL_QueryChipMinorFeatures(
++ IN gcoHAL Hal,
++ OUT gctUINT32* NumFeatures,
++ OUT gctUINT32* ChipMinorFeatures
++ );
++
++/* Query the amount of video memory. */
++gceSTATUS
++gcoHAL_QueryVideoMemory(
++ IN gcoHAL Hal,
++ OUT gctPHYS_ADDR * InternalAddress,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctPHYS_ADDR * ExternalAddress,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctPHYS_ADDR * ContiguousAddress,
++ OUT gctSIZE_T * ContiguousSize
++ );
++
++/* Map video memory. */
++gceSTATUS
++gcoHAL_MapMemory(
++ IN gcoHAL Hal,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T NumberOfBytes,
++ OUT gctPOINTER * Logical
++ );
++
++/* Unmap video memory. */
++gceSTATUS
++gcoHAL_UnmapMemory(
++ IN gcoHAL Hal,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T NumberOfBytes,
++ IN gctPOINTER Logical
++ );
++
++/* Schedule an unmap of a buffer mapped through its physical address. */
++gceSTATUS
++gcoHAL_ScheduleUnmapMemory(
++ IN gcoHAL Hal,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T NumberOfBytes,
++ IN gctPOINTER Logical
++ );
++
++/* Map user memory. */
++gceSTATUS
++gcoHAL_MapUserMemory(
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR GPUAddress
++ );
++
++/* Unmap user memory. */
++gceSTATUS
++gcoHAL_UnmapUserMemory(
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Size,
++ IN gctPOINTER Info,
++ IN gctUINT32 GPUAddress
++ );
++
++/* Schedule an unmap of a user buffer using event mechanism. */
++gceSTATUS
++gcoHAL_ScheduleUnmapUserMemory(
++ IN gcoHAL Hal,
++ IN gctPOINTER Info,
++ IN gctSIZE_T Size,
++ IN gctUINT32 Address,
++ IN gctPOINTER Memory
++ );
++
++/* Commit the current command buffer. */
++gceSTATUS
++gcoHAL_Commit(
++ IN gcoHAL Hal,
++ IN gctBOOL Stall
++ );
++
++/* Query the tile capabilities. */
++gceSTATUS
++gcoHAL_QueryTiled(
++ IN gcoHAL Hal,
++ OUT gctINT32 * TileWidth2D,
++ OUT gctINT32 * TileHeight2D,
++ OUT gctINT32 * TileWidth3D,
++ OUT gctINT32 * TileHeight3D
++ );
++
++gceSTATUS
++gcoHAL_Compact(
++ IN gcoHAL Hal
++ );
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gcoHAL_ProfileStart(
++ IN gcoHAL Hal
++ );
++
++gceSTATUS
++gcoHAL_ProfileEnd(
++ IN gcoHAL Hal,
++ IN gctCONST_STRING Title
++ );
++#endif
++
++/* Power Management */
++gceSTATUS
++gcoHAL_SetPowerManagementState(
++ IN gcoHAL Hal,
++ IN gceCHIPPOWERSTATE State
++ );
++
++gceSTATUS
++gcoHAL_QueryPowerManagementState(
++ IN gcoHAL Hal,
++ OUT gceCHIPPOWERSTATE *State
++ );
++
++/* Set the filter type for filter blit. */
++gceSTATUS
++gcoHAL_SetFilterType(
++ IN gcoHAL Hal,
++ IN gceFILTER_TYPE FilterType
++ );
++
++gceSTATUS
++gcoHAL_GetDump(
++ IN gcoHAL Hal,
++ OUT gcoDUMP * Dump
++ );
++
++/* Call the kernel HAL layer. */
++gceSTATUS
++gcoHAL_Call(
++ IN gcoHAL Hal,
++ IN OUT gcsHAL_INTERFACE_PTR Interface
++ );
++
++gceSTATUS
++gcoHAL_GetPatchID(
++ IN gcoHAL Hal,
++ OUT gcePATCH_ID * PatchID
++ );
++
++/* Schedule an event. */
++gceSTATUS
++gcoHAL_ScheduleEvent(
++ IN gcoHAL Hal,
++ IN OUT gcsHAL_INTERFACE_PTR Interface
++ );
++
++/* Destroy a surface. */
++gceSTATUS
++gcoHAL_DestroySurface(
++ IN gcoHAL Hal,
++ IN gcoSURF Surface
++ );
++
++/* Request a start/stop timestamp. */
++gceSTATUS
++gcoHAL_SetTimer(
++ IN gcoHAL Hal,
++ IN gctUINT32 Index,
++ IN gctBOOL Start
++ );
++
++/* Get Time delta from a Timer in microseconds. */
++gceSTATUS
++gcoHAL_GetTimerTime(
++ IN gcoHAL Hal,
++ IN gctUINT32 Timer,
++ OUT gctINT32_PTR TimeDelta
++ );
++
++/* set timeout value. */
++gceSTATUS
++gcoHAL_SetTimeOut(
++ IN gcoHAL Hal,
++ IN gctUINT32 timeOut
++ );
++
++gceSTATUS
++gcoHAL_SetHardwareType(
++ IN gcoHAL Hal,
++    IN gceHARDWARE_TYPE HardwareType
++ );
++
++gceSTATUS
++gcoHAL_GetHardwareType(
++ IN gcoHAL Hal,
++    OUT gceHARDWARE_TYPE * HardwareType
++ );
++
++gceSTATUS
++gcoHAL_QueryChipCount(
++ IN gcoHAL Hal,
++ OUT gctINT32 * Count
++ );
++
++gceSTATUS
++gcoHAL_QuerySeparated3D2D(
++ IN gcoHAL Hal
++ );
++
++gceSTATUS
++gcoHAL_QuerySpecialHint(
++ IN gceSPECIAL_HINT Hint
++ );
++
++gceSTATUS
++gcoHAL_SetSpecialHintData(
++ IN gcoHARDWARE Hardware
++ );
++
++/* Get pointer to gcoVG object. */
++gceSTATUS
++gcoHAL_GetVGEngine(
++ IN gcoHAL Hal,
++ OUT gcoVG * Engine
++ );
++
++#if gcdENABLE_VG
++gceSTATUS
++gcoHAL_QueryChipLimits(
++ IN gcoHAL Hal,
++ IN gctINT32 Chip,
++ OUT gcsHAL_LIMITS *Limits);
++
++gceSTATUS
++gcoHAL_QueryChipFeature(
++ IN gcoHAL Hal,
++ IN gctINT32 Chip,
++ IN gceFEATURE Feature);
++
++#endif
++/******************************************************************************\
++********************************** gcoOS Object *********************************
++\******************************************************************************/
++
++/* Get PLS value for given key */
++gctPOINTER
++gcoOS_GetPLSValue(
++ IN gcePLS_VALUE key
++ );
++
++/* Set PLS value of a given key */
++void
++gcoOS_SetPLSValue(
++ IN gcePLS_VALUE key,
++ OUT gctPOINTER value
++ );
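A small, hypothetical sketch of the intended usage, assuming only the gcePLS_VALUE keys defined earlier and the two accessors above:

    static void
    sketch_pls_use(gctPOINTER display)
    {
        gctPOINTER stored;

        gcoOS_SetPLSValue(gcePLS_VALUE_EGL_DISPLAY_INFO, display);
        stored = gcoOS_GetPLSValue(gcePLS_VALUE_EGL_DISPLAY_INFO);

        /* 'stored' now aliases 'display' for every thread of this process. */
        (void) stored;
    }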
++
++/* Get access to the thread local storage. */
++gceSTATUS
++gcoOS_GetTLS(
++ OUT gcsTLS_PTR * TLS
++ );
++
++/* Copy the TLS from a source thread. */
++gceSTATUS gcoOS_CopyTLS(IN gcsTLS_PTR Source);
++
++/* Destroy the objects associated with the current thread. */
++void
++gcoOS_FreeThreadData(
++ IN gctBOOL ProcessExiting
++ );
++
++/* Construct a new gcoOS object. */
++gceSTATUS
++gcoOS_Construct(
++ IN gctPOINTER Context,
++ OUT gcoOS * Os
++ );
++
++/* Destroy an gcoOS object. */
++gceSTATUS
++gcoOS_Destroy(
++ IN gcoOS Os
++ );
++
++/* Get the base address for the physical memory. */
++gceSTATUS
++gcoOS_GetBaseAddress(
++ IN gcoOS Os,
++ OUT gctUINT32_PTR BaseAddress
++ );
++
++/* Allocate memory from the heap. */
++gceSTATUS
++gcoOS_Allocate(
++ IN gcoOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Get allocated memory size. */
++gceSTATUS
++gcoOS_GetMemorySize(
++ IN gcoOS Os,
++ IN gctPOINTER Memory,
++ OUT gctSIZE_T_PTR MemorySize
++ );
++
++/* Free allocated memory. */
++gceSTATUS
++gcoOS_Free(
++ IN gcoOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Allocate memory. */
++gceSTATUS
++gcoOS_AllocateMemory(
++ IN gcoOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Free memory. */
++gceSTATUS
++gcoOS_FreeMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Allocate contiguous memory. */
++gceSTATUS
++gcoOS_AllocateContiguous(
++ IN gcoOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Free contiguous memory. */
++gceSTATUS
++gcoOS_FreeContiguous(
++ IN gcoOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++/* Allocate video memory. */
++gceSTATUS
++gcoOS_AllocateVideoMemory(
++ IN gcoOS Os,
++ IN gctBOOL InUserSpace,
++ IN gctBOOL InCacheable,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctUINT32 * Physical,
++ OUT gctPOINTER * Logical,
++ OUT gctPOINTER * Handle
++ );
++
++/* Free video memory. */
++gceSTATUS
++gcoOS_FreeVideoMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Handle
++ );
++
++gceSTATUS
++gcoSURF_GetBankOffsetBytes(
++    IN gcoSURF Surface,
++ IN gceSURF_TYPE Type,
++ IN gctUINT32 Stride,
++ IN gctUINT32_PTR Bytes
++ );
++
++/* Map user memory. */
++gceSTATUS
++gcoOS_MapUserMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR Address
++ );
++
++/* Map user memory. */
++gceSTATUS
++gcoOS_MapUserMemoryEx(
++ IN gcoOS Os,
++ IN gctPOINTER Memory,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR Address
++ );
++
++/* Unmap user memory. */
++gceSTATUS
++gcoOS_UnmapUserMemory(
++ IN gcoOS Os,
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Size,
++ IN gctPOINTER Info,
++ IN gctUINT32 Address
++ );
++
++/* Device I/O Control call to the kernel HAL layer. */
++gceSTATUS
++gcoOS_DeviceControl(
++ IN gcoOS Os,
++ IN gctUINT32 IoControlCode,
++ IN gctPOINTER InputBuffer,
++ IN gctSIZE_T InputBufferSize,
++ IN gctPOINTER OutputBuffer,
++ IN gctSIZE_T OutputBufferSize
++ );
++
++/* Allocate non paged memory. */
++gceSTATUS
++gcoOS_AllocateNonPagedMemory(
++ IN gcoOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Free non paged memory. */
++gceSTATUS
++gcoOS_FreeNonPagedMemory(
++ IN gcoOS Os,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ );
++
++#define gcmOS_SAFE_FREE(os, mem) \
++ gcoOS_Free(os, mem); \
++ mem = gcvNULL
++
++#define gcmkOS_SAFE_FREE(os, mem) \
++ gckOS_Free(os, mem); \
++ mem = gcvNULL
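Note that both macros expand to two separate statements rather than a single do/while block, so a bare if body would only guard the free call. A minimal illustration (hypothetical caller, assuming an existing gcoOS object and a prior allocation):

    if (mem != gcvNULL)
    {
        /* Braces are required: without them only gcoOS_Free() would be
           conditional and 'mem = gcvNULL' would always execute. */
        gcmOS_SAFE_FREE(os, mem);
    }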
++
++typedef enum _gceFILE_MODE
++{
++ gcvFILE_CREATE = 0,
++ gcvFILE_APPEND,
++ gcvFILE_READ,
++ gcvFILE_CREATETEXT,
++ gcvFILE_APPENDTEXT,
++ gcvFILE_READTEXT,
++}
++gceFILE_MODE;
++
++/* Open a file. */
++gceSTATUS
++gcoOS_Open(
++ IN gcoOS Os,
++ IN gctCONST_STRING FileName,
++ IN gceFILE_MODE Mode,
++ OUT gctFILE * File
++ );
++
++/* Close a file. */
++gceSTATUS
++gcoOS_Close(
++ IN gcoOS Os,
++ IN gctFILE File
++ );
++
++/* Read data from a file. */
++gceSTATUS
++gcoOS_Read(
++ IN gcoOS Os,
++ IN gctFILE File,
++ IN gctSIZE_T ByteCount,
++ IN gctPOINTER Data,
++ OUT gctSIZE_T * ByteRead
++ );
++
++/* Write data to a file. */
++gceSTATUS
++gcoOS_Write(
++ IN gcoOS Os,
++ IN gctFILE File,
++ IN gctSIZE_T ByteCount,
++ IN gctCONST_POINTER Data
++ );
++
++/* Flush data to a file. */
++gceSTATUS
++gcoOS_Flush(
++ IN gcoOS Os,
++ IN gctFILE File
++ );
++
++/* Close a file descriptor. */
++gceSTATUS
++gcoOS_CloseFD(
++ IN gcoOS Os,
++ IN gctINT FD
++ );
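Putting the file API together, a hedged sketch of a bounded read; the function and buffer size are hypothetical, and only the declarations above are assumed:

    static gceSTATUS
    sketch_read_file(gcoOS Os, gctCONST_STRING Name)
    {
        gctFILE file = gcvNULL;
        gctCHAR buffer[64];
        gctSIZE_T bytesRead = 0;
        gceSTATUS status;

        status = gcoOS_Open(Os, Name, gcvFILE_READ, &file);
        if (gcmIS_ERROR(status)) return status;

        /* Read up to sizeof(buffer) bytes; bytesRead reports the actual count. */
        status = gcoOS_Read(Os, file, sizeof(buffer), buffer, &bytesRead);

        gcoOS_Close(Os, file);
        return status;
    }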
++
++/* Dup file descriptor to another. */
++gceSTATUS
++gcoOS_DupFD(
++ IN gcoOS Os,
++ IN gctINT FD,
++ OUT gctINT * FD2
++ );
++
++/* Create an endpoint for communication. */
++gceSTATUS
++gcoOS_Socket(
++ IN gcoOS Os,
++ IN gctINT Domain,
++ IN gctINT Type,
++ IN gctINT Protocol,
++ OUT gctINT *SockFd
++ );
++
++/* Close a socket. */
++gceSTATUS
++gcoOS_CloseSocket(
++ IN gcoOS Os,
++ IN gctINT SockFd
++ );
++
++/* Initiate a connection on a socket. */
++gceSTATUS
++gcoOS_Connect(
++ IN gcoOS Os,
++ IN gctINT SockFd,
++ IN gctCONST_POINTER HostName,
++ IN gctUINT Port);
++
++/* Shut down part of connection on a socket. */
++gceSTATUS
++gcoOS_Shutdown(
++ IN gcoOS Os,
++ IN gctINT SockFd,
++ IN gctINT How
++ );
++
++/* Send a message on a socket. */
++gceSTATUS
++gcoOS_Send(
++ IN gcoOS Os,
++ IN gctINT SockFd,
++ IN gctSIZE_T ByteCount,
++ IN gctCONST_POINTER Data,
++ IN gctINT Flags
++ );
++
++/* Wait until data can be sent on a socket. */
++gceSTATUS
++gcoOS_WaitForSend(
++ IN gcoOS Os,
++ IN gctINT SockFd,
++ IN gctINT Seconds,
++ IN gctINT MicroSeconds);
++
++/* Get environment variable value. */
++gceSTATUS
++gcoOS_GetEnv(
++ IN gcoOS Os,
++ IN gctCONST_STRING VarName,
++ OUT gctSTRING * Value
++ );
++
++/* Set environment variable value. */
++gceSTATUS
++gcoOS_SetEnv(
++ IN gcoOS Os,
++ IN gctCONST_STRING VarName,
++ IN gctSTRING Value
++ );
++
++/* Get current working directory. */
++gceSTATUS
++gcoOS_GetCwd(
++ IN gcoOS Os,
++ IN gctINT SizeInBytes,
++ OUT gctSTRING Buffer
++ );
++
++/* Get file status info. */
++gceSTATUS
++gcoOS_Stat(
++ IN gcoOS Os,
++ IN gctCONST_STRING FileName,
++ OUT gctPOINTER Buffer
++ );
++
++typedef enum _gceFILE_WHENCE
++{
++ gcvFILE_SEEK_SET,
++ gcvFILE_SEEK_CUR,
++ gcvFILE_SEEK_END
++}
++gceFILE_WHENCE;
++
++/* Set the current position of a file. */
++gceSTATUS
++gcoOS_Seek(
++ IN gcoOS Os,
++ IN gctFILE File,
++ IN gctUINT32 Offset,
++ IN gceFILE_WHENCE Whence
++ );
++
++/* Set the current position of a file. */
++gceSTATUS
++gcoOS_SetPos(
++ IN gcoOS Os,
++ IN gctFILE File,
++ IN gctUINT32 Position
++ );
++
++/* Get the current position of a file. */
++gceSTATUS
++gcoOS_GetPos(
++ IN gcoOS Os,
++ IN gctFILE File,
++ OUT gctUINT32 * Position
++ );
++
++/* Same as strstr. */
++gceSTATUS
++gcoOS_StrStr(
++ IN gctCONST_STRING String,
++ IN gctCONST_STRING SubString,
++ OUT gctSTRING * Output
++ );
++
++/* Find the last occurrence of a character inside a string. */
++gceSTATUS
++gcoOS_StrFindReverse(
++ IN gctCONST_STRING String,
++ IN gctINT8 Character,
++ OUT gctSTRING * Output
++ );
++
++gceSTATUS
++gcoOS_StrDup(
++ IN gcoOS Os,
++ IN gctCONST_STRING String,
++ OUT gctSTRING * Target
++ );
++
++/* Copy a string. */
++gceSTATUS
++gcoOS_StrCopySafe(
++ IN gctSTRING Destination,
++ IN gctSIZE_T DestinationSize,
++ IN gctCONST_STRING Source
++ );
++
++/* Append a string. */
++gceSTATUS
++gcoOS_StrCatSafe(
++ IN gctSTRING Destination,
++ IN gctSIZE_T DestinationSize,
++ IN gctCONST_STRING Source
++ );
++
++/* Compare two strings. */
++gceSTATUS
++gcoOS_StrCmp(
++ IN gctCONST_STRING String1,
++ IN gctCONST_STRING String2
++ );
++
++/* Compare characters of two strings. */
++gceSTATUS
++gcoOS_StrNCmp(
++ IN gctCONST_STRING String1,
++ IN gctCONST_STRING String2,
++ IN gctSIZE_T Count
++ );
++
++/* Convert string to float. */
++gceSTATUS
++gcoOS_StrToFloat(
++ IN gctCONST_STRING String,
++ OUT gctFLOAT * Float
++ );
++
++/* Convert hex string to integer. */
++gceSTATUS
++gcoOS_HexStrToInt(
++ IN gctCONST_STRING String,
++ OUT gctINT * Int
++ );
++
++/* Convert hex string to float. */
++gceSTATUS
++gcoOS_HexStrToFloat(
++ IN gctCONST_STRING String,
++ OUT gctFLOAT * Float
++ );
++
++/* Convert string to integer. */
++gceSTATUS
++gcoOS_StrToInt(
++ IN gctCONST_STRING String,
++ OUT gctINT * Int
++ );
++
++gceSTATUS
++gcoOS_MemCmp(
++ IN gctCONST_POINTER Memory1,
++ IN gctCONST_POINTER Memory2,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoOS_PrintStrSafe(
++ OUT gctSTRING String,
++ IN gctSIZE_T StringSize,
++ IN OUT gctUINT * Offset,
++ IN gctCONST_STRING Format,
++ ...
++ );
++
++gceSTATUS
++gcoOS_LoadLibrary(
++ IN gcoOS Os,
++ IN gctCONST_STRING Library,
++ OUT gctHANDLE * Handle
++ );
++
++gceSTATUS
++gcoOS_FreeLibrary(
++ IN gcoOS Os,
++ IN gctHANDLE Handle
++ );
++
++gceSTATUS
++gcoOS_GetProcAddress(
++ IN gcoOS Os,
++ IN gctHANDLE Handle,
++ IN gctCONST_STRING Name,
++ OUT gctPOINTER * Function
++ );
++
++gceSTATUS
++gcoOS_Compact(
++ IN gcoOS Os
++ );
++
++gceSTATUS
++gcoOS_AddSignalHandler (
++ IN gceSignalHandlerType SignalHandlerType
++ );
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gcoOS_ProfileStart(
++ IN gcoOS Os
++ );
++
++gceSTATUS
++gcoOS_ProfileEnd(
++ IN gcoOS Os,
++ IN gctCONST_STRING Title
++ );
++
++gceSTATUS
++gcoOS_SetProfileSetting(
++ IN gcoOS Os,
++ IN gctBOOL Enable,
++ IN gctCONST_STRING FileName
++ );
++#endif
++
++gctBOOL
++gcoOS_IsNeededSupportNP2Texture(
++ IN gctCHAR* ProcName
++ );
++
++/* Query the video memory. */
++gceSTATUS
++gcoOS_QueryVideoMemory(
++ IN gcoOS Os,
++ OUT gctPHYS_ADDR * InternalAddress,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctPHYS_ADDR * ExternalAddress,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctPHYS_ADDR * ContiguousAddress,
++ OUT gctSIZE_T * ContiguousSize
++ );
++
++/* Detect if the process is the executable specified. */
++gceSTATUS
++gcoOS_DetectProcessByNamePid(
++ IN gctCONST_STRING Name,
++ IN gctHANDLE Pid
++ );
++
++/* Detect if the current process is the executable specified. */
++gceSTATUS
++gcoOS_DetectProcessByName(
++ IN gctCONST_STRING Name
++ );
++
++gceSTATUS
++gcoOS_DetectProcessByEncryptedName(
++ IN gctCONST_STRING Name
++ );
++
++#if defined(ANDROID)
++gceSTATUS
++gcoOS_DetectProgrameByEncryptedSymbols(
++ IN gcoOS_SymbolsList Symbols
++ );
++#endif
++
++/*----------------------------------------------------------------------------*/
++/*----- Atoms ----------------------------------------------------------------*/
++
++/* Construct an atom. */
++gceSTATUS
++gcoOS_AtomConstruct(
++ IN gcoOS Os,
++ OUT gcsATOM_PTR * Atom
++ );
++
++/* Destroy an atom. */
++gceSTATUS
++gcoOS_AtomDestroy(
++ IN gcoOS Os,
++ IN gcsATOM_PTR Atom
++ );
++
++/* Increment an atom. */
++gceSTATUS
++gcoOS_AtomIncrement(
++ IN gcoOS Os,
++ IN gcsATOM_PTR Atom,
++ OUT gctINT32_PTR OldValue
++ );
++
++/* Decrement an atom. */
++gceSTATUS
++gcoOS_AtomDecrement(
++ IN gcoOS Os,
++ IN gcsATOM_PTR Atom,
++ OUT gctINT32_PTR OldValue
++ );
++
++gctHANDLE
++gcoOS_GetCurrentProcessID(
++ void
++ );
++
++gctHANDLE
++gcoOS_GetCurrentThreadID(
++ void
++ );
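A short, hypothetical reference-count sketch over the atom API; both operations are assumed to report, through OldValue, the value held before the change:

    static gceSTATUS
    sketch_refcount(gcoOS Os)
    {
        gcsATOM_PTR atom = gcvNULL;
        gctINT32 oldValue = 0;
        gceSTATUS status;

        status = gcoOS_AtomConstruct(Os, &atom);
        if (gcmIS_ERROR(status)) return status;

        /* Take a reference, then drop it again. */
        gcoOS_AtomIncrement(Os, atom, &oldValue);
        gcoOS_AtomDecrement(Os, atom, &oldValue);

        return gcoOS_AtomDestroy(Os, atom);
    }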
++
++/*----------------------------------------------------------------------------*/
++/*----- Time -----------------------------------------------------------------*/
++
++/* Get the number of milliseconds since the system started. */
++gctUINT32
++gcoOS_GetTicks(
++ void
++ );
++
++/* Get time in microseconds. */
++gceSTATUS
++gcoOS_GetTime(
++ gctUINT64_PTR Time
++ );
++
++/* Get CPU usage in microseconds. */
++gceSTATUS
++gcoOS_GetCPUTime(
++ gctUINT64_PTR CPUTime
++ );
++
++/* Get memory usage. */
++gceSTATUS
++gcoOS_GetMemoryUsage(
++ gctUINT32_PTR MaxRSS,
++ gctUINT32_PTR IxRSS,
++ gctUINT32_PTR IdRSS,
++ gctUINT32_PTR IsRSS
++ );
++
++/* Delay a number of microseconds. */
++gceSTATUS
++gcoOS_Delay(
++ IN gcoOS Os,
++ IN gctUINT32 Delay
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Threads --------------------------------------------------------------*/
++
++#ifdef _WIN32
++/* Cannot include windows.h here because "near" and "far",
++ * which are used in gcsDEPTH_INFO, are defined to nothing in WinDef.h.
++ * So, use the real values of DWORD and WINAPI instead.
++ * DWORD is unsigned long, and WINAPI is __stdcall.
++ * If these two are changed in WinDef.h, the following two typedefs
++ * need to be changed, too.
++ */
++typedef unsigned long gctTHREAD_RETURN;
++typedef unsigned long (__stdcall * gcTHREAD_ROUTINE)(void * Argument);
++#else
++typedef void * gctTHREAD_RETURN;
++typedef void * (* gcTHREAD_ROUTINE)(void *);
++#endif
++
++/* Create a new thread. */
++gceSTATUS
++gcoOS_CreateThread(
++ IN gcoOS Os,
++ IN gcTHREAD_ROUTINE Worker,
++ IN gctPOINTER Argument,
++ OUT gctPOINTER * Thread
++ );
++
++/* Close a thread. */
++gceSTATUS
++gcoOS_CloseThread(
++ IN gcoOS Os,
++ IN gctPOINTER Thread
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Mutexes --------------------------------------------------------------*/
++
++/* Create a new mutex. */
++gceSTATUS
++gcoOS_CreateMutex(
++ IN gcoOS Os,
++ OUT gctPOINTER * Mutex
++ );
++
++/* Delete a mutex. */
++gceSTATUS
++gcoOS_DeleteMutex(
++ IN gcoOS Os,
++ IN gctPOINTER Mutex
++ );
++
++/* Acquire a mutex. */
++gceSTATUS
++gcoOS_AcquireMutex(
++ IN gcoOS Os,
++ IN gctPOINTER Mutex,
++ IN gctUINT32 Timeout
++ );
++
++/* Release a mutex. */
++gceSTATUS
++gcoOS_ReleaseMutex(
++ IN gcoOS Os,
++ IN gctPOINTER Mutex
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Signals --------------------------------------------------------------*/
++
++/* Create a signal. */
++gceSTATUS
++gcoOS_CreateSignal(
++ IN gcoOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctSIGNAL * Signal
++ );
++
++/* Destroy a signal. */
++gceSTATUS
++gcoOS_DestroySignal(
++ IN gcoOS Os,
++ IN gctSIGNAL Signal
++ );
++
++/* Signal a signal. */
++gceSTATUS
++gcoOS_Signal(
++ IN gcoOS Os,
++ IN gctSIGNAL Signal,
++ IN gctBOOL State
++ );
++
++/* Wait for a signal. */
++gceSTATUS
++gcoOS_WaitSignal(
++ IN gcoOS Os,
++ IN gctSIGNAL Signal,
++ IN gctUINT32 Wait
++ );
++
++/* Map a signal from another process */
++gceSTATUS
++gcoOS_MapSignal(
++ IN gctSIGNAL RemoteSignal,
++ OUT gctSIGNAL * LocalSignal
++ );
++
++/* Unmap a signal mapped from another process */
++gceSTATUS
++gcoOS_UnmapSignal(
++ IN gctSIGNAL Signal
++ );
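A minimal wait-side sketch of the signal API; the function is hypothetical and the timeout is assumed to be in milliseconds, matching the gctUINT32 Wait parameter:

    static gceSTATUS
    sketch_wait_for_worker(gcoOS Os)
    {
        gctSIGNAL signal = gcvNULL;
        gceSTATUS status;

        /* ManualReset = gcvFALSE: the signal auto-resets after one waiter. */
        status = gcoOS_CreateSignal(Os, gcvFALSE, &signal);
        if (gcmIS_ERROR(status)) return status;

        /* Elsewhere, a producer would call gcoOS_Signal(Os, signal, gcvTRUE). */
        status = gcoOS_WaitSignal(Os, signal, 1000 /* timeout, assumed ms */);

        gcoOS_DestroySignal(Os, signal);
        return status;
    }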
++
++/*----------------------------------------------------------------------------*/
++/*----- Android Native Fence -------------------------------------------------*/
++
++/* Create sync point. */
++gceSTATUS
++gcoOS_CreateSyncPoint(
++ IN gcoOS Os,
++ OUT gctSYNC_POINT * SyncPoint
++ );
++
++/* Destroy sync point. */
++gceSTATUS
++gcoOS_DestroySyncPoint(
++ IN gcoOS Os,
++ IN gctSYNC_POINT SyncPoint
++ );
++
++/* Create native fence. */
++gceSTATUS
++gcoOS_CreateNativeFence(
++ IN gcoOS Os,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctINT * FenceFD
++ );
++
++/* Wait on native fence. */
++gceSTATUS
++gcoOS_WaitNativeFence(
++ IN gcoOS Os,
++ IN gctINT FenceFD,
++ IN gctUINT32 Timeout
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----- Memory Access and Cache ----------------------------------------------*/
++
++/* Write a register. */
++gceSTATUS
++gcoOS_WriteRegister(
++ IN gcoOS Os,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ );
++
++/* Read a register. */
++gceSTATUS
++gcoOS_ReadRegister(
++ IN gcoOS Os,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ );
++
++gceSTATUS
++gcoOS_CacheClean(
++ IN gcoOS Os,
++ IN gctUINT64 Node,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoOS_CacheFlush(
++ IN gcoOS Os,
++ IN gctUINT64 Node,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoOS_CacheInvalidate(
++ IN gcoOS Os,
++ IN gctUINT64 Node,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoOS_MemoryBarrier(
++ IN gcoOS Os,
++ IN gctPOINTER Logical
++ );
++
++
++/*----------------------------------------------------------------------------*/
++/*----- Profile --------------------------------------------------------------*/
++
++gceSTATUS
++gckOS_GetProfileTick(
++ OUT gctUINT64_PTR Tick
++ );
++
++gceSTATUS
++gckOS_QueryProfileTickRate(
++ OUT gctUINT64_PTR TickRate
++ );
++
++gctUINT32
++gckOS_ProfileToMS(
++ IN gctUINT64 Ticks
++ );
++
++gceSTATUS
++gcoOS_GetProfileTick(
++ OUT gctUINT64_PTR Tick
++ );
++
++gceSTATUS
++gcoOS_QueryProfileTickRate(
++ OUT gctUINT64_PTR TickRate
++ );
++
++#define _gcmPROFILE_INIT(prefix, freq, start) \
++ do { \
++ prefix ## OS_QueryProfileTickRate(&(freq)); \
++ prefix ## OS_GetProfileTick(&(start)); \
++ } while (gcvFALSE)
++
++#define _gcmPROFILE_QUERY(prefix, start, ticks) \
++ do { \
++ prefix ## OS_GetProfileTick(&(ticks)); \
++ (ticks) = ((ticks) > (start)) ? ((ticks) - (start)) \
++ : (~0ull - (start) + (ticks) + 1); \
++ } while (gcvFALSE)
++
++#if gcdENABLE_PROFILING
++# define gcmkPROFILE_INIT(freq, start) _gcmPROFILE_INIT(gck, freq, start)
++# define gcmkPROFILE_QUERY(start, ticks) _gcmPROFILE_QUERY(gck, start, ticks)
++# define gcmPROFILE_INIT(freq, start) _gcmPROFILE_INIT(gco, freq, start)
++# define gcmPROFILE_QUERY(start, ticks) _gcmPROFILE_QUERY(gco, start, ticks)
++# define gcmPROFILE_ONLY(x) x
++# define gcmPROFILE_ELSE(x) do { } while (gcvFALSE)
++# define gcmPROFILE_DECLARE_ONLY(x) x
++# define gcmPROFILE_DECLARE_ELSE(x) typedef x
++#else
++# define gcmkPROFILE_INIT(start, freq) do { } while (gcvFALSE)
++# define gcmkPROFILE_QUERY(start, ticks) do { } while (gcvFALSE)
++# define gcmPROFILE_INIT(start, freq) do { } while (gcvFALSE)
++# define gcmPROFILE_QUERY(start, ticks) do { } while (gcvFALSE)
++# define gcmPROFILE_ONLY(x) do { } while (gcvFALSE)
++# define gcmPROFILE_ELSE(x) x
++# define gcmPROFILE_DECLARE_ONLY(x) do { } while (gcvFALSE)
++# define gcmPROFILE_DECLARE_ELSE(x) x
++#endif
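A sketch of the user-side timing pattern these macros encode, assuming gcdENABLE_PROFILING is set so they expand to the gcoOS calls above:

    {
        gctUINT64 freq, start, ticks;

        gcmPROFILE_INIT(freq, start);
        /* ... work being measured ... */
        gcmPROFILE_QUERY(start, ticks);

        /* 'ticks' holds the elapsed tick count, wrap-safe per the macro
           above; assuming the tick rate is in ticks per second, elapsed
           milliseconds would be ticks * 1000 / freq. */
    }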
++
++/*******************************************************************************
++** gcoMATH object
++*/
++
++#define gcdPI 3.14159265358979323846f
++
++/* Kernel. */
++gctINT
++gckMATH_ModuloInt(
++ IN gctINT X,
++ IN gctINT Y
++ );
++
++/* User. */
++gctUINT32
++gcoMATH_Log2in5dot5(
++ IN gctINT X
++ );
++
++
++gctFLOAT
++gcoMATH_UIntAsFloat(
++ IN gctUINT32 X
++ );
++
++gctUINT32
++gcoMATH_FloatAsUInt(
++ IN gctFLOAT X
++ );
++
++gctBOOL
++gcoMATH_CompareEqualF(
++ IN gctFLOAT X,
++ IN gctFLOAT Y
++ );
++
++gctUINT16
++gcoMATH_UInt8AsFloat16(
++ IN gctUINT8 X
++ );
++
++/******************************************************************************\
++**************************** Coordinate Structures *****************************
++\******************************************************************************/
++
++typedef struct _gcsPOINT
++{
++ gctINT32 x;
++ gctINT32 y;
++}
++gcsPOINT;
++
++typedef struct _gcsSIZE
++{
++ gctINT32 width;
++ gctINT32 height;
++}
++gcsSIZE;
++
++typedef struct _gcsRECT
++{
++ gctINT32 left;
++ gctINT32 top;
++ gctINT32 right;
++ gctINT32 bottom;
++}
++gcsRECT;
++
++typedef union _gcsPIXEL
++{
++ struct
++ {
++ gctFLOAT r, g, b, a;
++ gctFLOAT d, s;
++ } pf;
++
++ struct
++ {
++ gctINT32 r, g, b, a;
++ gctINT32 d, s;
++ } pi;
++
++ struct
++ {
++ gctUINT32 r, g, b, a;
++ gctUINT32 d, s;
++ } pui;
++
++} gcsPIXEL;
++
++
++/******************************************************************************\
++********************************* gcoSURF Object ********************************
++\******************************************************************************/
++
++/*----------------------------------------------------------------------------*/
++/*------------------------------- gcoSURF Common ------------------------------*/
++
++/* Color format classes. */
++typedef enum _gceFORMAT_CLASS
++{
++ gcvFORMAT_CLASS_RGBA = 4500,
++ gcvFORMAT_CLASS_YUV,
++ gcvFORMAT_CLASS_INDEX,
++ gcvFORMAT_CLASS_LUMINANCE,
++ gcvFORMAT_CLASS_BUMP,
++ gcvFORMAT_CLASS_DEPTH,
++}
++gceFORMAT_CLASS;
++
++/* Special enums for width field in gcsFORMAT_COMPONENT. */
++typedef enum _gceCOMPONENT_CONTROL
++{
++ gcvCOMPONENT_NOTPRESENT = 0x00,
++ gcvCOMPONENT_DONTCARE = 0x80,
++ gcvCOMPONENT_WIDTHMASK = 0x7F,
++ gcvCOMPONENT_ODD = 0x80
++}
++gceCOMPONENT_CONTROL;
++
++/* Color format component parameters. */
++typedef struct _gcsFORMAT_COMPONENT
++{
++ gctUINT8 start;
++ gctUINT8 width;
++}
++gcsFORMAT_COMPONENT;
++
++/* RGBA color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_RGBA
++{
++ gcsFORMAT_COMPONENT alpha;
++ gcsFORMAT_COMPONENT red;
++ gcsFORMAT_COMPONENT green;
++ gcsFORMAT_COMPONENT blue;
++}
++gcsFORMAT_CLASS_TYPE_RGBA;
++
++/* YUV color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_YUV
++{
++ gcsFORMAT_COMPONENT y;
++ gcsFORMAT_COMPONENT u;
++ gcsFORMAT_COMPONENT v;
++}
++gcsFORMAT_CLASS_TYPE_YUV;
++
++/* Index color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_INDEX
++{
++ gcsFORMAT_COMPONENT value;
++}
++gcsFORMAT_CLASS_TYPE_INDEX;
++
++/* Luminance color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_LUMINANCE
++{
++ gcsFORMAT_COMPONENT alpha;
++ gcsFORMAT_COMPONENT value;
++}
++gcsFORMAT_CLASS_TYPE_LUMINANCE;
++
++/* Bump map color format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_BUMP
++{
++ gcsFORMAT_COMPONENT alpha;
++ gcsFORMAT_COMPONENT l;
++ gcsFORMAT_COMPONENT v;
++ gcsFORMAT_COMPONENT u;
++ gcsFORMAT_COMPONENT q;
++ gcsFORMAT_COMPONENT w;
++}
++gcsFORMAT_CLASS_TYPE_BUMP;
++
++/* Depth and stencil format class. */
++typedef struct _gcsFORMAT_CLASS_TYPE_DEPTH
++{
++ gcsFORMAT_COMPONENT depth;
++ gcsFORMAT_COMPONENT stencil;
++}
++gcsFORMAT_CLASS_TYPE_DEPTH;
++
++/* Format parameters. */
++typedef struct _gcsSURF_FORMAT_INFO
++{
++ /* Format code and class. */
++ gceSURF_FORMAT format;
++ gceFORMAT_CLASS fmtClass;
++
++ /* The size of one pixel in bits. */
++ gctUINT8 bitsPerPixel;
++
++ /* Component swizzle. */
++ gceSURF_SWIZZLE swizzle;
++
++    /* Some formats have two neighbouring pixels interleaved together. */
++    /* To describe such a format, set the flag to 1 and add another    */
++    /* entry like this one describing the odd pixel format.            */
++ gctUINT8 interleaved;
++
++ /* Format components. */
++ union
++ {
++ gcsFORMAT_CLASS_TYPE_BUMP bump;
++ gcsFORMAT_CLASS_TYPE_RGBA rgba;
++ gcsFORMAT_CLASS_TYPE_YUV yuv;
++ gcsFORMAT_CLASS_TYPE_LUMINANCE lum;
++ gcsFORMAT_CLASS_TYPE_INDEX index;
++ gcsFORMAT_CLASS_TYPE_DEPTH depth;
++ } u;
++}
++gcsSURF_FORMAT_INFO;
++
++/* Frame buffer information. */
++typedef struct _gcsSURF_FRAMEBUFFER
++{
++ gctPOINTER logical;
++ gctUINT width, height;
++ gctINT stride;
++ gceSURF_FORMAT format;
++}
++gcsSURF_FRAMEBUFFER;
++
++typedef struct _gcsVIDMEM_NODE_SHARED_INFO
++{
++ gctBOOL tileStatusDisabled;
++ gcsPOINT SrcOrigin;
++ gcsPOINT DestOrigin;
++ gcsSIZE RectSize;
++ gctUINT32 clearValue;
++}
++gcsVIDMEM_NODE_SHARED_INFO;
++
++/* Generic pixel component descriptors. */
++extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_XXX8;
++extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_XX8X;
++extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_X8XX;
++extern gcsFORMAT_COMPONENT gcvPIXEL_COMP_8XXX;
++
++typedef enum _gceORIENTATION
++{
++ gcvORIENTATION_TOP_BOTTOM,
++ gcvORIENTATION_BOTTOM_TOP,
++}
++gceORIENTATION;
++
++
++/* Construct a new gcoSURF object. */
++gceSTATUS
++gcoSURF_Construct(
++ IN gcoHAL Hal,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Depth,
++ IN gceSURF_TYPE Type,
++ IN gceSURF_FORMAT Format,
++ IN gcePOOL Pool,
++ OUT gcoSURF * Surface
++ );
++
++/* Destroy an gcoSURF object. */
++gceSTATUS
++gcoSURF_Destroy(
++ IN gcoSURF Surface
++ );
++
++/* Map user-allocated surface. */
++gceSTATUS
++gcoSURF_MapUserSurface(
++ IN gcoSURF Surface,
++ IN gctUINT Alignment,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical
++ );
++
++/* Query vid mem node info. */
++gceSTATUS
++gcoSURF_QueryVidMemNode(
++ IN gcoSURF Surface,
++ OUT gctUINT64 * Node,
++ OUT gcePOOL * Pool,
++ OUT gctUINT_PTR Bytes
++ );
++
++/* Set the color type of the surface. */
++gceSTATUS
++gcoSURF_SetColorType(
++ IN gcoSURF Surface,
++ IN gceSURF_COLOR_TYPE ColorType
++ );
++
++/* Get the color type of the surface. */
++gceSTATUS
++gcoSURF_GetColorType(
++ IN gcoSURF Surface,
++ OUT gceSURF_COLOR_TYPE *ColorType
++ );
++
++/* Set the surface rotation angle. */
++gceSTATUS
++gcoSURF_SetRotation(
++ IN gcoSURF Surface,
++ IN gceSURF_ROTATION Rotation
++ );
++
++gceSTATUS
++gcoSURF_SetPreRotation(
++ IN gcoSURF Surface,
++ IN gceSURF_ROTATION Rotation
++ );
++
++gceSTATUS
++gcoSURF_GetPreRotation(
++ IN gcoSURF Surface,
++ IN gceSURF_ROTATION *Rotation
++ );
++
++gceSTATUS
++gcoSURF_IsValid(
++ IN gcoSURF Surface
++ );
++
++#ifndef VIVANTE_NO_3D
++/* Verify and return the state of the tile status mechanism. */
++gceSTATUS
++gcoSURF_IsTileStatusSupported(
++ IN gcoSURF Surface
++ );
++
++/* Process tile status for the specified surface. */
++gceSTATUS
++gcoSURF_SetTileStatus(
++ IN gcoSURF Surface
++ );
++
++/* Enable tile status for the specified surface. */
++gceSTATUS
++gcoSURF_EnableTileStatus(
++ IN gcoSURF Surface
++ );
++
++/* Disable tile status for the specified surface. */
++gceSTATUS
++gcoSURF_DisableTileStatus(
++ IN gcoSURF Surface,
++ IN gctBOOL Decompress
++ );
++
++gceSTATUS
++gcoSURF_AlignResolveRect(
++ IN gcoSURF Surf,
++ IN gcsPOINT_PTR RectOrigin,
++ IN gcsPOINT_PTR RectSize,
++ OUT gcsPOINT_PTR AlignedOrigin,
++ OUT gcsPOINT_PTR AlignedSize
++ );
++#endif /* VIVANTE_NO_3D */
++
++/* Get surface size. */
++gceSTATUS
++gcoSURF_GetSize(
++ IN gcoSURF Surface,
++ OUT gctUINT * Width,
++ OUT gctUINT * Height,
++ OUT gctUINT * Depth
++ );
++
++/* Get surface aligned sizes. */
++gceSTATUS
++gcoSURF_GetAlignedSize(
++ IN gcoSURF Surface,
++ OUT gctUINT * Width,
++ OUT gctUINT * Height,
++ OUT gctINT * Stride
++ );
++
++/* Get alignments. */
++gceSTATUS
++gcoSURF_GetAlignment(
++ IN gceSURF_TYPE Type,
++ IN gceSURF_FORMAT Format,
++ OUT gctUINT * AddressAlignment,
++ OUT gctUINT * XAlignment,
++ OUT gctUINT * YAlignment
++ );
++
++/* Get surface type and format. */
++gceSTATUS
++gcoSURF_GetFormat(
++ IN gcoSURF Surface,
++ OUT gceSURF_TYPE * Type,
++ OUT gceSURF_FORMAT * Format
++ );
++
++/* Get surface tiling. */
++gceSTATUS
++gcoSURF_GetTiling(
++ IN gcoSURF Surface,
++ OUT gceTILING * Tiling
++ );
++
++/* Lock the surface. */
++gceSTATUS
++gcoSURF_Lock(
++ IN gcoSURF Surface,
++ IN OUT gctUINT32 * Address,
++ IN OUT gctPOINTER * Memory
++ );
++
++/* Unlock the surface. */
++gceSTATUS
++gcoSURF_Unlock(
++ IN gcoSURF Surface,
++ IN gctPOINTER Memory
++ );
++
++/* Return pixel format parameters. */
++gceSTATUS
++gcoSURF_QueryFormat(
++ IN gceSURF_FORMAT Format,
++ OUT gcsSURF_FORMAT_INFO_PTR * Info
++ );
++
++/* Compute the color pixel mask. */
++gceSTATUS
++gcoSURF_ComputeColorMask(
++ IN gcsSURF_FORMAT_INFO_PTR Format,
++ OUT gctUINT32_PTR ColorMask
++ );
++
++/* Flush the surface. */
++gceSTATUS
++gcoSURF_Flush(
++ IN gcoSURF Surface
++ );
++
++/* Fill surface from its tile status buffer. */
++gceSTATUS
++gcoSURF_FillFromTile(
++ IN gcoSURF Surface
++ );
++
++/* Check if surface needs a filler. */
++gceSTATUS gcoSURF_NeedFiller(IN gcoSURF Surface);
++
++/* Fill surface with a value. */
++gceSTATUS
++gcoSURF_Fill(
++ IN gcoSURF Surface,
++ IN gcsPOINT_PTR Origin,
++ IN gcsSIZE_PTR Size,
++ IN gctUINT32 Value,
++ IN gctUINT32 Mask
++ );
++
++/* Alpha blend two surfaces together. */
++gceSTATUS
++gcoSURF_Blend(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsPOINT_PTR SrcOrig,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsSIZE_PTR Size,
++ IN gceSURF_BLEND_MODE Mode
++ );
++
++/* Create a new gcoSURF wrapper object. */
++gceSTATUS
++gcoSURF_ConstructWrapper(
++ IN gcoHAL Hal,
++ OUT gcoSURF * Surface
++ );
++
++/* Set the underlying buffer for the surface wrapper. */
++gceSTATUS
++gcoSURF_SetBuffer(
++ IN gcoSURF Surface,
++ IN gceSURF_TYPE Type,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Stride,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical
++ );
++
++/* Set the underlying video buffer for the surface wrapper. */
++gceSTATUS
++gcoSURF_SetVideoBuffer(
++ IN gcoSURF Surface,
++ IN gceSURF_TYPE Type,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Stride,
++ IN gctPOINTER *LogicalPlane1,
++ IN gctUINT32 *PhysicalPlane1
++ );
++
++/* Set the size of the surface in pixels and map the underlying buffer. */
++gceSTATUS
++gcoSURF_SetWindow(
++ IN gcoSURF Surface,
++ IN gctUINT X,
++ IN gctUINT Y,
++ IN gctUINT Width,
++ IN gctUINT Height
++ );
++
++/* Set the width/height alignment of the surface directly and calculate stride/size. This is only for the DRI backend for now; use with care. */
++gceSTATUS
++gcoSURF_SetAlignment(
++ IN gcoSURF Surface,
++ IN gctUINT Width,
++ IN gctUINT Height
++ );
++
++/* Increase reference count of the surface. */
++gceSTATUS
++gcoSURF_ReferenceSurface(
++ IN gcoSURF Surface
++ );
++
++/* Get surface reference count. */
++gceSTATUS
++gcoSURF_QueryReferenceCount(
++ IN gcoSURF Surface,
++ OUT gctINT32 * ReferenceCount
++ );
++
++/* Set surface orientation. */
++gceSTATUS
++gcoSURF_SetOrientation(
++ IN gcoSURF Surface,
++ IN gceORIENTATION Orientation
++ );
++
++/* Query surface orientation. */
++gceSTATUS
++gcoSURF_QueryOrientation(
++ IN gcoSURF Surface,
++ OUT gceORIENTATION * Orientation
++ );
++
++gceSTATUS
++gcoSURF_SetOffset(
++ IN gcoSURF Surface,
++ IN gctUINT Offset
++ );
++
++gceSTATUS
++gcoSURF_GetOffset(
++ IN gcoSURF Surface,
++ OUT gctUINT *Offset
++ );
++
++gceSTATUS
++gcoSURF_NODE_Cache(
++ IN gcsSURF_NODE_PTR Node,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes,
++ IN gceCACHEOPERATION Operation
++ );
++
++/* Perform CPU cache operation on surface */
++gceSTATUS
++gcoSURF_CPUCacheOperation(
++ IN gcoSURF Surface,
++ IN gceCACHEOPERATION Operation
++ );
++
++
++gceSTATUS
++gcoSURF_SetLinearResolveAddress(
++ IN gcoSURF Surface,
++ IN gctUINT32 Address,
++ IN gctPOINTER Memory
++ );
++
++gceSTATUS
++gcoSURF_Swap(
++    IN gcoSURF Surface1,
++    IN gcoSURF Surface2
++    );
++
++/******************************************************************************\
++********************************* gcoDUMP Object ********************************
++\******************************************************************************/
++
++/* Construct a new gcoDUMP object. */
++gceSTATUS
++gcoDUMP_Construct(
++ IN gcoOS Os,
++ IN gcoHAL Hal,
++ OUT gcoDUMP * Dump
++ );
++
++/* Destroy a gcoDUMP object. */
++gceSTATUS
++gcoDUMP_Destroy(
++ IN gcoDUMP Dump
++ );
++
++/* Enable/disable dumping. */
++gceSTATUS
++gcoDUMP_Control(
++ IN gcoDUMP Dump,
++ IN gctSTRING FileName
++ );
++
++gceSTATUS
++gcoDUMP_IsEnabled(
++ IN gcoDUMP Dump,
++ OUT gctBOOL * Enabled
++ );
++
++/* Add surface. */
++gceSTATUS
++gcoDUMP_AddSurface(
++ IN gcoDUMP Dump,
++ IN gctINT32 Width,
++ IN gctINT32 Height,
++ IN gceSURF_FORMAT PixelFormat,
++ IN gctUINT32 Address,
++ IN gctSIZE_T ByteCount
++ );
++
++/* Mark the beginning of a frame. */
++gceSTATUS
++gcoDUMP_FrameBegin(
++ IN gcoDUMP Dump
++ );
++
++/* Mark the end of a frame. */
++gceSTATUS
++gcoDUMP_FrameEnd(
++ IN gcoDUMP Dump
++ );
++
++/* Dump data. */
++gceSTATUS
++gcoDUMP_DumpData(
++ IN gcoDUMP Dump,
++ IN gceDUMP_TAG Type,
++ IN gctUINT32 Address,
++ IN gctSIZE_T ByteCount,
++ IN gctCONST_POINTER Data
++ );
++
++/* Delete an address. */
++gceSTATUS
++gcoDUMP_Delete(
++ IN gcoDUMP Dump,
++ IN gctUINT32 Address
++ );
++
++/* Enable dump or not. */
++gceSTATUS
++gcoDUMP_SetDumpFlag(
++ IN gctBOOL DumpState
++ );
++
++/******************************************************************************\
++******************************* gcsRECT Structure ******************************
++\******************************************************************************/
++
++/* Initialize rectangle structure. */
++gceSTATUS
++gcsRECT_Set(
++ OUT gcsRECT_PTR Rect,
++ IN gctINT32 Left,
++ IN gctINT32 Top,
++ IN gctINT32 Right,
++ IN gctINT32 Bottom
++ );
++
++/* Return the width of the rectangle. */
++gceSTATUS
++gcsRECT_Width(
++ IN gcsRECT_PTR Rect,
++ OUT gctINT32 * Width
++ );
++
++/* Return the height of the rectangle. */
++gceSTATUS
++gcsRECT_Height(
++ IN gcsRECT_PTR Rect,
++ OUT gctINT32 * Height
++ );
++
++/* Ensure that top left corner is to the left and above the right bottom. */
++gceSTATUS
++gcsRECT_Normalize(
++ IN OUT gcsRECT_PTR Rect
++ );
++
++/* Compare two rectangles. */
++gceSTATUS
++gcsRECT_IsEqual(
++ IN gcsRECT_PTR Rect1,
++ IN gcsRECT_PTR Rect2,
++ OUT gctBOOL * Equal
++ );
++
++/* Compare the sizes of two rectangles. */
++gceSTATUS
++gcsRECT_IsOfEqualSize(
++ IN gcsRECT_PTR Rect1,
++ IN gcsRECT_PTR Rect2,
++ OUT gctBOOL * EqualSize
++ );
++
++gceSTATUS
++gcsRECT_RelativeRotation(
++ IN gceSURF_ROTATION Orientation,
++ IN OUT gceSURF_ROTATION *Relation);
++
++gceSTATUS
++gcsRECT_Rotate(
++    IN OUT gcsRECT_PTR Rect,
++    IN gceSURF_ROTATION Rotation,
++    IN gceSURF_ROTATION toRotation,
++    IN gctINT32 SurfaceWidth,
++    IN gctINT32 SurfaceHeight
++    );
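A tiny usage sketch of the rectangle helpers above (values are illustrative):

    {
        gcsRECT rect;
        gctINT32 width = 0, height = 0;

        gcsRECT_Set(&rect, 0, 0, 640, 480);
        gcsRECT_Width(&rect, &width);    /* width  = 640 */
        gcsRECT_Height(&rect, &height);  /* height = 480 */
    }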
++
++/******************************************************************************\
++**************************** gcsBOUNDARY Structure *****************************
++\******************************************************************************/
++
++typedef struct _gcsBOUNDARY
++{
++ gctINT x;
++ gctINT y;
++ gctINT width;
++ gctINT height;
++}
++gcsBOUNDARY;
++
++/******************************************************************************\
++********************************* gcoHEAP Object ********************************
++\******************************************************************************/
++
++typedef struct _gcoHEAP * gcoHEAP;
++
++/* Construct a new gcoHEAP object. */
++gceSTATUS
++gcoHEAP_Construct(
++ IN gcoOS Os,
++ IN gctSIZE_T AllocationSize,
++ OUT gcoHEAP * Heap
++ );
++
++/* Destroy an gcoHEAP object. */
++gceSTATUS
++gcoHEAP_Destroy(
++ IN gcoHEAP Heap
++ );
++
++/* Allocate memory. */
++gceSTATUS
++gcoHEAP_Allocate(
++ IN gcoHEAP Heap,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Node
++ );
++
++gceSTATUS
++gcoHEAP_GetMemorySize(
++ IN gcoHEAP Heap,
++ IN gctPOINTER Memory,
++ OUT gctSIZE_T_PTR MemorySize
++ );
++
++/* Free memory. */
++gceSTATUS
++gcoHEAP_Free(
++ IN gcoHEAP Heap,
++ IN gctPOINTER Node
++ );
++
++#if (VIVANTE_PROFILER || gcdDEBUG)
++/* Profile the heap. */
++gceSTATUS
++gcoHEAP_ProfileStart(
++ IN gcoHEAP Heap
++ );
++
++gceSTATUS
++gcoHEAP_ProfileEnd(
++ IN gcoHEAP Heap,
++ IN gctCONST_STRING Title
++ );
++#endif
++
++
++/******************************************************************************\
++******************************* Debugging Macros *******************************
++\******************************************************************************/
++
++void
++gcoOS_SetDebugLevel(
++ IN gctUINT32 Level
++ );
++
++void
++gcoOS_GetDebugLevel(
++ OUT gctUINT32_PTR DebugLevel
++ );
++
++void
++gcoOS_SetDebugZone(
++ IN gctUINT32 Zone
++ );
++
++void
++gcoOS_GetDebugZone(
++ IN gctUINT32 Zone,
++ OUT gctUINT32_PTR DebugZone
++ );
++
++void
++gcoOS_SetDebugLevelZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone
++ );
++
++void
++gcoOS_SetDebugZones(
++ IN gctUINT32 Zones,
++ IN gctBOOL Enable
++ );
++
++void
++gcoOS_SetDebugFile(
++ IN gctCONST_STRING FileName
++ );
++
++gctFILE
++gcoOS_ReplaceDebugFile(
++ IN gctFILE fp
++ );
++
++/*******************************************************************************
++**
++** gcmFATAL
++**
++** Print a message to the debugger and execute a break point.
++**
++** ARGUMENTS:
++**
++** message Message.
++** ... Optional arguments.
++*/
++
++void
++gckOS_DebugFatal(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_DebugFatal(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_FATAL)
++# define gcmFATAL gcoOS_DebugFatal
++# define gcmkFATAL gckOS_DebugFatal
++#elif gcdHAS_ELLIPSES
++# define gcmFATAL(...)
++# define gcmkFATAL(...)
++#else
++ gcmINLINE static void
++ __dummy_fatal(
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++# define gcmFATAL __dummy_fatal
++# define gcmkFATAL __dummy_fatal
++#endif
++
++#define gcmENUM2TEXT(e) case e: return #e
++
++/*******************************************************************************
++**
++** gcmTRACE
++**
++** Print a message to the debugger if the correct level has been set. In
++** retail mode this macro does nothing.
++**
++** ARGUMENTS:
++**
++** level Level of message.
++** message Message.
++** ... Optional arguments.
++*/
++#define gcvLEVEL_NONE -1
++#define gcvLEVEL_ERROR 0
++#define gcvLEVEL_WARNING 1
++#define gcvLEVEL_INFO 2
++#define gcvLEVEL_VERBOSE 3
++
++void
++gckOS_DebugTrace(
++ IN gctUINT32 Level,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gckOS_DebugTraceN(
++ IN gctUINT32 Level,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_DebugTrace(
++ IN gctUINT32 Level,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++# define gcmTRACE gcoOS_DebugTrace
++# define gcmkTRACE gckOS_DebugTrace
++# define gcmkTRACE_N gckOS_DebugTraceN
++#elif gcdHAS_ELLIPSES
++# define gcmTRACE(...)
++# define gcmkTRACE(...)
++# define gcmkTRACE_N(...)
++#else
++ gcmINLINE static void
++ __dummy_trace(
++ IN gctUINT32 Level,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++
++ gcmINLINE static void
++ __dummy_trace_n(
++ IN gctUINT32 Level,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++
++# define gcmTRACE __dummy_trace
++# define gcmkTRACE __dummy_trace
++# define gcmkTRACE_N __dummy_trace_n
++#endif
++
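++/* Illustrative usage sketch ('value' is an assumed local):
++**
++**     gcmTRACE(gcvLEVEL_WARNING, "unexpected value %d", value);
++*/
++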
++/* Zones common for kernel and user. */
++#define gcvZONE_OS (1 << 0)
++#define gcvZONE_HARDWARE (1 << 1)
++#define gcvZONE_HEAP (1 << 2)
++#define gcvZONE_SIGNAL (1 << 27)
++
++/* Kernel zones. */
++#define gcvZONE_KERNEL (1 << 3)
++#define gcvZONE_VIDMEM (1 << 4)
++#define gcvZONE_COMMAND (1 << 5)
++#define gcvZONE_DRIVER (1 << 6)
++#define gcvZONE_CMODEL (1 << 7)
++#define gcvZONE_MMU (1 << 8)
++#define gcvZONE_EVENT (1 << 9)
++#define gcvZONE_DEVICE (1 << 10)
++#define gcvZONE_DATABASE (1 << 11)
++#define gcvZONE_INTERRUPT (1 << 12)
++#define gcvZONE_POWER (1 << 13)
++
++/* User zones. */
++#define gcvZONE_HAL (1 << 3)
++#define gcvZONE_BUFFER (1 << 4)
++#define gcvZONE_CONTEXT (1 << 5)
++#define gcvZONE_SURFACE (1 << 6)
++#define gcvZONE_INDEX (1 << 7)
++#define gcvZONE_STREAM (1 << 8)
++#define gcvZONE_TEXTURE (1 << 9)
++#define gcvZONE_2D (1 << 10)
++#define gcvZONE_3D (1 << 11)
++#define gcvZONE_COMPILER (1 << 12)
++#define gcvZONE_MEMORY (1 << 13)
++#define gcvZONE_STATE (1 << 14)
++#define gcvZONE_AUX (1 << 15)
++#define gcvZONE_VERTEX (1 << 16)
++#define gcvZONE_CL (1 << 17)
++#define gcvZONE_COMPOSITION (1 << 17)
++#define gcvZONE_VG (1 << 18)
++#define gcvZONE_IMAGE (1 << 19)
++#define gcvZONE_UTILITY (1 << 20)
++#define gcvZONE_PARAMETERS (1 << 21)
++
++/* API definitions. */
++#define gcvZONE_API_HAL (1 << 28)
++#define gcvZONE_API_EGL (2 << 28)
++#define gcvZONE_API_ES11 (3 << 28)
++#define gcvZONE_API_ES20 (4 << 28)
++#define gcvZONE_API_VG11 (5 << 28)
++#define gcvZONE_API_GL (6 << 28)
++#define gcvZONE_API_DFB (7 << 28)
++#define gcvZONE_API_GDI (8 << 28)
++#define gcvZONE_API_D3D (9 << 28)
++#define gcvZONE_API_ES30 (10 << 28)
++
++
++#define gcmZONE_GET_API(zone) ((zone) >> 28)
++/* Setting gcdZONE_MASK to, e.g., 0x0 | gcvZONE_API_EGL
++** enables printing of EGL module debug info. */
++#define gcdZONE_MASK 0x0FFFFFFF
++
++/* Handy zones. */
++#define gcvZONE_NONE 0
++#define gcvZONE_ALL 0x0FFFFFFF
++
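++/* Illustrative usage sketch: level and zones combine through the setters
++** declared above, e.g. to trace OS and heap activity at INFO level:
++**
++**     gcoOS_SetDebugLevelZone(gcvLEVEL_INFO, gcvZONE_OS | gcvZONE_HEAP);
++*/
++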
++/* Dump API depth: set to 1 for API only, or 2 for API and API behavior. */
++#define gcvDUMP_API_DEPTH 1
++
++/*******************************************************************************
++**
++** gcmTRACE_ZONE
++**
++** Print a message to the debugger if the correct level and zone have been
++** set. In retail mode this macro does nothing.
++**
++** ARGUMENTS:
++**
++** Level Level of message.
++** Zone Zone of message.
++** Message Message.
++** ... Optional arguments.
++*/
++
++void
++gckOS_DebugTraceZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gckOS_DebugTraceZoneN(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_DebugTraceZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++# define gcmTRACE_ZONE gcoOS_DebugTraceZone
++# define gcmkTRACE_ZONE gckOS_DebugTraceZone
++# define gcmkTRACE_ZONE_N gckOS_DebugTraceZoneN
++#elif gcdHAS_ELLIPSES
++# define gcmTRACE_ZONE(...)
++# define gcmkTRACE_ZONE(...)
++# define gcmkTRACE_ZONE_N(...)
++#else
++ gcmINLINE static void
++ __dummy_trace_zone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++
++ gcmINLINE static void
++ __dummy_trace_zone_n(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone,
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++
++# define gcmTRACE_ZONE __dummy_trace_zone
++# define gcmkTRACE_ZONE __dummy_trace_zone
++# define gcmkTRACE_ZONE_N __dummy_trace_zone_n
++#endif
++
++/*******************************************************************************
++**
++** gcmDEBUG_ONLY
++**
++** Execute a statement or function only in DEBUG mode.
++**
++** ARGUMENTS:
++**
++** f Statement or function to execute.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++# define gcmDEBUG_ONLY(f) f
++#else
++# define gcmDEBUG_ONLY(f)
++#endif
++
++/*******************************************************************************
++**
++** gcmSTACK_PUSH
++** gcmSTACK_POP
++** gcmSTACK_DUMP
++**
++** Push or pop a function with entry arguments on the trace stack.
++**
++** ARGUMENTS:
++**
++** Function Name of function.
++** Line Line number.
++** Text Optional text.
++** ... Optional arguments for text.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_STACK)
++ void
++ gcoOS_StackPush(
++ IN gctCONST_STRING Function,
++ IN gctINT Line,
++ IN gctCONST_STRING Text,
++ ...
++ );
++ void
++ gcoOS_StackPop(
++ IN gctCONST_STRING Function
++ );
++ void
++ gcoOS_StackDump(
++ void
++ );
++# define gcmSTACK_PUSH gcoOS_StackPush
++# define gcmSTACK_POP gcoOS_StackPop
++# define gcmSTACK_DUMP gcoOS_StackDump
++#elif gcdHAS_ELLIPSES
++# define gcmSTACK_PUSH(...) do { } while (0)
++# define gcmSTACK_POP(Function) do { } while (0)
++# define gcmSTACK_DUMP() do { } while (0)
++#else
++ gcmINLINE static void
++ __dummy_stack_push(
++ IN gctCONST_STRING Function,
++ IN gctINT Line,
++ IN gctCONST_STRING Text, ...
++ )
++ {
++ }
++# define gcmSTACK_PUSH __dummy_stack_push
++# define gcmSTACK_POP(Function) do { } while (0)
++# define gcmSTACK_DUMP() do { } while (0)
++#endif
++
++/******************************************************************************\
++******************************** Logging Macros ********************************
++\******************************************************************************/
++
++#define gcdHEADER_LEVEL gcvLEVEL_VERBOSE
++
++
++#if gcdENABLE_PROFILING
++void
++gcoOS_ProfileDB(
++ IN gctCONST_STRING Function,
++ IN OUT gctBOOL_PTR Initialized
++ );
++
++#define gcmHEADER() \
++ static gctBOOL __profile__initialized__ = gcvFALSE; \
++ gcmSTACK_PUSH(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcoOS_ProfileDB(__FUNCTION__, &__profile__initialized__)
++#define gcmHEADER_ARG(Text, ...) \
++ static gctBOOL __profile__initialized__ = gcvFALSE; \
++ gcmSTACK_PUSH(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \
++ gcoOS_ProfileDB(__FUNCTION__, &__profile__initialized__)
++#define gcmFOOTER() \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcoOS_ProfileDB(__FUNCTION__, gcvNULL)
++#define gcmFOOTER_NO() \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcoOS_ProfileDB(__FUNCTION__, gcvNULL)
++#define gcmFOOTER_ARG(...) \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcoOS_ProfileDB(__FUNCTION__, gcvNULL)
++#define gcmFOOTER_KILL() \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcoOS_ProfileDB(gcvNULL, gcvNULL)
++
++#else /* gcdENABLE_PROFILING */
++
++#if gcdHAS_ELLIPSES
++#define gcmHEADER() \
++ gctINT8 __user__ = 1; \
++ gctINT8_PTR __user_ptr__ = &__user__; \
++ gcmSTACK_PUSH(__FUNCTION__, __LINE__, gcvNULL, gcvNULL); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "++%s(%d)", __FUNCTION__, __LINE__)
++#else
++ gcmINLINE static void
++ __dummy_header(void)
++ {
++ }
++# define gcmHEADER __dummy_header
++#endif
++
++#if gcdHAS_ELLIPSES
++# define gcmHEADER_ARG(Text, ...) \
++ gctINT8 __user__ = 1; \
++ gctINT8_PTR __user_ptr__ = &__user__; \
++ gcmSTACK_PUSH(__FUNCTION__, __LINE__, Text, __VA_ARGS__); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "++%s(%d): " Text, __FUNCTION__, __LINE__, __VA_ARGS__)
++#else
++ gcmINLINE static void
++ __dummy_header_arg(
++ IN gctCONST_STRING Text,
++ ...
++ )
++ {
++ }
++# define gcmHEADER_ARG __dummy_header_arg
++#endif
++
++#if gcdHAS_ELLIPSES
++# define gcmFOOTER() \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcmPROFILE_ONLY(gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d) [%llu,%llu]: status=%d(%s)", \
++ __FUNCTION__, __LINE__, \
++ __ticks__, __total__, \
++ status, gcoOS_DebugStatus2Name(status))); \
++ gcmPROFILE_ELSE(gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d): status=%d(%s)", \
++ __FUNCTION__, __LINE__, \
++ status, gcoOS_DebugStatus2Name(status))); \
++ *__user_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_footer(void)
++ {
++ }
++# define gcmFOOTER __dummy_footer
++#endif
++
++#if gcdHAS_ELLIPSES
++#define gcmFOOTER_NO() \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d)", __FUNCTION__, __LINE__); \
++ *__user_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_footer_no(void)
++ {
++ }
++# define gcmFOOTER_NO __dummy_footer_no
++#endif
++
++#if gcdHAS_ELLIPSES
++#define gcmFOOTER_KILL() \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d)", __FUNCTION__, __LINE__); \
++ *__user_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_footer_kill(void)
++ {
++ }
++# define gcmFOOTER_KILL __dummy_footer_kill
++#endif
++
++#if gcdHAS_ELLIPSES
++# define gcmFOOTER_ARG(Text, ...) \
++ gcmSTACK_POP(__FUNCTION__); \
++ gcmTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d): " Text, __FUNCTION__, __LINE__, __VA_ARGS__); \
++ *__user_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_footer_arg(
++ IN gctCONST_STRING Text,
++ ...
++ )
++ {
++ }
++# define gcmFOOTER_ARG __dummy_footer_arg
++#endif
++
++#endif /* gcdENABLE_PROFILING */
++
++#if gcdHAS_ELLIPSES
++#define gcmkHEADER() \
++ gctINT8 __kernel__ = 1; \
++ gctINT8_PTR __kernel_ptr__ = &__kernel__; \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "++%s(%d)", __FUNCTION__, __LINE__)
++#else
++ gcmINLINE static void
++ __dummy_kheader(void)
++ {
++ }
++# define gcmkHEADER __dummy_kheader
++#endif
++
++#if gcdHAS_ELLIPSES
++# define gcmkHEADER_ARG(Text, ...) \
++ gctINT8 __kernel__ = 1; \
++ gctINT8_PTR __kernel_ptr__ = &__kernel__; \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "++%s(%d): " Text, __FUNCTION__, __LINE__, __VA_ARGS__)
++#else
++ gcmINLINE static void
++ __dummy_kheader_arg(
++ IN gctCONST_STRING Text,
++ ...
++ )
++ {
++ }
++# define gcmkHEADER_ARG __dummy_kheader_arg
++#endif
++
++#if gcdHAS_ELLIPSES
++#define gcmkFOOTER() \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d): status=%d(%s)", \
++ __FUNCTION__, __LINE__, status, gckOS_DebugStatus2Name(status)); \
++ *__kernel_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_kfooter(void)
++ {
++ }
++# define gcmkFOOTER __dummy_kfooter
++#endif
++
++#if gcdHAS_ELLIPSES
++#define gcmkFOOTER_NO() \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d)", __FUNCTION__, __LINE__); \
++ *__kernel_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_kfooter_no(void)
++ {
++ }
++# define gcmkFOOTER_NO __dummy_kfooter_no
++#endif
++
++#if gcdHAS_ELLIPSES
++# define gcmkFOOTER_ARG(Text, ...) \
++ gcmkTRACE_ZONE(gcdHEADER_LEVEL, _GC_OBJ_ZONE, \
++ "--%s(%d): " Text, \
++ __FUNCTION__, __LINE__, __VA_ARGS__); \
++ *__kernel_ptr__ -= 1
++#else
++ gcmINLINE static void
++ __dummy_kfooter_arg(
++ IN gctCONST_STRING Text,
++ ...
++ )
++ {
++ }
++# define gcmkFOOTER_ARG __dummy_kfooter_arg
++#endif
++
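++/* Illustrative usage sketch: gcmHEADER/gcmFOOTER bracket a function body
++** and assume a local 'status' variable plus a per-file _GC_OBJ_ZONE
++** definition ('_Example' is a hypothetical function):
++**
++**     gceSTATUS
++**     _Example(
++**         IN gctUINT32 Value
++**         )
++**     {
++**         gceSTATUS status = gcvSTATUS_OK;
++**         gcmHEADER_ARG("Value=%u", Value);
++**         gcmFOOTER();
++**         return status;
++**     }
++*/
++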
++#define gcmOPT_VALUE(ptr) (((ptr) == gcvNULL) ? 0 : *(ptr))
++#define gcmOPT_VALUE_INDEX(ptr, index) (((ptr) == gcvNULL) ? 0 : ptr[index])
++#define gcmOPT_POINTER(ptr) (((ptr) == gcvNULL) ? gcvNULL : *(ptr))
++#define gcmOPT_STRING(ptr) (((ptr) == gcvNULL) ? "(nil)" : (ptr))
++
++void
++gckOS_Print(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gckOS_PrintN(
++ IN gctUINT ArgumentSize,
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gckOS_CopyPrint(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_Print(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#define gcmPRINT gcoOS_Print
++#define gcmkPRINT gckOS_Print
++#define gcmkPRINT_N gckOS_PrintN
++
++#if gcdPRINT_VERSION
++# define gcmPRINT_VERSION() do { \
++ _gcmPRINT_VERSION(gcm); \
++ gcmSTACK_DUMP(); \
++ } while (0)
++# define gcmkPRINT_VERSION() _gcmPRINT_VERSION(gcmk)
++# define _gcmPRINT_VERSION(prefix) \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ "Vivante HAL version %d.%d.%d build %d %s %s", \
++ gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH, \
++ gcvVERSION_BUILD, gcvVERSION_DATE, gcvVERSION_TIME )
++#else
++# define gcmPRINT_VERSION() do { gcmSTACK_DUMP(); } while (gcvFALSE)
++# define gcmkPRINT_VERSION() do { } while (gcvFALSE)
++#endif
++
++typedef enum _gceDUMP_BUFFER
++{
++ gceDUMP_BUFFER_CONTEXT,
++ gceDUMP_BUFFER_USER,
++ gceDUMP_BUFFER_KERNEL,
++ gceDUMP_BUFFER_LINK,
++ gceDUMP_BUFFER_WAITLINK,
++ gceDUMP_BUFFER_FROM_USER,
++}
++gceDUMP_BUFFER;
++
++void
++gckOS_DumpBuffer(
++ IN gckOS Os,
++ IN gctPOINTER Buffer,
++ IN gctUINT Size,
++ IN gceDUMP_BUFFER Type,
++ IN gctBOOL CopyMessage
++ );
++
++#define gcmkDUMPBUFFER gckOS_DumpBuffer
++
++#if gcdDUMP_COMMAND
++# define gcmkDUMPCOMMAND(Os, Buffer, Size, Type, CopyMessage) \
++ gcmkDUMPBUFFER(Os, Buffer, Size, Type, CopyMessage)
++#else
++# define gcmkDUMPCOMMAND(Os, Buffer, Size, Type, CopyMessage)
++#endif
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++
++void
++gckOS_DebugFlush(
++ gctCONST_STRING CallerName,
++ gctUINT LineNumber,
++ gctUINT32 DmaAddress
++ );
++
++# define gcmkDEBUGFLUSH(DmaAddress) \
++ gckOS_DebugFlush(__FUNCTION__, __LINE__, DmaAddress)
++#else
++# define gcmkDEBUGFLUSH(DmaAddress)
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_FRAMERATE
++**
++** Print average frame rate
++**
++*/
++#if gcdDUMP_FRAMERATE
++ gceSTATUS
++ gcfDumpFrameRate(
++ void
++ );
++# define gcmDUMP_FRAMERATE gcfDumpFrameRate
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_FRAMERATE(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_frame_rate(
++ void
++ )
++ {
++ }
++# define gcmDUMP_FRAMERATE __dummy_dump_frame_rate
++#endif
++
++
++/*******************************************************************************
++**
++** gcmDUMP
++**
++** Print a dump message.
++**
++** ARGUMENTS:
++**
++** gctSTRING Message.
++**
++** ... Optional arguments.
++*/
++#if gcdDUMP
++ gceSTATUS
++ gcfDump(
++ IN gcoOS Os,
++ IN gctCONST_STRING String,
++ ...
++ );
++# define gcmDUMP gcfDump
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP(...)
++#else
++ gcmINLINE static void
++ __dummy_dump(
++ IN gcoOS Os,
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++# define gcmDUMP __dummy_dump
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_DATA
++**
++** Add data to the dump.
++**
++** ARGUMENTS:
++**
++** gctSTRING Tag
++** Tag for dump.
++**
++** gctPOINTER Logical
++** Logical address of buffer.
++**
++** gctSIZE_T Bytes
++** Number of bytes.
++*/
++
++#if gcdDUMP || gcdDUMP_COMMAND
++ gceSTATUS
++ gcfDumpData(
++ IN gcoOS Os,
++ IN gctSTRING Tag,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++# define gcmDUMP_DATA gcfDumpData
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_DATA(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_data(
++ IN gcoOS Os,
++ IN gctSTRING Tag,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++ {
++ }
++# define gcmDUMP_DATA __dummy_dump_data
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_BUFFER
++**
++** Print a buffer to the dump.
++**
++** ARGUMENTS:
++**
++** gctSTRING Tag
++** Tag for dump.
++**
++** gctUINT32 Physical
++** Physical address of buffer.
++**
++** gctPOINTER Logical
++** Logical address of buffer.
++**
++** gctUINT32 Offset
++** Offset into buffer.
++**
++** gctSIZE_T Bytes
++** Number of bytes.
++*/
++
++#if gcdDUMP || gcdDUMP_COMMAND
++gceSTATUS
++gcfDumpBuffer(
++ IN gcoOS Os,
++ IN gctSTRING Tag,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN gctSIZE_T Bytes
++ );
++# define gcmDUMP_BUFFER gcfDumpBuffer
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_BUFFER(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_buffer(
++ IN gcoOS Os,
++ IN gctSTRING Tag,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN gctSIZE_T Bytes
++ )
++ {
++ }
++# define gcmDUMP_BUFFER __dummy_dump_buffer
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_API
++**
++** Print a dump message for a high level API prefixed by the function name.
++**
++** ARGUMENTS:
++**
++** gctSTRING Message.
++**
++** ... Optional arguments.
++*/
++gceSTATUS gcfDumpApi(IN gctCONST_STRING String, ...);
++#if gcdDUMP_API
++# define gcmDUMP_API gcfDumpApi
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_API(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_api(
++ IN gctCONST_STRING Message,
++ ...
++ )
++ {
++ }
++# define gcmDUMP_API __dummy_dump_api
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_API_ARRAY
++**
++** Print an array of data.
++**
++** ARGUMENTS:
++**
++** gctUINT32_PTR Pointer to array.
++** gctUINT32 Size.
++*/
++gceSTATUS gcfDumpArray(IN gctCONST_POINTER Data, IN gctUINT32 Size);
++#if gcdDUMP_API
++# define gcmDUMP_API_ARRAY gcfDumpArray
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_API_ARRAY(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_api_array(
++ IN gctCONST_POINTER Data,
++ IN gctUINT32 Size
++ )
++ {
++ }
++# define gcmDUMP_API_ARRAY __dummy_dump_api_array
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_API_ARRAY_TOKEN
++**
++** Print an array of data terminated by a token.
++**
++** ARGUMENTS:
++**
++** gctUINT32_PTR Pointer to array.
++** gctUINT32 Termination.
++*/
++gceSTATUS gcfDumpArrayToken(IN gctCONST_POINTER Data, IN gctUINT32 Termination);
++#if gcdDUMP_API
++# define gcmDUMP_API_ARRAY_TOKEN gcfDumpArrayToken
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_API_ARRAY_TOKEN(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_api_array_token(
++ IN gctCONST_POINTER Data,
++ IN gctUINT32 Termination
++ )
++ {
++ }
++# define gcmDUMP_API_ARRAY_TOKEN __dummy_dump_api_array_token
++#endif
++
++/*******************************************************************************
++**
++** gcmDUMP_API_DATA
++**
++** Print an array of bytes.
++**
++** ARGUMENTS:
++**
++** gctCONST_POINTER Pointer to array.
++** gctSIZE_T Size.
++*/
++gceSTATUS gcfDumpApiData(IN gctCONST_POINTER Data, IN gctSIZE_T Size);
++#if gcdDUMP_API
++# define gcmDUMP_API_DATA gcfDumpApiData
++#elif gcdHAS_ELLIPSES
++# define gcmDUMP_API_DATA(...)
++#else
++ gcmINLINE static void
++ __dummy_dump_api_data(
++ IN gctCONST_POINTER Data,
++ IN gctSIZE_T Size
++ )
++ {
++ }
++# define gcmDUMP_API_DATA __dummy_dump_api_data
++#endif
++
++/*******************************************************************************
++**
++** gcmTRACE_RELEASE
++**
++** Print a message to the shader debugger.
++**
++** ARGUMENTS:
++**
++** message Message.
++** ... Optional arguments.
++*/
++
++#define gcmTRACE_RELEASE gcoOS_DebugShaderTrace
++
++void
++gcoOS_DebugShaderTrace(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++void
++gcoOS_SetDebugShaderFiles(
++ IN gctCONST_STRING VSFileName,
++ IN gctCONST_STRING FSFileName
++ );
++
++void
++gcoOS_SetDebugShaderFileType(
++ IN gctUINT32 ShaderType
++ );
++
++void
++gcoOS_EnableDebugBuffer(
++ IN gctBOOL Enable
++ );
++
++/*******************************************************************************
++**
++** gcmBREAK
++**
++** Break into the debugger. In retail mode this macro does nothing.
++**
++** ARGUMENTS:
++**
++** None.
++*/
++
++void
++gcoOS_DebugBreak(
++ void
++ );
++
++void
++gckOS_DebugBreak(
++ void
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_BREAK)
++# define gcmBREAK gcoOS_DebugBreak
++# define gcmkBREAK gckOS_DebugBreak
++#else
++# define gcmBREAK()
++# define gcmkBREAK()
++#endif
++
++/*******************************************************************************
++**
++** gcmASSERT
++**
++** Evaluate an expression and break into the debugger if the expression
++** evaluates to false. In retail mode this macro does nothing.
++**
++** ARGUMENTS:
++**
++** exp Expression to evaluate.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_ASSERT)
++# define _gcmASSERT(prefix, exp) \
++ do \
++ { \
++ if (!(exp)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ASSERT at %s(%d)", \
++ __FUNCTION__, __LINE__); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ "(%s)", #exp); \
++ prefix##BREAK(); \
++ } \
++ } \
++ while (gcvFALSE)
++# define gcmASSERT(exp) _gcmASSERT(gcm, exp)
++# define gcmkASSERT(exp) _gcmASSERT(gcmk, exp)
++#else
++# define gcmASSERT(exp)
++# define gcmkASSERT(exp)
++#endif
++
++/*******************************************************************************
++**
++** gcmVERIFY
++**
++** Verify that an expression evaluates to true. If it does not,
++** an assertion is raised in debug mode.
++**
++** ARGUMENTS:
++**
++** exp Expression to evaluate.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_ASSERT)
++# define gcmVERIFY(exp) gcmASSERT(exp)
++# define gcmkVERIFY(exp) gcmkASSERT(exp)
++#else
++# define gcmVERIFY(exp) exp
++# define gcmkVERIFY(exp) exp
++#endif
++
++/*******************************************************************************
++**
++** gcmVERIFY_OK
++**
++** Verify that a function returns gcvSTATUS_OK. If the function does not return
++** gcvSTATUS_OK, an assertion will happen in debug mode.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++
++void
++gcoOS_Verify(
++ IN gceSTATUS status
++ );
++
++void
++gckOS_Verify(
++ IN gceSTATUS status
++ );
++
++#if gcmIS_DEBUG(gcdDEBUG_ASSERT)
++# define gcmVERIFY_OK(func) \
++ do \
++ { \
++ gceSTATUS verifyStatus = func; \
++ gcoOS_Verify(verifyStatus); \
++ if (verifyStatus != gcvSTATUS_OK) \
++ { \
++ gcmTRACE( \
++ gcvLEVEL_ERROR, \
++ "gcmVERIFY_OK(%d): function returned %d", \
++ __LINE__, verifyStatus \
++ ); \
++ } \
++ gcmASSERT(verifyStatus == gcvSTATUS_OK); \
++ } \
++ while (gcvFALSE)
++# define gcmkVERIFY_OK(func) \
++ do \
++ { \
++ gceSTATUS verifyStatus = func; \
++ if (verifyStatus != gcvSTATUS_OK) \
++ { \
++ gcmkTRACE( \
++ gcvLEVEL_ERROR, \
++ "gcmkVERIFY_OK(%d): function returned %d", \
++ __LINE__, verifyStatus \
++ ); \
++ } \
++ gckOS_Verify(verifyStatus); \
++ gcmkASSERT(verifyStatus == gcvSTATUS_OK); \
++ } \
++ while (gcvFALSE)
++#else
++# define gcmVERIFY_OK(func) func
++# define gcmkVERIFY_OK(func) func
++#endif
++
++gctCONST_STRING
++gcoOS_DebugStatus2Name(
++ gceSTATUS status
++ );
++
++gctCONST_STRING
++gckOS_DebugStatus2Name(
++ gceSTATUS status
++ );
++
++/*******************************************************************************
++**
++** gcmERR_BREAK
++**
++** Executes a break statement on error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++#define _gcmERR_BREAK(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_BREAK: status=%d(%s) @ %s(%d)", \
++ status, gcoOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ break; \
++ } \
++ do { } while (gcvFALSE)
++#define _gcmkERR_BREAK(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_BREAK: status=%d(%s) @ %s(%d)", \
++ status, gckOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ break; \
++ } \
++ do { } while (gcvFALSE)
++#define gcmERR_BREAK(func) _gcmERR_BREAK(gcm, func)
++#define gcmkERR_BREAK(func) _gcmkERR_BREAK(gcmk, func)
++
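++/* Illustrative usage sketch: gcmERR_BREAK assumes a local 'status'
++** variable and is intended for do/while error blocks ('heap', 'bytes'
++** and 'node' are assumed locals):
++**
++**     gceSTATUS status;
++**     do
++**     {
++**         gcmERR_BREAK(gcoHEAP_Allocate(heap, bytes, &node));
++**         gcmERR_BREAK(gcoHEAP_Free(heap, node));
++**     }
++**     while (gcvFALSE);
++*/
++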
++/*******************************************************************************
++**
++** gcmERR_RETURN
++**
++** Executes a return on error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++#define _gcmERR_RETURN(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_RETURN: status=%d(%s) @ %s(%d)", \
++ status, gcoOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ prefix##FOOTER(); \
++ return status; \
++ } \
++ do { } while (gcvFALSE)
++#define _gcmkERR_RETURN(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_RETURN: status=%d(%s) @ %s(%d)", \
++ status, gckOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ prefix##FOOTER(); \
++ return status; \
++ } \
++ do { } while (gcvFALSE)
++#define gcmERR_RETURN(func) _gcmERR_RETURN(gcm, func)
++#define gcmkERR_RETURN(func) _gcmkERR_RETURN(gcmk, func)
++
++
++/*******************************************************************************
++**
++** gcmONERROR
++**
++** Jump to the error handler in case there is an error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++#define _gcmONERROR(prefix, func) \
++ do \
++ { \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ONERROR: status=%d(%s) @ %s(%d)", \
++ status, gcoOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ goto OnError; \
++ } \
++ } \
++ while (gcvFALSE)
++#define _gcmkONERROR(prefix, func) \
++ do \
++ { \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ONERROR: status=%d(%s) @ %s(%d)", \
++ status, gckOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ goto OnError; \
++ } \
++ } \
++ while (gcvFALSE)
++#define gcmONERROR(func) _gcmONERROR(gcm, func)
++#define gcmkONERROR(func) _gcmkONERROR(gcmk, func)
++
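++/* Illustrative usage sketch: gcmONERROR jumps to a caller-provided
++** OnError label and, like the macros above, assumes a local 'status'
++** variable ('heap', 'bytes' and 'node' are assumed locals):
++**
++**     gcmONERROR(gcoHEAP_Allocate(heap, bytes, &node));
++**     return gcvSTATUS_OK;
++**
++** OnError:
++**     return status;
++*/
++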
++/*******************************************************************************
++**
++** gcmVERIFY_LOCK
++**
++** Verifies whether the surface is locked.
++**
++** ARGUMENTS:
++**
++** surfaceInfo Pointer to the surface information structure.
++*/
++#define gcmVERIFY_LOCK(surfaceInfo) \
++ if (!surfaceInfo->node.valid) \
++ { \
++ gcmONERROR(gcvSTATUS_MEMORY_UNLOCKED); \
++ } \
++
++/*******************************************************************************
++**
++** gcmVERIFY_NODE_LOCK
++**
++** Verifies whether the surface node is locked.
++**
++** ARGUMENTS:
++**
++** surfaceNode Pointer to the surface node structure.
++*/
++#define gcmVERIFY_NODE_LOCK(surfaceNode) \
++ if (!(surfaceNode)->valid) \
++ { \
++ status = gcvSTATUS_MEMORY_UNLOCKED; \
++ break; \
++ } \
++ do { } while (gcvFALSE)
++
++/*******************************************************************************
++**
++** gcmBADOBJECT_BREAK
++**
++** Executes a break statement on bad object.
++**
++** ARGUMENTS:
++**
++** obj Object to test.
++** t Expected type of the object.
++*/
++#define gcmBADOBJECT_BREAK(obj, t) \
++ if ((obj == gcvNULL) \
++ || (((gcsOBJECT *)(obj))->type != t) \
++ ) \
++ { \
++ status = gcvSTATUS_INVALID_OBJECT; \
++ break; \
++ } \
++ do { } while (gcvFALSE)
++
++/*******************************************************************************
++**
++** gcmCHECK_STATUS
++**
++** Executes a break statement on error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** func Function to evaluate.
++*/
++#define _gcmCHECK_STATUS(prefix, func) \
++ do \
++ { \
++ last = func; \
++ if (gcmIS_ERROR(last)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "CHECK_STATUS: status=%d(%s) @ %s(%d)", \
++ last, gcoOS_DebugStatus2Name(last), __FUNCTION__, __LINE__); \
++ status = last; \
++ } \
++ } \
++ while (gcvFALSE)
++#define _gcmkCHECK_STATUS(prefix, func) \
++ do \
++ { \
++ last = func; \
++ if (gcmIS_ERROR(last)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "CHECK_STATUS: status=%d(%s) @ %s(%d)", \
++ last, gckOS_DebugStatus2Name(last), __FUNCTION__, __LINE__); \
++ status = last; \
++ } \
++ } \
++ while (gcvFALSE)
++#define gcmCHECK_STATUS(func) _gcmCHECK_STATUS(gcm, func)
++#define gcmkCHECK_STATUS(func) _gcmkCHECK_STATUS(gcmk, func)
++
++/*******************************************************************************
++**
++** gcmVERIFY_ARGUMENT
++**
++** Assert if an argument does not apply to the specified expression. If
++** the argument evaluates to false, gcvSTATUS_INVALID_ARGUMENT will be
++** returned from the current function. In retail mode this macro does
++** nothing.
++**
++** ARGUMENTS:
++**
++** arg Argument to evaluate.
++*/
++# define _gcmVERIFY_ARGUMENT(prefix, arg) \
++ do \
++ { \
++ if (!(arg)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, #prefix "VERIFY_ARGUMENT failed:"); \
++ prefix##ASSERT(arg); \
++ prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT); \
++ return gcvSTATUS_INVALID_ARGUMENT; \
++ } \
++ } \
++ while (gcvFALSE)
++# define gcmVERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcm, arg)
++# define gcmkVERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcmk, arg)
++
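++/* Illustrative usage sketch: guard a public entry point ('Heap' is an
++** assumed parameter); note the macro expands gcmFOOTER_ARG, so gcmHEADER
++** must have been issued first:
++**
++**     gcmVERIFY_ARGUMENT(Heap != gcvNULL);
++*/
++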
++/*******************************************************************************
++**
++** gcmDEBUG_VERIFY_ARGUMENT
++**
++** Works just like gcmVERIFY_ARGUMENT, but is only valid in debug mode.
++** Use this to verify arguments inside non-public API functions.
++*/
++#if gcdDEBUG
++# define gcmDEBUG_VERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcm, arg)
++# define gcmkDEBUG_VERIFY_ARGUMENT(arg) _gcmVERIFY_ARGUMENT(gcmk, arg)
++#else
++# define gcmDEBUG_VERIFY_ARGUMENT(arg)
++# define gcmkDEBUG_VERIFY_ARGUMENT(arg)
++#endif
++
++/*******************************************************************************
++**
++** gcmVERIFY_ARGUMENT_RETURN
++**
++** Assert if an argument does not apply to the specified expression. If
++** the argument evaluates to false, gcvSTATUS_INVALID_ARGUMENT will be
++** returned from the current function. In retail mode this macro does
++** nothing.
++**
++** ARGUMENTS:
++**
++** arg Argument to evaluate.
++*/
++# define _gcmVERIFY_ARGUMENT_RETURN(prefix, arg, value) \
++ do \
++ { \
++ if (!(arg)) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "gcmVERIFY_ARGUMENT_RETURN failed:"); \
++ prefix##ASSERT(arg); \
++ prefix##FOOTER_ARG("value=%d", value); \
++ return value; \
++ } \
++ } \
++ while (gcvFALSE)
++# define gcmVERIFY_ARGUMENT_RETURN(arg, value) \
++ _gcmVERIFY_ARGUMENT_RETURN(gcm, arg, value)
++# define gcmkVERIFY_ARGUMENT_RETURN(arg, value) \
++ _gcmVERIFY_ARGUMENT_RETURN(gcmk, arg, value)
++
++#define MAX_LOOP_COUNT 0x7FFFFFFF
++
++/******************************************************************************\
++****************************** User Debug Option ******************************
++\******************************************************************************/
++
++/* User option. */
++typedef enum _gceDEBUG_MSG
++{
++ gcvDEBUG_MSG_NONE,
++ gcvDEBUG_MSG_ERROR,
++ gcvDEBUG_MSG_WARNING
++}
++gceDEBUG_MSG;
++
++typedef struct _gcsUSER_DEBUG_OPTION
++{
++ gceDEBUG_MSG debugMsg;
++}
++gcsUSER_DEBUG_OPTION;
++
++gcsUSER_DEBUG_OPTION *
++gcGetUserDebugOption(
++ void
++ );
++
++struct _gcoOS_SymbolsList
++{
++ gcePATCH_ID patchId;
++ const char * symList[10];
++};
++
++#if gcdHAS_ELLIPSES
++#define gcmUSER_DEBUG_MSG(level, ...) \
++ do \
++ { \
++ if (level <= gcGetUserDebugOption()->debugMsg) \
++ { \
++ gcoOS_Print(__VA_ARGS__); \
++ } \
++ } while (gcvFALSE)
++
++#define gcmUSER_DEBUG_ERROR_MSG(...) gcmUSER_DEBUG_MSG(gcvDEBUG_MSG_ERROR, "Error: " __VA_ARGS__)
++#define gcmUSER_DEBUG_WARNING_MSG(...) gcmUSER_DEBUG_MSG(gcvDEBUG_MSG_WARNING, "Warning: " __VA_ARGS__)
++#else
++#define gcmUSER_DEBUG_MSG
++#define gcmUSER_DEBUG_ERROR_MSG
++#define gcmUSER_DEBUG_WARNING_MSG
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_base_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_compiler.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_compiler.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_compiler.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_compiler.h 2015-07-27 23:13:06.210822785 +0200
+@@ -0,0 +1,4356 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++/*
++** Include file that defines the front- and back-end compilers, as well as the
++** objects they use.
++*/
++
++#ifndef __gc_hal_compiler_h_
++#define __gc_hal_compiler_h_
++
++#ifndef VIVANTE_NO_3D
++#include "gc_hal_types.h"
++#include "gc_hal_engine.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#ifndef GC_ENABLE_LOADTIME_OPT
++#define GC_ENABLE_LOADTIME_OPT 1
++#endif
++
++#define TEMP_OPT_CONSTANT_TEXLD_COORD 0
++
++#define TEMP_SHADER_PATCH 1
++
++#define TEMP_INLINE_ALL_EXPANSION 1
++/******************************* IR VERSION ******************/
++#define gcdSL_IR_VERSION gcmCC('\0','\0','\0','\1')
++
++/******************************************************************************\
++|******************************* SHADER LANGUAGE ******************************|
++\******************************************************************************/
++
++ /* allocator/deallocator function pointer */
++typedef gceSTATUS (*gctAllocatorFunc)(
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++typedef gceSTATUS (*gctDeallocatorFunc)(
++ IN gctPOINTER Memory
++ );
++
++typedef gctBOOL (*compareFunc) (
++ IN void * data,
++ IN void * key
++ );
++
++typedef struct _gcsListNode gcsListNode;
++struct _gcsListNode
++{
++ gcsListNode * next;
++ void * data;
++};
++
++typedef struct _gcsAllocator
++{
++ gctAllocatorFunc allocate;
++ gctDeallocatorFunc deallocate;
++} gcsAllocator;
++
++/* simple map structure */
++typedef struct _SimpleMap SimpleMap;
++struct _SimpleMap
++{
++ gctUINT32 key;
++ gctUINT32 val;
++ SimpleMap *next;
++ gcsAllocator *allocator;
++
++};
++
++/* SimpleMap Operations */
++/* return -1 if not found, otherwise return the mapped value */
++gctUINT32
++gcSimpleMap_Find(
++ IN SimpleMap *Map,
++ IN gctUINT32 Key
++ );
++
++gceSTATUS
++gcSimpleMap_Destory(
++ IN SimpleMap * Map,
++ IN gcsAllocator * Allocator
++ );
++
++/* Add a pair <Key, Val> at the head of the Map. Be aware that the
++ * map pointer always changes when a new node is added:
++ *
++ *   gcSimpleMap_AddNode(&theMap, key, val, allocator);
++ *
++ */
++gceSTATUS
++gcSimpleMap_AddNode(
++ IN SimpleMap ** Map,
++ IN gctUINT32 Key,
++ IN gctUINT32 Val,
++ IN gcsAllocator * Allocator
++ );
++
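++/* Illustrative usage sketch combining the operations above ('allocator'
++** is an assumed gcsAllocator instance):
++**
++**     SimpleMap *map = gcvNULL;
++**     gcSimpleMap_AddNode(&map, 1, 100, allocator);
++**     val = gcSimpleMap_Find(map, 1);    (yields 100, or -1 if missing)
++**     gcSimpleMap_Destory(map, allocator);
++*/
++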
++/* gcsList data structure and related operations */
++typedef struct _gcsList
++{
++ gcsListNode *head;
++ gcsListNode *tail;
++ gctINT count;
++ gcsAllocator *allocator;
++} gcsList;
++
++/* List operations */
++void
++gcList_Init(
++ IN gcsList *list,
++ IN gcsAllocator *allocator
++ );
++
++gceSTATUS
++gcList_CreateNode(
++ IN void * Data,
++ IN gctAllocatorFunc Allocator,
++ OUT gcsListNode ** ListNode
++ );
++
++gceSTATUS
++gcList_Clean(
++ IN gcsList * List,
++ IN gctBOOL FreeData
++ );
++
++gcsListNode *
++gcList_FindNode(
++ IN gcsList * List,
++ IN void * Key,
++ IN compareFunc compare
++ );
++
++gceSTATUS
++gcList_AddNode(
++ IN gcsList * List,
++ IN void * Data
++ );
++
++gceSTATUS
++gcList_RemoveNode(
++ IN gcsList * List,
++ IN gcsListNode * Node
++ );
++
++/* link list structure for code list */
++typedef gcsList gcsCodeList;
++typedef gcsCodeList * gctCodeList;
++typedef gcsListNode gcsCodeListNode;
++
++/* Possible shader language opcodes. */
++typedef enum _gcSL_OPCODE
++{
++ gcSL_NOP, /* 0x00 */
++ gcSL_MOV, /* 0x01 */
++ gcSL_SAT, /* 0x02 */
++ gcSL_DP3, /* 0x03 */
++ gcSL_DP4, /* 0x04 */
++ gcSL_ABS, /* 0x05 */
++ gcSL_JMP, /* 0x06 */
++ gcSL_ADD, /* 0x07 */
++ gcSL_MUL, /* 0x08 */
++ gcSL_RCP, /* 0x09 */
++ gcSL_SUB, /* 0x0A */
++ gcSL_KILL, /* 0x0B */
++ gcSL_TEXLD, /* 0x0C */
++ gcSL_CALL, /* 0x0D */
++ gcSL_RET, /* 0x0E */
++ gcSL_NORM, /* 0x0F */
++ gcSL_MAX, /* 0x10 */
++ gcSL_MIN, /* 0x11 */
++ gcSL_POW, /* 0x12 */
++ gcSL_RSQ, /* 0x13 */
++ gcSL_LOG, /* 0x14 */
++ gcSL_FRAC, /* 0x15 */
++ gcSL_FLOOR, /* 0x16 */
++ gcSL_CEIL, /* 0x17 */
++ gcSL_CROSS, /* 0x18 */
++ gcSL_TEXLDP, /* 0x19 */
++ gcSL_TEXBIAS, /* 0x1A */
++ gcSL_TEXGRAD, /* 0x1B */
++ gcSL_TEXLOD, /* 0x1C */
++ gcSL_SIN, /* 0x1D */
++ gcSL_COS, /* 0x1E */
++ gcSL_TAN, /* 0x1F */
++ gcSL_EXP, /* 0x20 */
++ gcSL_SIGN, /* 0x21 */
++ gcSL_STEP, /* 0x22 */
++ gcSL_SQRT, /* 0x23 */
++ gcSL_ACOS, /* 0x24 */
++ gcSL_ASIN, /* 0x25 */
++ gcSL_ATAN, /* 0x26 */
++ gcSL_SET, /* 0x27 */
++ gcSL_DSX, /* 0x28 */
++ gcSL_DSY, /* 0x29 */
++ gcSL_FWIDTH, /* 0x2A */
++ gcSL_DIV, /* 0x2B */
++ gcSL_MOD, /* 0x2C */
++ gcSL_AND_BITWISE, /* 0x2D */
++ gcSL_OR_BITWISE, /* 0x2E */
++ gcSL_XOR_BITWISE, /* 0x2F */
++ gcSL_NOT_BITWISE, /* 0x30 */
++ gcSL_LSHIFT, /* 0x31 */
++ gcSL_RSHIFT, /* 0x32 */
++ gcSL_ROTATE, /* 0x33 */
++ gcSL_BITSEL, /* 0x34 */
++ gcSL_LEADZERO, /* 0x35 */
++ gcSL_LOAD, /* 0x36 */
++ gcSL_STORE, /* 0x37 */
++ gcSL_BARRIER, /* 0x38 */
++ gcSL_STORE1, /* 0x39 */
++ gcSL_ATOMADD, /* 0x3A */
++ gcSL_ATOMSUB, /* 0x3B */
++ gcSL_ATOMXCHG, /* 0x3C */
++ gcSL_ATOMCMPXCHG, /* 0x3D */
++ gcSL_ATOMMIN, /* 0x3E */
++ gcSL_ATOMMAX, /* 0x3F */
++ gcSL_ATOMOR, /* 0x40 */
++ gcSL_ATOMAND, /* 0x41 */
++ gcSL_ATOMXOR, /* 0x42 */
++ /*gcSL_UNUSED, 0x43 */
++ /*gcSL_UNUSED, 0x44 */
++ /*gcSL_UNUSED, 0x45 */
++ /*gcSL_UNUSED, 0x46 */
++ /*gcSL_UNUSED, 0x47 */
++ /*gcSL_UNUSED, 0x48 */
++ /*gcSL_UNUSED, 0x49 */
++ /*gcSL_UNUSED, 0x4A */
++ /*gcSL_UNUSED, 0x4B */
++ /*gcSL_UNUSED, 0x4C */
++ /*gcSL_UNUSED, 0x4D */
++ /*gcSL_UNUSED, 0x4E */
++ /*gcSL_UNUSED, 0x4F */
++ /*gcSL_UNUSED, 0x50 */
++ /*gcSL_UNUSED, 0x51 */
++ /*gcSL_UNUSED, 0x52 */
++ gcSL_ADDLO = 0x53, /* 0x53 */ /* Float only. */
++ gcSL_MULLO, /* 0x54 */ /* Float only. */
++ gcSL_CONV, /* 0x55 */
++ gcSL_GETEXP, /* 0x56 */
++ gcSL_GETMANT, /* 0x57 */
++ gcSL_MULHI, /* 0x58 */ /* Integer only. */
++ gcSL_CMP, /* 0x59 */
++ gcSL_I2F, /* 0x5A */
++ gcSL_F2I, /* 0x5B */
++ gcSL_ADDSAT, /* 0x5C */ /* Integer only. */
++ gcSL_SUBSAT, /* 0x5D */ /* Integer only. */
++ gcSL_MULSAT, /* 0x5E */ /* Integer only. */
++ gcSL_DP2, /* 0x5F */
++ gcSL_MAXOPCODE
++}
++gcSL_OPCODE;
++
++typedef enum _gcSL_FORMAT
++{
++ gcSL_FLOAT = 0, /* 0 */
++ gcSL_INTEGER = 1, /* 1 */
++ gcSL_INT32 = 1, /* 1 */
++ gcSL_BOOLEAN = 2, /* 2 */
++ gcSL_UINT32 = 3, /* 3 */
++ gcSL_INT8, /* 4 */
++ gcSL_UINT8, /* 5 */
++ gcSL_INT16, /* 6 */
++ gcSL_UINT16, /* 7 */
++ gcSL_INT64, /* 8 */ /* Reserved for future enhancement. */
++ gcSL_UINT64, /* 9 */ /* Reserved for future enhancement. */
++ gcSL_INT128, /* 10 */ /* Reserved for future enhancement. */
++ gcSL_UINT128, /* 11 */ /* Reserved for future enhancement. */
++ gcSL_FLOAT16, /* 12 */
++ gcSL_FLOAT64, /* 13 */ /* Reserved for future enhancement. */
++ gcSL_FLOAT128, /* 14 */ /* Reserved for future enhancement. */
++}
++gcSL_FORMAT;
++
++/* Destination write enable bits. */
++typedef enum _gcSL_ENABLE
++{
++ gcSL_ENABLE_NONE = 0x0, /* none is enabled, error/uninitialized state */
++ gcSL_ENABLE_X = 0x1,
++ gcSL_ENABLE_Y = 0x2,
++ gcSL_ENABLE_Z = 0x4,
++ gcSL_ENABLE_W = 0x8,
++ /* Combinations. */
++ gcSL_ENABLE_XY = gcSL_ENABLE_X | gcSL_ENABLE_Y,
++ gcSL_ENABLE_XYZ = gcSL_ENABLE_X | gcSL_ENABLE_Y | gcSL_ENABLE_Z,
++ gcSL_ENABLE_XYZW = gcSL_ENABLE_X | gcSL_ENABLE_Y | gcSL_ENABLE_Z | gcSL_ENABLE_W,
++ gcSL_ENABLE_XYW = gcSL_ENABLE_X | gcSL_ENABLE_Y | gcSL_ENABLE_W,
++ gcSL_ENABLE_XZ = gcSL_ENABLE_X | gcSL_ENABLE_Z,
++ gcSL_ENABLE_XZW = gcSL_ENABLE_X | gcSL_ENABLE_Z | gcSL_ENABLE_W,
++ gcSL_ENABLE_XW = gcSL_ENABLE_X | gcSL_ENABLE_W,
++ gcSL_ENABLE_YZ = gcSL_ENABLE_Y | gcSL_ENABLE_Z,
++ gcSL_ENABLE_YZW = gcSL_ENABLE_Y | gcSL_ENABLE_Z | gcSL_ENABLE_W,
++ gcSL_ENABLE_YW = gcSL_ENABLE_Y | gcSL_ENABLE_W,
++ gcSL_ENABLE_ZW = gcSL_ENABLE_Z | gcSL_ENABLE_W,
++}
++gcSL_ENABLE;
++
++/* Possible indices. */
++typedef enum _gcSL_INDEXED
++{
++ gcSL_NOT_INDEXED, /* 0 */
++ gcSL_INDEXED_X, /* 1 */
++ gcSL_INDEXED_Y, /* 2 */
++ gcSL_INDEXED_Z, /* 3 */
++ gcSL_INDEXED_W, /* 4 */
++}
++gcSL_INDEXED;
++
++/* Opcode conditions. */
++typedef enum _gcSL_CONDITION
++{
++ gcSL_ALWAYS, /* 0x0 */
++ gcSL_NOT_EQUAL, /* 0x1 */
++ gcSL_LESS_OR_EQUAL, /* 0x2 */
++ gcSL_LESS, /* 0x3 */
++ gcSL_EQUAL, /* 0x4 */
++ gcSL_GREATER, /* 0x5 */
++ gcSL_GREATER_OR_EQUAL, /* 0x6 */
++ gcSL_AND, /* 0x7 */
++ gcSL_OR, /* 0x8 */
++ gcSL_XOR, /* 0x9 */
++ gcSL_NOT_ZERO, /* 0xA */
++}
++gcSL_CONDITION;
++
++/* Possible source operand types. */
++typedef enum _gcSL_TYPE
++{
++ gcSL_NONE, /* 0x0 */
++ gcSL_TEMP, /* 0x1 */
++ gcSL_ATTRIBUTE, /* 0x2 */
++ gcSL_UNIFORM, /* 0x3 */
++ gcSL_SAMPLER, /* 0x4 */
++ gcSL_CONSTANT, /* 0x5 */
++ gcSL_OUTPUT, /* 0x6 */
++ gcSL_PHYSICAL, /* 0x7 */
++}
++gcSL_TYPE;
++
++/* Swizzle generator macro. */
++#define gcmSWIZZLE(Component1, Component2, Component3, Component4) \
++( \
++ (gcSL_SWIZZLE_ ## Component1 << 0) | \
++ (gcSL_SWIZZLE_ ## Component2 << 2) | \
++ (gcSL_SWIZZLE_ ## Component3 << 4) | \
++ (gcSL_SWIZZLE_ ## Component4 << 6) \
++)
++
++#define gcmExtractSwizzle(Swizzle, Index) \
++ ((gcSL_SWIZZLE) ((((Swizzle) >> (Index * 2)) & 0x3)))
++
++#define gcmComposeSwizzle(SwizzleX, SwizzleY, SwizzleZ, SwizzleW) \
++( \
++ ((SwizzleX) << 0) | \
++ ((SwizzleY) << 2) | \
++ ((SwizzleZ) << 4) | \
++ ((SwizzleW) << 6) \
++)
++
++/* Possible swizzle values. */
++typedef enum _gcSL_SWIZZLE
++{
++ gcSL_SWIZZLE_X, /* 0x0 */
++ gcSL_SWIZZLE_Y, /* 0x1 */
++ gcSL_SWIZZLE_Z, /* 0x2 */
++ gcSL_SWIZZLE_W, /* 0x3 */
++ /* Combinations. */
++ gcSL_SWIZZLE_XXXX = gcmSWIZZLE(X, X, X, X),
++ gcSL_SWIZZLE_YYYY = gcmSWIZZLE(Y, Y, Y, Y),
++ gcSL_SWIZZLE_ZZZZ = gcmSWIZZLE(Z, Z, Z, Z),
++ gcSL_SWIZZLE_WWWW = gcmSWIZZLE(W, W, W, W),
++ gcSL_SWIZZLE_XYYY = gcmSWIZZLE(X, Y, Y, Y),
++ gcSL_SWIZZLE_XZZZ = gcmSWIZZLE(X, Z, Z, Z),
++ gcSL_SWIZZLE_XWWW = gcmSWIZZLE(X, W, W, W),
++ gcSL_SWIZZLE_YZZZ = gcmSWIZZLE(Y, Z, Z, Z),
++ gcSL_SWIZZLE_YWWW = gcmSWIZZLE(Y, W, W, W),
++ gcSL_SWIZZLE_ZWWW = gcmSWIZZLE(Z, W, W, W),
++ gcSL_SWIZZLE_XYZZ = gcmSWIZZLE(X, Y, Z, Z),
++ gcSL_SWIZZLE_XYWW = gcmSWIZZLE(X, Y, W, W),
++ gcSL_SWIZZLE_XZWW = gcmSWIZZLE(X, Z, W, W),
++ gcSL_SWIZZLE_YZWW = gcmSWIZZLE(Y, Z, W, W),
++ gcSL_SWIZZLE_XXYZ = gcmSWIZZLE(X, X, Y, Z),
++ gcSL_SWIZZLE_XYZW = gcmSWIZZLE(X, Y, Z, W),
++ gcSL_SWIZZLE_XYXY = gcmSWIZZLE(X, Y, X, Y),
++ gcSL_SWIZZLE_YYZZ = gcmSWIZZLE(Y, Y, Z, Z),
++ gcSL_SWIZZLE_YYWW = gcmSWIZZLE(Y, Y, W, W),
++ gcSL_SWIZZLE_ZZZW = gcmSWIZZLE(Z, Z, Z, W),
++ gcSL_SWIZZLE_XZZW = gcmSWIZZLE(X, Z, Z, W),
++ gcSL_SWIZZLE_YYZW = gcmSWIZZLE(Y, Y, Z, W),
++
++ gcSL_SWIZZLE_INVALID = 0x7FFFFFFF
++}
++gcSL_SWIZZLE;
++
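++/* Illustrative note: with the component values above,
++** gcmSWIZZLE(X, Y, Z, W) evaluates to
++** (0 << 0) | (1 << 2) | (2 << 4) | (3 << 6) = 0xE4, i.e. the identity
++** swizzle gcSL_SWIZZLE_XYZW, and gcmExtractSwizzle(0xE4, 2) recovers
++** gcSL_SWIZZLE_Z for the third component.
++*/
++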
++typedef enum _gcSL_COMPONENT
++{
++ gcSL_COMPONENT_X, /* 0x0 */
++ gcSL_COMPONENT_Y, /* 0x1 */
++ gcSL_COMPONENT_Z, /* 0x2 */
++ gcSL_COMPONENT_W, /* 0x3 */
++ gcSL_COMPONENT_COUNT /* 0x4 */
++} gcSL_COMPONENT;
++
++#define gcmIsComponentEnabled(Enable, Component) (((Enable) & (1 << (Component))) != 0)
++
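++/* Illustrative note: gcmIsComponentEnabled tests one write-enable bit,
++** e.g. gcmIsComponentEnabled(gcSL_ENABLE_XY, gcSL_COMPONENT_Y) is true,
++** while gcmIsComponentEnabled(gcSL_ENABLE_XY, gcSL_COMPONENT_Z) is false.
++*/
++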
++/******************************************************************************\
++|*********************************** SHADERS **********************************|
++\******************************************************************************/
++
++/* Shader types. */
++typedef enum _gcSHADER_KIND {
++ gcSHADER_TYPE_UNKNOWN = 0,
++ gcSHADER_TYPE_VERTEX,
++ gcSHADER_TYPE_FRAGMENT,
++ gcSHADER_TYPE_CL,
++ gcSHADER_TYPE_PRECOMPILED,
++ gcSHADER_KIND_COUNT
++} gcSHADER_KIND;
++
++typedef enum _gcGL_DRIVER_VERSION {
++ gcGL_DRIVER_ES11, /* OpenGL ES 1.1 */
++ gcGL_DRIVER_ES20, /* OpenGL ES 2.0 */
++ gcGL_DRIVER_ES30 /* OpenGL ES 3.0 */
++} gcGL_DRIVER_VERSION;
++
++/* gcSHADER objects. */
++typedef struct _gcSHADER * gcSHADER;
++typedef struct _gcATTRIBUTE * gcATTRIBUTE;
++typedef struct _gcUNIFORM * gcUNIFORM;
++typedef struct _gcOUTPUT * gcOUTPUT;
++typedef struct _gcsFUNCTION * gcFUNCTION;
++typedef struct _gcsKERNEL_FUNCTION * gcKERNEL_FUNCTION;
++typedef struct _gcsHINT * gcsHINT_PTR;
++typedef struct _gcSHADER_PROFILER * gcSHADER_PROFILER;
++typedef struct _gcVARIABLE * gcVARIABLE;
++typedef struct _gcSHADER_LIST * gcSHADER_LIST;
++
++struct _gcsHINT
++{
++    /* Number of data transfers for Vertex Shader output. */
++ gctUINT32 vsOutputCount;
++
++ /* Flag whether the VS has point size or not. */
++ gctBOOL vsHasPointSize;
++
++#if gcdUSE_WCLIP_PATCH
++    /* Flag whether the VS gl_position.z depends on gl_position.w;
++       a hint for W-clipping. */
++ gctBOOL vsPositionZDependsOnW;
++#endif
++
++ gctBOOL clipW;
++
++ /* Flag whether or not the shader has a KILL instruction. */
++ gctBOOL hasKill;
++
++ /* Element count. */
++ gctUINT32 elementCount;
++
++ /* Component count. */
++ gctUINT32 componentCount;
++
++ /* Number of data transfers for Fragment Shader input. */
++ gctUINT32 fsInputCount;
++
++ /* Maximum number of temporary registers used in FS. */
++ gctUINT32 fsMaxTemp;
++
++ /* Maximum number of temporary registers used in VS. */
++ gctUINT32 vsMaxTemp;
++
++ /* Balance minimum. */
++ gctUINT32 balanceMin;
++
++ /* Balance maximum. */
++ gctUINT32 balanceMax;
++
++ /* Auto-shift balancing. */
++ gctBOOL autoShift;
++
++ /* Flag whether the PS outputs the depth value or not. */
++ gctBOOL psHasFragDepthOut;
++
++ /* Flag whether the ThreadWalker is in PS. */
++ gctBOOL threadWalkerInPS;
++
++ /* HW reg number for position of VS */
++ gctUINT32 hwRegNoOfSIVPos;
++
++#if gcdALPHA_KILL_IN_SHADER
++ /* States to set when alpha kill is enabled. */
++ gctUINT32 killStateAddress;
++ gctUINT32 alphaKillStateValue;
++ gctUINT32 colorKillStateValue;
++
++    /* Shader instruction. */
++ gctUINT32 killInstructionAddress;
++ gctUINT32 alphaKillInstruction[3];
++ gctUINT32 colorKillInstruction[3];
++#endif
++
++#if TEMP_SHADER_PATCH
++ gctUINT32 pachedShaderIdentifier;
++#endif
++
++#if gcdUSE_WCLIP_PATCH
++ /* Strict WClip match. */
++ gctBOOL strictWClipMatch;
++#endif
++};
++
++#if TEMP_SHADER_PATCH
++#define INVALID_SHADER_IDENTIFIER 0xFFFFFFFF
++#endif
++
++/* gcSHADER_TYPE enumeration. */
++typedef enum _gcSHADER_TYPE
++{
++ gcSHADER_FLOAT_X1 = 0, /* 0x00 */
++ gcSHADER_FLOAT_X2, /* 0x01 */
++ gcSHADER_FLOAT_X3, /* 0x02 */
++ gcSHADER_FLOAT_X4, /* 0x03 */
++ gcSHADER_FLOAT_2X2, /* 0x04 */
++ gcSHADER_FLOAT_3X3, /* 0x05 */
++ gcSHADER_FLOAT_4X4, /* 0x06 */
++ gcSHADER_BOOLEAN_X1, /* 0x07 */
++ gcSHADER_BOOLEAN_X2, /* 0x08 */
++ gcSHADER_BOOLEAN_X3, /* 0x09 */
++ gcSHADER_BOOLEAN_X4, /* 0x0A */
++ gcSHADER_INTEGER_X1, /* 0x0B */
++ gcSHADER_INTEGER_X2, /* 0x0C */
++ gcSHADER_INTEGER_X3, /* 0x0D */
++ gcSHADER_INTEGER_X4, /* 0x0E */
++ gcSHADER_SAMPLER_1D, /* 0x0F */
++ gcSHADER_SAMPLER_2D, /* 0x10 */
++ gcSHADER_SAMPLER_3D, /* 0x11 */
++ gcSHADER_SAMPLER_CUBIC, /* 0x12 */
++ gcSHADER_FIXED_X1, /* 0x13 */
++ gcSHADER_FIXED_X2, /* 0x14 */
++ gcSHADER_FIXED_X3, /* 0x15 */
++ gcSHADER_FIXED_X4, /* 0x16 */
++ gcSHADER_IMAGE_2D, /* 0x17 */ /* For OCL. */
++ gcSHADER_IMAGE_3D, /* 0x18 */ /* For OCL. */
++ gcSHADER_SAMPLER, /* 0x19 */ /* For OCL. */
++ gcSHADER_FLOAT_2X3, /* 0x1A */
++ gcSHADER_FLOAT_2X4, /* 0x1B */
++ gcSHADER_FLOAT_3X2, /* 0x1C */
++ gcSHADER_FLOAT_3X4, /* 0x1D */
++ gcSHADER_FLOAT_4X2, /* 0x1E */
++ gcSHADER_FLOAT_4X3, /* 0x1F */
++ gcSHADER_ISAMPLER_2D, /* 0x20 */
++ gcSHADER_ISAMPLER_3D, /* 0x21 */
++ gcSHADER_ISAMPLER_CUBIC, /* 0x22 */
++ gcSHADER_USAMPLER_2D, /* 0x23 */
++ gcSHADER_USAMPLER_3D, /* 0x24 */
++ gcSHADER_USAMPLER_CUBIC, /* 0x25 */
++ gcSHADER_SAMPLER_EXTERNAL_OES, /* 0x26 */
++
++ gcSHADER_UINT_X1, /* 0x27 */
++ gcSHADER_UINT_X2, /* 0x28 */
++ gcSHADER_UINT_X3, /* 0x29 */
++ gcSHADER_UINT_X4, /* 0x2A */
++
++    gcSHADER_UNKONWN_TYPE, /* do not add types after this */
++    gcSHADER_TYPE_COUNT /* must change gcvShaderTypeInfo at the
++                         * same time if you add any new type! */
++}
++gcSHADER_TYPE;
++
++typedef enum _gcSHADER_TYPE_KIND
++{
++ gceTK_UNKOWN,
++ gceTK_FLOAT,
++ gceTK_INT,
++ gceTK_UINT,
++ gceTK_BOOL,
++ gceTK_FIXED,
++ gceTK_SAMPLER,
++ gceTK_IMAGE,
++ gceTK_OTHER
++} gcSHADER_TYPE_KIND;
++
++typedef struct _gcSHADER_TYPEINFO
++{
++ gcSHADER_TYPE type; /* e.g. gcSHADER_FLOAT_2X4 */
++ gctINT components; /* e.g. 4 components */
++ gctINT rows; /* e.g. 2 rows */
++ gcSHADER_TYPE componentType; /* e.g. gcSHADER_FLOAT_X4 */
++ gcSHADER_TYPE_KIND kind; /* e.g. gceTK_FLOAT */
++ gctCONST_STRING name; /* e.g. "FLOAT_2X4" */
++} gcSHADER_TYPEINFO;
++
++extern gcSHADER_TYPEINFO gcvShaderTypeInfo[];
++
++#define gcmType_Comonents(Type) (gcvShaderTypeInfo[Type].components)
++#define gcmType_Rows(Type) (gcvShaderTypeInfo[Type].rows)
++#define gcmType_ComonentType(Type) (gcvShaderTypeInfo[Type].componentType)
++#define gcmType_Kind(Type) (gcvShaderTypeInfo[Type].kind)
++#define gcmType_Name(Type) (gcvShaderTypeInfo[Type].name)
++
++#define gcmType_isMatrix(type) (gcmType_Rows(type) > 1)
++
++typedef enum _gcSHADER_VAR_CATEGORY
++{
++ gcSHADER_VAR_CATEGORY_NORMAL = 0, /* primitive type and its array */
++ gcSHADER_VAR_CATEGORY_STRUCT = 1 /* structure */
++}
++gcSHADER_VAR_CATEGORY;
++
++typedef enum _gceTYPE_QUALIFIER
++{
++ gcvTYPE_QUALIFIER_NONE = 0x0, /* unqualified */
++ gcvTYPE_QUALIFIER_VOLATILE = 0x1, /* volatile */
++}gceTYPE_QUALIFIER;
++
++typedef gctUINT16 gctTYPE_QUALIFIER;
++
++#if GC_ENABLE_LOADTIME_OPT
++typedef struct _gcSHADER_TYPE_INFO
++{
++    gcSHADER_TYPE type; /* e.g. gcSHADER_FLOAT_2X3 is the type */
++ gctCONST_STRING name; /* the name of the type: "gcSHADER_FLOAT_2X3" */
++ gcSHADER_TYPE baseType; /* its base type is gcSHADER_FLOAT_2 */
++ gctINT components; /* it has 2 components */
++ gctINT rows; /* and 3 rows */
++    gctINT size; /* the size in bytes */
++} gcSHADER_TYPE_INFO;
++
++extern gcSHADER_TYPE_INFO shader_type_info[];
++
++enum gceLTCDumpOption {
++ gceLTC_DUMP_UNIFORM = 0x0001,
++ gceLTC_DUMP_EVALUATION = 0x0002,
++ gceLTC_DUMP_EXPESSION = 0x0004,
++ gceLTC_DUMP_COLLECTING = 0x0008,
++};
++
++gctBOOL gcDumpOption(gctINT Opt);
++
++#endif /* GC_ENABLE_LOADTIME_OPT */
++
++#define IS_MATRIX_TYPE(type) \
++ (((type >= gcSHADER_FLOAT_2X2) && (type <= gcSHADER_FLOAT_4X4)) || \
++ ((type >= gcSHADER_FLOAT_2X3) && (type <= gcSHADER_FLOAT_4X3)))
++
++/* gcSHADER_PRECISION enumeration. */
++typedef enum _gcSHADER_PRECISION
++{
++ gcSHADER_PRECISION_DEFAULT, /* 0x00 */
++ gcSHADER_PRECISION_HIGH, /* 0x01 */
++ gcSHADER_PRECISION_MEDIUM, /* 0x02 */
++ gcSHADER_PRECISION_LOW, /* 0x03 */
++}
++gcSHADER_PRECISION;
++
++/* Shader flags. */
++typedef enum _gceSHADER_FLAGS
++{
++ gcvSHADER_NO_OPTIMIZATION = 0x00,
++ gcvSHADER_DEAD_CODE = 0x01,
++ gcvSHADER_RESOURCE_USAGE = 0x02,
++ gcvSHADER_OPTIMIZER = 0x04,
++ gcvSHADER_USE_GL_Z = 0x08,
++ /*
++    The GC family of GPU cores, model GC860 and below, require Z
++    to be in the range 0 <= z <= w.
++    However, OpenGL specifies Z to be in the range -w <= z <= w, so we
++    have to apply a conversion here:
++
++ z = (z + w) / 2.
++
++ So here we append two instructions to the vertex shader.
++ */
++ gcvSHADER_USE_GL_POSITION = 0x10,
++ gcvSHADER_USE_GL_FACE = 0x20,
++ gcvSHADER_USE_GL_POINT_COORD = 0x40,
++ gcvSHADER_LOADTIME_OPTIMIZER = 0x80,
++#if gcdALPHA_KILL_IN_SHADER
++ gcvSHADER_USE_ALPHA_KILL = 0x100,
++#endif
++
++#if gcdPRE_ROTATION && (ANDROID_SDK_VERSION >= 14)
++ gcvSHADER_VS_PRE_ROTATION = 0x200,
++#endif
++
++#if TEMP_INLINE_ALL_EXPANSION
++ gcvSHADER_INLINE_ALL_EXPANSION = 0x400,
++#endif
++}
++gceSHADER_FLAGS;
++
++gceSTATUS
++gcSHADER_CheckClipW(
++ IN gctCONST_STRING VertexSource,
++ IN gctCONST_STRING FragmentSource,
++ OUT gctBOOL * clipW);
++
++/*******************************************************************************
++** gcSHADER_GetUniformVectorCount
++**
++** Get the number of vectors used by uniforms for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Count
++** Pointer to a variable receiving the number of vectors.
++*/
++gceSTATUS
++gcSHADER_GetUniformVectorCount(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * Count
++ );
++
++/*******************************************************************************
++** gcOptimizer Data Structures
++*******************************************************************************/
++typedef enum _gceSHADER_OPTIMIZATION
++{
++ /* No optimization. */
++ gcvOPTIMIZATION_NONE,
++
++ /* Flow graph construction. */
++ gcvOPTIMIZATION_CONSTRUCTION = 1 << 0,
++
++ /* Dead code elimination. */
++ gcvOPTIMIZATION_DEAD_CODE = 1 << 1,
++
++ /* Redundant move instruction elimination. */
++ gcvOPTIMIZATION_REDUNDANT_MOVE = 1 << 2,
++
++ /* Inline expansion. */
++ gcvOPTIMIZATION_INLINE_EXPANSION = 1 << 3,
++
++ /* Constant propagation. */
++ gcvOPTIMIZATION_CONSTANT_PROPAGATION = 1 << 4,
++
++ /* Redundant bounds/checking elimination. */
++ gcvOPTIMIZATION_REDUNDANT_CHECKING = 1 << 5,
++
++ /* Loop invariant movement. */
++ gcvOPTIMIZATION_LOOP_INVARIANT = 1 << 6,
++
++ /* Induction variable removal. */
++ gcvOPTIMIZATION_INDUCTION_VARIABLE = 1 << 7,
++
++ /* Common subexpression elimination. */
++ gcvOPTIMIZATION_COMMON_SUBEXPRESSION = 1 << 8,
++
++    /* Control flow/branch optimization. */
++ gcvOPTIMIZATION_CONTROL_FLOW = 1 << 9,
++
++ /* Vector component operation merge. */
++ gcvOPTIMIZATION_VECTOR_INSTRUCTION_MERGE = 1 << 10,
++
++    /* Algebraic simplification. */
++ gcvOPTIMIZATION_ALGEBRAIC_SIMPLIFICATION = 1 << 11,
++
++ /* Pattern matching and replacing. */
++ gcvOPTIMIZATION_PATTERN_MATCHING = 1 << 12,
++
++ /* Interprocedural constant propagation. */
++ gcvOPTIMIZATION_IP_CONSTANT_PROPAGATION = 1 << 13,
++
++    /* Interprocedural register optimization. */
++ gcvOPTIMIZATION_IP_REGISTRATION = 1 << 14,
++
++ /* Optimization option number. */
++ gcvOPTIMIZATION_OPTION_NUMBER = 1 << 15,
++
++ /* Loadtime constant. */
++ gcvOPTIMIZATION_LOADTIME_CONSTANT = 1 << 16,
++
++ /* MAD instruction optimization. */
++ gcvOPTIMIZATION_MAD_INSTRUCTION = 1 << 17,
++
++ /* Special optimization for LOAD SW workaround. */
++ gcvOPTIMIZATION_LOAD_SW_WORKAROUND = 1 << 18,
++
++    /* move code into a conditional block if possible */
++ gcvOPTIMIZATION_CONDITIONALIZE = 1 << 19,
++
++    /* experimental: power optimization mode
++       1. add extra dummy texld to tune performance
++       2. insert NOP after high-power instructions
++       3. split high-power vec3/vec4 instructions into vec2/vec1 operations
++       4. ...
++    */
++ gcvOPTIMIZATION_POWER_OPTIMIZATION = 1 << 20,
++
++ /* optimize varying packing */
++ gcvOPTIMIZATION_VARYINGPACKING = 1 << 22,
++
++#if TEMP_INLINE_ALL_EXPANSION
++ gcvOPTIMIZATION_INLINE_ALL_EXPANSION = 1 << 23,
++#endif
++
++ /* Full optimization. */
++ /* Note that gcvOPTIMIZATION_LOAD_SW_WORKAROUND is off. */
++ gcvOPTIMIZATION_FULL = 0x7FFFFFFF &
++ ~gcvOPTIMIZATION_LOAD_SW_WORKAROUND &
++ ~gcvOPTIMIZATION_INLINE_ALL_EXPANSION &
++ ~gcvOPTIMIZATION_POWER_OPTIMIZATION,
++
++ /* Optimization Unit Test flag. */
++ gcvOPTIMIZATION_UNIT_TEST = 1 << 31
++}
++gceSHADER_OPTIMIZATION;
++
++typedef enum _gceOPTIMIZATION_VaryingPaking
++{
++ gcvOPTIMIZATION_VARYINGPACKING_NONE = 0,
++ gcvOPTIMIZATION_VARYINGPACKING_NOSPLIT,
++ gcvOPTIMIZATION_VARYINGPACKING_SPLIT
++} gceOPTIMIZATION_VaryingPaking;
++
++typedef struct _gcOPTIMIZER_OPTION
++{
++ gceSHADER_OPTIMIZATION optFlags;
++
++ /* debug & dump options:
++
++ VC_OPTION=-DUMP:SRC:OPT|:OPTV|:CG|:CGV:|ALL|ALLV
++
++ SRC: dump shader source code
++ OPT: dump incoming and final IR
++ OPTV: dump result IR in each optimization phase
++ CG: dump generated machine code
++ CGV: dump BE tree and optimization detail
++
++ ALL = SRC|OPT|CG
++ ALLV = SRC|OPT|OPTV|CG|CGV
++ */
++ gctBOOL dumpShaderSource; /* dump shader source code */
++ gctBOOL dumpOptimizer; /* dump incoming and final IR */
++ gctBOOL dumpOptimizerVerbose; /* dump result IR in each optimization phase */
++ gctBOOL dumpBEGenertedCode; /* dump generated machine code */
++ gctBOOL dumpBEVerbose; /* dump BE tree and optimization detail */
++ gctBOOL dumpBEFinalIR; /* dump BE final IR */
++
++ /* Code generation */
++
++ /* Varying Packing:
++
++ VC_OPTION=-PACKVARYING:[0-2]|:T[-]m[,n]|:LshaderIdx,min,max
++
++ 0: turn off varying packing
++         1: pack varyings, do not split any varying
++ 2: pack varyings, may split to make fully packed output
++
++         Tm:    only pack shader pairs whose vertex shader id is m
++         Tm,n:  only pack shader pairs whose vertex shader id
++                is in the range [m, n]
++         T-m:   do not pack shader pairs whose vertex shader id is m
++         T-m,n: do not pack shader pairs whose vertex shader id
++                is in the range [m, n]
++
++ LshaderIdx,min,max : set load balance (min, max) for shaderIdx
++ if shaderIdx is -1, all shaders are impacted
++ newMin = origMin * (min/100.);
++ newMax = origMax * (max/100.);
++ */
++ gceOPTIMIZATION_VaryingPaking packVarying;
++ gctINT _triageStart;
++ gctINT _triageEnd;
++ gctINT _loadBalanceShaderIdx;
++ gctINT _loadBalanceMin;
++ gctINT _loadBalanceMax;
++
++    /* Do not generate immediate
++
++ VC_OPTION=-NOIMM
++
++       Force immediate generation even if the machine model does not support
++       it; for testing purposes only:
++
++ VC_OPTION=-FORCEIMM
++ */
++ gctBOOL noImmediate;
++ gctBOOL forceImmediate;
++
++ /* Power reduction mode options */
++ gctBOOL needPowerOptimization;
++
++ /* Patch TEXLD instruction by adding dummy texld
++ (can be used to tune GPU power usage):
++       for every M TEXLDs seen, add N dummy TEXLDs
++
++ it can be enabled by environment variable:
++
++ VC_OPTION=-PATCH_TEXLD:M:N
++
++ (for each M texld, add N dummy texld)
++ */
++ gctINT patchEveryTEXLDs;
++ gctINT patchDummyTEXLDs;
++
++ /* Insert NOP after high power consumption instructions
++
++ VC_OPTION="-INSERTNOP:MUL:MULLO:DP3:DP4:SEENTEXLD"
++ */
++ gctBOOL insertNOP;
++ gctBOOL insertNOPAfterMUL;
++ gctBOOL insertNOPAfterMULLO;
++ gctBOOL insertNOPAfterDP3;
++ gctBOOL insertNOPAfterDP4;
++ gctBOOL insertNOPOnlyWhenTexldSeen;
++
++ /* split MAD to MUL and ADD:
++
++ VC_OPTION=-SPLITMAD
++ */
++ gctBOOL splitMAD;
++
++    /* Convert vec3/vec4 operations to multiple vec2/vec1 operations
++
++ VC_OPTION=-SPLITVEC:MUL:MULLO:DP3:DP4
++ */
++ gctBOOL splitVec;
++ gctBOOL splitVec4MUL;
++ gctBOOL splitVec4MULLO;
++ gctBOOL splitVec4DP3;
++ gctBOOL splitVec4DP4;
++
++    /* turn features on/off:
++
++       VC_OPTION=-F:n,[0|1]
++       Note: n must be a decimal number
++ */
++ gctUINT featureBits;
++
++ /* inline level (default 2 at O1):
++
++ VC_OPTION=-INLINELEVEL:[0-3]
++ 0: no inline
++         1: only inline functions that are called once, or small functions
++         2: inline functions called fewer than 5 times, or medium-size functions
++ 3: inline everything possible
++ */
++ gctUINT inlineLevel;
++} gcOPTIMIZER_OPTION;
++
++extern gcOPTIMIZER_OPTION theOptimizerOption;
++#define gcmGetOptimizerOption() gcGetOptimizerOption()
++
++#define gcmOPT_DUMP_SHADER_SRC() \
++ (gcmGetOptimizerOption()->dumpShaderSource != 0)
++#define gcmOPT_DUMP_OPTIMIZER() \
++ (gcmGetOptimizerOption()->dumpOptimizer != 0 || \
++ gcmOPT_DUMP_OPTIMIZER_VERBOSE() )
++#define gcmOPT_DUMP_OPTIMIZER_VERBOSE() \
++ (gcmGetOptimizerOption()->dumpOptimizerVerbose != 0)
++#define gcmOPT_DUMP_CODEGEN() \
++ (gcmGetOptimizerOption()->dumpBEGenertedCode != 0 || \
++ gcmOPT_DUMP_CODEGEN_VERBOSE() )
++#define gcmOPT_DUMP_CODEGEN_VERBOSE() \
++ (gcmGetOptimizerOption()->dumpBEVerbose != 0)
++#define gcmOPT_DUMP_FINAL_IR() \
++ (gcmGetOptimizerOption()->dumpBEFinalIR != 0)
++
++#define gcmOPT_SET_DUMP_SHADER_SRC(v) \
++ gcmGetOptimizerOption()->dumpShaderSource = (v)
++
++#define gcmOPT_PATCH_TEXLD() (gcmGetOptimizerOption()->patchDummyTEXLDs != 0)
++#define gcmOPT_INSERT_NOP() (gcmGetOptimizerOption()->insertNOP == gcvTRUE)
++#define gcmOPT_SPLITMAD() (gcmGetOptimizerOption()->splitMAD == gcvTRUE)
++#define gcmOPT_SPLITVEC() (gcmGetOptimizerOption()->splitVec == gcvTRUE)
++
++#define gcmOPT_NOIMMEDIATE() (gcmGetOptimizerOption()->noImmediate == gcvTRUE)
++#define gcmOPT_FORCEIMMEDIATE() (gcmGetOptimizerOption()->forceImmediate == gcvTRUE)
++
++#define gcmOPT_PACKVARYING() (gcmGetOptimizerOption()->packVarying)
++#define gcmOPT_PACKVARYING_triageStart() (gcmGetOptimizerOption()->_triageStart)
++#define gcmOPT_PACKVARYING_triageEnd() (gcmGetOptimizerOption()->_triageEnd)
++
++#define gcmOPT_INLINELEVEL() (gcmGetOptimizerOption()->inlineLevel)
++
++/* Setters */
++#define gcmOPT_SetPatchTexld(m,n) (gcmGetOptimizerOption()->patchEveryTEXLDs = (m),\
++ gcmGetOptimizerOption()->patchDummyTEXLDs = (n))
++#define gcmOPT_SetSplitVecMUL() (gcmGetOptimizerOption()->splitVec = gcvTRUE, \
++ gcmGetOptimizerOption()->splitVec4MUL = gcvTRUE)
++#define gcmOPT_SetSplitVecMULLO() (gcmGetOptimizerOption()->splitVec = gcvTRUE, \
++ gcmGetOptimizerOption()->splitVec4MULLO = gcvTRUE)
++#define gcmOPT_SetSplitVecDP3() (gcmGetOptimizerOption()->splitVec = gcvTRUE, \
++ gcmGetOptimizerOption()->splitVec4DP3 = gcvTRUE)
++#define gcmOPT_SetSplitVecDP4() (gcmGetOptimizerOption()->splitVec = gcvTRUE, \
++ gcmGetOptimizerOption()->splitVec4DP4 = gcvTRUE)
++
++#define gcmOPT_SetPackVarying(v) (gcmGetOptimizerOption()->packVarying = v)
++
++#define FB_LIVERANGE_FIX1 0x0001
++
++
++#define PredefinedDummySamplerId 8
++
++/* Function argument qualifier */
++typedef enum _gceINPUT_OUTPUT
++{
++ gcvFUNCTION_INPUT,
++ gcvFUNCTION_OUTPUT,
++ gcvFUNCTION_INOUT
++}
++gceINPUT_OUTPUT;
++
++/* Kernel function property flags. */
++typedef enum _gcePROPERTY_FLAGS
++{
++ gcvPROPERTY_REQD_WORK_GRP_SIZE = 0x01
++}
++gceKERNEL_FUNCTION_PROPERTY_FLAGS;
++
++/* Uniform flags. */
++typedef enum _gceUNIFORM_FLAGS
++{
++ gcvUNIFORM_KERNEL_ARG = 0x01,
++ gcvUNIFORM_KERNEL_ARG_LOCAL = 0x02,
++ gcvUNIFORM_KERNEL_ARG_SAMPLER = 0x04,
++ gcvUNIFORM_LOCAL_ADDRESS_SPACE = 0x08,
++ gcvUNIFORM_PRIVATE_ADDRESS_SPACE = 0x10,
++ gcvUNIFORM_CONSTANT_ADDRESS_SPACE = 0x20,
++ gcvUNIFORM_GLOBAL_SIZE = 0x40,
++ gcvUNIFORM_LOCAL_SIZE = 0x80,
++ gcvUNIFORM_NUM_GROUPS = 0x100,
++ gcvUNIFORM_GLOBAL_OFFSET = 0x200,
++ gcvUNIFORM_WORK_DIM = 0x400,
++ gcvUNIFORM_KERNEL_ARG_CONSTANT = 0x800,
++ gcvUNIFORM_KERNEL_ARG_LOCAL_MEM_SIZE = 0x1000,
++ gcvUNIFORM_KERNEL_ARG_PRIVATE = 0x2000,
++ gcvUNIFORM_LOADTIME_CONSTANT = 0x4000,
++ gcvUNIFORM_IS_ARRAY = 0x8000,
++}
++gceUNIFORM_FLAGS;
++
++#define gcdUNIFORM_KERNEL_ARG_MASK (gcvUNIFORM_KERNEL_ARG | \
++ gcvUNIFORM_KERNEL_ARG_LOCAL | \
++ gcvUNIFORM_KERNEL_ARG_SAMPLER | \
++ gcvUNIFORM_KERNEL_ARG_PRIVATE | \
++ gcvUNIFORM_KERNEL_ARG_CONSTANT)
++
++typedef enum _gceVARIABLE_UPDATE_FLAGS
++{
++ gcvVARIABLE_UPDATE_NOUPDATE = 0,
++ gcvVARIABLE_UPDATE_TEMPREG,
++ gcvVARIABLE_UPDATE_TYPE_QUALIFIER,
++}gceVARIABLE_UPDATE_FLAGS;
++
++typedef struct _gcMACHINE_INST
++{
++ gctUINT state0;
++ gctUINT state1;
++ gctUINT state2;
++ gctUINT state3;
++}gcMACHINE_INST, *gcMACHINE_INST_PTR;
++
++typedef struct _gcMACHINECODE
++{
++ gcMACHINE_INST_PTR pCode; /* machine code */
++ gctUINT instCount; /* 128-bit count */
++ gctUINT maxConstRegNo;
++ gctUINT maxTempRegNo;
++ gctUINT endPCOfMainRoutine;
++}gcMACHINECODE, *gcMACHINECODE_PTR;
++
++typedef enum NP2_ADDRESS_MODE
++{
++ NP2_ADDRESS_MODE_CLAMP = 0,
++ NP2_ADDRESS_MODE_REPEAT = 1,
++ NP2_ADDRESS_MODE_MIRROR = 2
++}NP2_ADDRESS_MODE;
++
++typedef struct _gcNPOT_PATCH_PARAM
++{
++ gctINT samplerSlot;
++ NP2_ADDRESS_MODE addressMode[3];
++ gctINT texDimension; /* 2 or 3 */
++}gcNPOT_PATCH_PARAM, *gcNPOT_PATCH_PARAM_PTR;
++
++typedef struct _gcZBIAS_PATCH_PARAM
++{
++    /* Driver uses this to program the uniform that designates zbias */
++ gctINT uniformAddr;
++ gctINT channel;
++}gcZBIAS_PATCH_PARAM, *gcZBIAS_PATCH_PARAM_PTR;
++
++void
++gcGetOptionFromEnv(
++ IN OUT gcOPTIMIZER_OPTION * Option
++ );
++
++void
++gcSetOptimizerOption(
++ IN gceSHADER_FLAGS Flags
++ );
++
++gcOPTIMIZER_OPTION *
++gcGetOptimizerOption();
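++
++/* Illustrative sketch (not part of the original header): the gcmOPT_* macros
++** above read the process-wide optimizer options, which gcGetOptionFromEnv()
++** populates from the VC_OPTION environment variable. The checks shown are an
++** assumed usage pattern, not required calls.
++*/
++static void
++_ExampleQueryOptions(
++    void
++    )
++{
++    if (gcmOPT_DUMP_OPTIMIZER())
++    {
++        /* The caller would dump the incoming and final IR here. */
++    }
++
++    /* Equivalent direct access through the option structure. */
++    if (gcGetOptimizerOption()->inlineLevel == 0)
++    {
++        /* Inlining is disabled. */
++    }
++}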
++
++/*******************************************************************************
++** gcSHADER_SetCompilerVersion
++**
++** Set the compiler version of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to gcSHADER object
++**
++** gctINT *Version
++** Pointer to a two word version
++*/
++gceSTATUS
++gcSHADER_SetCompilerVersion(
++ IN gcSHADER Shader,
++ IN gctUINT32 *Version
++ );
++
++/*******************************************************************************
++** gcSHADER_GetCompilerVersion
++**
++** Get the compiler version of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR *CompilerVersion.
++** Pointer to holder of returned compilerVersion pointer
++*/
++gceSTATUS
++gcSHADER_GetCompilerVersion(
++ IN gcSHADER Shader,
++ OUT gctUINT32_PTR *CompilerVersion
++ );
++
++/*******************************************************************************
++** gcSHADER_GetType
++**
++** Get the gcSHADER object's type.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctINT *Type.
++** Pointer to return shader type.
++*/
++gceSTATUS
++gcSHADER_GetType(
++ IN gcSHADER Shader,
++ OUT gctINT *Type
++ );
++
++gctUINT
++gcSHADER_NextId();
++/*******************************************************************************
++** gcSHADER_Construct
++********************************************************************************
++**
++** Construct a new gcSHADER object.
++**
++** INPUT:
++**
++** gcoOS Hal
++** Pointer to an gcoHAL object.
++**
++** gctINT ShaderType
++**          Type of gcSHADER object to create. 'ShaderType' can be one of the
++** following:
++**
++** gcSHADER_TYPE_VERTEX Vertex shader.
++** gcSHADER_TYPE_FRAGMENT Fragment shader.
++**
++** OUTPUT:
++**
++** gcSHADER * Shader
++** Pointer to a variable receiving the gcSHADER object pointer.
++*/
++gceSTATUS
++gcSHADER_Construct(
++ IN gcoHAL Hal,
++ IN gctINT ShaderType,
++ OUT gcSHADER * Shader
++ );
++
++/*******************************************************************************
++** gcSHADER_Destroy
++********************************************************************************
++**
++** Destroy a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_Destroy(
++ IN gcSHADER Shader
++ );
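++
++/* Illustrative sketch (not part of the original header): the usual
++** construct/destroy lifecycle. The gcoHAL handle is assumed to have been
++** obtained elsewhere; error handling is abbreviated.
++*/
++static gceSTATUS
++_ExampleShaderLifecycle(
++    IN gcoHAL Hal
++    )
++{
++    gcSHADER shader = gcvNULL;
++    gceSTATUS status;
++
++    /* Create a vertex shader object. */
++    status = gcSHADER_Construct(Hal, gcSHADER_TYPE_VERTEX, &shader);
++    if (status != gcvSTATUS_OK)
++    {
++        return status;
++    }
++
++    /* ... build the shader with the gcSHADER_Add* functions below ... */
++
++    /* Release the object. */
++    return gcSHADER_Destroy(shader);
++}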
++
++/*******************************************************************************
++** gcSHADER_Copy
++********************************************************************************
++**
++** Copy a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSHADER Source
++** Pointer to a gcSHADER object that will be copied.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_Copy(
++ IN gcSHADER Shader,
++ IN gcSHADER Source
++ );
++
++/*******************************************************************************
++** gcSHADER_LoadHeader
++**
++** Load a gcSHADER object from a binary buffer. The binary buffer is laid out
++** as follows:
++** // Six word header
++** // Signature, must be 'S','H','D','R'.
++** gctINT8 signature[4];
++** gctUINT32 binFileVersion;
++** gctUINT32 compilerVersion[2];
++** gctUINT32 gcSLVersion;
++** gctUINT32 binarySize;
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**          The shader type will be returned if the type in the shader object is not gcSHADER_TYPE_PRECOMPILED.
++**
++** gctPOINTER Buffer
++** Pointer to a binary buffer containing the shader data to load.
++**
++** gctSIZE_T BufferSize
++** Number of bytes inside the binary buffer pointed to by 'Buffer'.
++**
++** OUTPUT:
++** nothing
++**
++*/
++gceSTATUS
++gcSHADER_LoadHeader(
++ IN gcSHADER Shader,
++ IN gctPOINTER Buffer,
++ IN gctSIZE_T BufferSize,
++ OUT gctUINT32 * ShaderVersion
++ );
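++
++/* Illustrative sketch (not part of the original header): the six-word binary
++** header described above, written out as a struct for readability. The field
++** layout follows the comment; the struct name itself is an assumption.
++*/
++typedef struct _exampleShaderBinaryHeader
++{
++    gctINT8   signature[4];       /* Must be 'S','H','D','R'. */
++    gctUINT32 binFileVersion;
++    gctUINT32 compilerVersion[2];
++    gctUINT32 gcSLVersion;
++    gctUINT32 binarySize;
++}
++exampleShaderBinaryHeader;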
++
++/*******************************************************************************
++** gcSHADER_LoadKernel
++**
++** Load a kernel function given by name into gcSHADER object
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSTRING KernelName
++** Pointer to a kernel function name
++**
++** OUTPUT:
++** nothing
++**
++*/
++gceSTATUS
++gcSHADER_LoadKernel(
++ IN gcSHADER Shader,
++ IN gctSTRING KernelName
++ );
++
++/*******************************************************************************
++** gcSHADER_Load
++********************************************************************************
++**
++** Load a gcSHADER object from a binary buffer.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctPOINTER Buffer
++**          Pointer to a binary buffer containing the shader data to load.
++**
++** gctSIZE_T BufferSize
++** Number of bytes inside the binary buffer pointed to by 'Buffer'.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_Load(
++ IN gcSHADER Shader,
++ IN gctPOINTER Buffer,
++ IN gctSIZE_T BufferSize
++ );
++
++/*******************************************************************************
++** gcSHADER_Save
++********************************************************************************
++**
++** Save a gcSHADER object to a binary buffer.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctPOINTER Buffer
++** Pointer to a binary buffer to be used as storage for the gcSHADER
++** object. If 'Buffer' is gcvNULL, the gcSHADER object will not be saved,
++** but the number of bytes required to hold the binary output for the
++** gcSHADER object will be returned.
++**
++** gctSIZE_T * BufferSize
++** Pointer to a variable holding the number of bytes allocated in
++** 'Buffer'. Only valid if 'Buffer' is not gcvNULL.
++**
++** OUTPUT:
++**
++** gctSIZE_T * BufferSize
++** Pointer to a variable receiving the number of bytes required to hold
++** the binary form of the gcSHADER object.
++*/
++gceSTATUS
++gcSHADER_Save(
++ IN gcSHADER Shader,
++ IN gctPOINTER Buffer,
++ IN OUT gctSIZE_T * BufferSize
++ );
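++
++/* Illustrative sketch (not part of the original header): the documented
++** two-call pattern for gcSHADER_Save, first querying the required size with a
++** gcvNULL buffer, then saving into caller-allocated storage. malloc/free are
++** stand-ins for whatever allocator the real caller uses.
++*/
++static gceSTATUS
++_ExampleSaveShader(
++    IN gcSHADER Shader
++    )
++{
++    gctSIZE_T size = 0;
++    gctPOINTER buffer;
++    gceSTATUS status;
++
++    /* First call: Buffer is gcvNULL, so only the byte count is returned. */
++    status = gcSHADER_Save(Shader, gcvNULL, &size);
++    if (status != gcvSTATUS_OK)
++    {
++        return status;
++    }
++
++    buffer = malloc(size);
++    if (buffer == gcvNULL)
++    {
++        return gcvSTATUS_OUT_OF_MEMORY;
++    }
++
++    /* Second call: write the binary form into the buffer. */
++    status = gcSHADER_Save(Shader, buffer, &size);
++
++    /* ... persist 'buffer' somewhere, then release it ... */
++    free(buffer);
++
++    return status;
++}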
++
++/*******************************************************************************
++** gcSHADER_LoadEx
++********************************************************************************
++**
++** Load a gcSHADER object from a binary buffer.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctPOINTER Buffer
++**          Pointer to a binary buffer containing the shader data to load.
++**
++** gctSIZE_T BufferSize
++** Number of bytes inside the binary buffer pointed to by 'Buffer'.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_LoadEx(
++ IN gcSHADER Shader,
++ IN gctPOINTER Buffer,
++ IN gctSIZE_T BufferSize
++ );
++
++/*******************************************************************************
++** gcSHADER_SaveEx
++********************************************************************************
++**
++** Save a gcSHADER object to a binary buffer.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctPOINTER Buffer
++** Pointer to a binary buffer to be used as storage for the gcSHADER
++** object. If 'Buffer' is gcvNULL, the gcSHADER object will not be saved,
++** but the number of bytes required to hold the binary output for the
++** gcSHADER object will be returned.
++**
++** gctSIZE_T * BufferSize
++** Pointer to a variable holding the number of bytes allocated in
++** 'Buffer'. Only valid if 'Buffer' is not gcvNULL.
++**
++** OUTPUT:
++**
++** gctSIZE_T * BufferSize
++** Pointer to a variable receiving the number of bytes required to hold
++** the binary form of the gcSHADER object.
++*/
++gceSTATUS
++gcSHADER_SaveEx(
++ IN gcSHADER Shader,
++ IN gctPOINTER Buffer,
++ IN OUT gctSIZE_T * BufferSize
++ );
++
++/*******************************************************************************
++** gcSHADER_ReallocateAttributes
++**
++** Reallocate an array of pointers to gcATTRIBUTE objects.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcSHADER_ReallocateAttributes(
++ IN gcSHADER Shader,
++ IN gctSIZE_T Count
++ );
++
++/*******************************************************************************
++** gcSHADER_AddAttribute
++********************************************************************************
++**
++** Add an attribute to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the attribute to add.
++**
++** gcSHADER_TYPE Type
++** Type of the attribute to add.
++**
++** gctSIZE_T Length
++** Array length of the attribute to add. 'Length' must be at least 1.
++**
++** gctBOOL IsTexture
++** gcvTRUE if the attribute is used as a texture coordinate, gcvFALSE if not.
++**
++** OUTPUT:
++**
++** gcATTRIBUTE * Attribute
++** Pointer to a variable receiving the gcATTRIBUTE object pointer.
++*/
++gceSTATUS
++gcSHADER_AddAttribute(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ IN gctBOOL IsTexture,
++ OUT gcATTRIBUTE * Attribute
++ );
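++
++/* Illustrative sketch (not part of the original header): declaring a vec4
++** position attribute. gcSHADER_FLOAT_X4 is assumed to be the four-component
++** float type from the gcSHADER_TYPE enumeration; the attribute name is made
++** up for the example.
++*/
++static gceSTATUS
++_ExampleAddPosition(
++    IN gcSHADER Shader,
++    OUT gcATTRIBUTE * Attribute
++    )
++{
++    /* Length 1: not an array; gcvFALSE: not a texture coordinate. */
++    return gcSHADER_AddAttribute(Shader,
++                                 "a_position",
++                                 gcSHADER_FLOAT_X4,
++                                 1,
++                                 gcvFALSE,
++                                 Attribute);
++}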
++
++/*******************************************************************************
++** gcSHADER_GetAttributeCount
++********************************************************************************
++**
++** Get the number of attributes for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Count
++** Pointer to a variable receiving the number of attributes.
++*/
++gceSTATUS
++gcSHADER_GetAttributeCount(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * Count
++ );
++
++/*******************************************************************************
++** gcSHADER_GetAttribute
++********************************************************************************
++**
++** Get the gcATTRIBUTE object pointer for an indexed attribute for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Index
++** Index of the attribute to retrieve.
++**
++** OUTPUT:
++**
++** gcATTRIBUTE * Attribute
++** Pointer to a variable receiving the gcATTRIBUTE object pointer.
++*/
++gceSTATUS
++gcSHADER_GetAttribute(
++ IN gcSHADER Shader,
++ IN gctUINT Index,
++ OUT gcATTRIBUTE * Attribute
++ );
++
++/*******************************************************************************
++** gcSHADER_ReallocateUniforms
++**
++** Reallocate an array of pointers to gcUNIFORM objects.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcSHADER_ReallocateUniforms(
++ IN gcSHADER Shader,
++ IN gctSIZE_T Count
++ );
++
++/*******************************************************************************
++** gcSHADER_AddUniform
++********************************************************************************
++**
++** Add a uniform to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the uniform to add.
++**
++** gcSHADER_TYPE Type
++** Type of the uniform to add.
++**
++** gctSIZE_T Length
++** Array length of the uniform to add. 'Length' must be at least 1.
++**
++** OUTPUT:
++**
++** gcUNIFORM * Uniform
++** Pointer to a variable receiving the gcUNIFORM object pointer.
++*/
++gceSTATUS
++gcSHADER_AddUniform(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ OUT gcUNIFORM * Uniform
++ );
++
++/*******************************************************************************
++** gcSHADER_AddPreRotationUniform
++********************************************************************************
++**
++** Add a uniform to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the uniform to add.
++**
++** gcSHADER_TYPE Type
++** Type of the uniform to add.
++**
++** gctSIZE_T Length
++** Array length of the uniform to add. 'Length' must be at least 1.
++**
++** gctINT col
++** Which uniform.
++**
++** OUTPUT:
++**
++** gcUNIFORM * Uniform
++** Pointer to a variable receiving the gcUNIFORM object pointer.
++*/
++gceSTATUS
++gcSHADER_AddPreRotationUniform(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ IN gctINT col,
++ OUT gcUNIFORM * Uniform
++ );
++
++/*******************************************************************************
++** gcSHADER_AddUniformEx
++********************************************************************************
++**
++** Add a uniform to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the uniform to add.
++**
++** gcSHADER_TYPE Type
++** Type of the uniform to add.
++**
++** gcSHADER_PRECISION precision
++** Precision of the uniform to add.
++**
++** gctSIZE_T Length
++** Array length of the uniform to add. 'Length' must be at least 1.
++**
++** OUTPUT:
++**
++** gcUNIFORM * Uniform
++** Pointer to a variable receiving the gcUNIFORM object pointer.
++*/
++gceSTATUS
++gcSHADER_AddUniformEx(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gcSHADER_PRECISION precision,
++ IN gctSIZE_T Length,
++ OUT gcUNIFORM * Uniform
++ );
++
++/*******************************************************************************
++** gcSHADER_AddUniformEx1
++********************************************************************************
++**
++** Add a uniform to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the uniform to add.
++**
++** gcSHADER_TYPE Type
++** Type of the uniform to add.
++**
++** gcSHADER_PRECISION precision
++** Precision of the uniform to add.
++**
++** gctSIZE_T Length
++** Array length of the uniform to add. 'Length' must be at least 1.
++**
++** gcSHADER_VAR_CATEGORY varCategory
++** Variable category, normal or struct.
++**
++** gctUINT16 numStructureElement
++** If struct, its element number.
++**
++** gctINT16 parent
++** If struct, parent index in gcSHADER.variables.
++**
++** gctINT16 prevSibling
++** If struct, previous sibling index in gcSHADER.variables.
++**
++** OUTPUT:
++**
++** gcUNIFORM * Uniform
++** Pointer to a variable receiving the gcUNIFORM object pointer.
++**
++** gctINT16* ThisUniformIndex
++** Returned value about uniform index in gcSHADER.
++*/
++gceSTATUS
++gcSHADER_AddUniformEx1(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gcSHADER_PRECISION precision,
++ IN gctSIZE_T Length,
++ IN gctINT IsArray,
++ IN gcSHADER_VAR_CATEGORY varCategory,
++ IN gctUINT16 numStructureElement,
++ IN gctINT16 parent,
++ IN gctINT16 prevSibling,
++ OUT gctINT16* ThisUniformIndex,
++ OUT gcUNIFORM * Uniform
++ );
++
++/*******************************************************************************
++** gcSHADER_GetUniformCount
++********************************************************************************
++**
++** Get the number of uniforms for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Count
++** Pointer to a variable receiving the number of uniforms.
++*/
++gceSTATUS
++gcSHADER_GetUniformCount(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * Count
++ );
++
++/*******************************************************************************
++** gcSHADER_GetPreRotationUniform
++********************************************************************************
++**
++** Get the preRotate Uniform.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gcUNIFORM ** pUniform
++** Pointer to a preRotation uniforms array.
++*/
++gceSTATUS
++gcSHADER_GetPreRotationUniform(
++ IN gcSHADER Shader,
++ OUT gcUNIFORM ** pUniform
++ );
++
++/*******************************************************************************
++** gcSHADER_GetUniform
++********************************************************************************
++**
++** Get the gcUNIFORM object pointer for an indexed uniform for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Index
++** Index of the uniform to retrieve.
++**
++** OUTPUT:
++**
++** gcUNIFORM * Uniform
++** Pointer to a variable receiving the gcUNIFORM object pointer.
++*/
++gceSTATUS
++gcSHADER_GetUniform(
++ IN gcSHADER Shader,
++ IN gctUINT Index,
++ OUT gcUNIFORM * Uniform
++ );
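++
++/* Illustrative sketch (not part of the original header): walking the uniform
++** table with the count/index accessors above. Error handling is abbreviated.
++*/
++static void
++_ExampleWalkUniforms(
++    IN gcSHADER Shader
++    )
++{
++    gctSIZE_T count = 0;
++    gctSIZE_T i;
++
++    if (gcSHADER_GetUniformCount(Shader, &count) != gcvSTATUS_OK)
++    {
++        return;
++    }
++
++    for (i = 0; i < count; i++)
++    {
++        gcUNIFORM uniform = gcvNULL;
++
++        if (gcSHADER_GetUniform(Shader, (gctUINT) i, &uniform) == gcvSTATUS_OK)
++        {
++            /* Inspect 'uniform' here. */
++        }
++    }
++}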
++
++
++/*******************************************************************************
++** gcSHADER_GetUniformIndexingRange
++********************************************************************************
++**
++** Get the indexing range for an indexed uniform of this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctINT uniformIndex
++** Index of the start uniform.
++**
++** gctINT offset
++** Offset to indexing.
++**
++** OUTPUT:
++**
++** gctINT * LastUniformIndex
++** Pointer to index of last uniform in indexing range.
++**
++** gctINT * OffsetUniformIndex
++** Pointer to index of uniform that indexing at offset.
++**
++** gctINT * DeviationInOffsetUniform
++** Pointer to offset in uniform picked up.
++*/
++gceSTATUS
++gcSHADER_GetUniformIndexingRange(
++ IN gcSHADER Shader,
++ IN gctINT uniformIndex,
++ IN gctINT offset,
++ OUT gctINT * LastUniformIndex,
++ OUT gctINT * OffsetUniformIndex,
++ OUT gctINT * DeviationInOffsetUniform
++ );
++
++/*******************************************************************************
++** gcSHADER_GetKernelFunction
++**
++** Get the gcKERNEL_FUNCTION object pointer for an indexed kernel function for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Index
++**          Index of the kernel function to retrieve.
++**
++** OUTPUT:
++**
++** gcKERNEL_FUNCTION * KernelFunction
++** Pointer to a variable receiving the gcKERNEL_FUNCTION object pointer.
++*/
++gceSTATUS
++gcSHADER_GetKernelFunction(
++ IN gcSHADER Shader,
++ IN gctUINT Index,
++ OUT gcKERNEL_FUNCTION * KernelFunction
++ );
++
++gceSTATUS
++gcSHADER_GetKernelFunctionByName(
++ IN gcSHADER Shader,
++ IN gctSTRING KernelName,
++ OUT gcKERNEL_FUNCTION * KernelFunction
++ );
++/*******************************************************************************
++** gcSHADER_GetKernelFunctionCount
++**
++** Get the number of kernel functions for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Count
++** Pointer to a variable receiving the number of kernel functions.
++*/
++gceSTATUS
++gcSHADER_GetKernelFunctionCount(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * Count
++ );
++
++/*******************************************************************************
++** gcSHADER_ReallocateOutputs
++**
++** Reallocate an array of pointers to gcOUTPUT objects.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcSHADER_ReallocateOutputs(
++ IN gcSHADER Shader,
++ IN gctSIZE_T Count
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOutput
++********************************************************************************
++**
++** Add an output to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the output to add.
++**
++** gcSHADER_TYPE Type
++** Type of the output to add.
++**
++** gctSIZE_T Length
++** Array length of the output to add. 'Length' must be at least 1.
++**
++** gctUINT16 TempRegister
++** Temporary register index that holds the output value.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOutput(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ IN gctUINT16 TempRegister
++ );
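++
++/* Illustrative sketch (not part of the original header): declaring a vec4
++** output whose value lives in temporary register 0. gcSHADER_FLOAT_X4 is
++** assumed to be the four-component float type from the gcSHADER_TYPE
++** enumeration; the output name is made up for the example.
++*/
++static gceSTATUS
++_ExampleAddColorOutput(
++    IN gcSHADER Shader
++    )
++{
++    /* Length 1: not an array; temporary register 0 holds the value. */
++    return gcSHADER_AddOutput(Shader, "v_color", gcSHADER_FLOAT_X4, 1, 0);
++}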
++
++gceSTATUS
++gcSHADER_AddOutputIndexed(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gctSIZE_T Index,
++ IN gctUINT16 TempIndex
++ );
++
++/*******************************************************************************
++** gcSHADER_GetOutputCount
++********************************************************************************
++**
++** Get the number of outputs for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Count
++** Pointer to a variable receiving the number of outputs.
++*/
++gceSTATUS
++gcSHADER_GetOutputCount(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * Count
++ );
++
++/*******************************************************************************
++** gcSHADER_GetOutput
++********************************************************************************
++**
++** Get the gcOUTPUT object pointer for an indexed output for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Index
++** Index of output to retrieve.
++**
++** OUTPUT:
++**
++** gcOUTPUT * Output
++** Pointer to a variable receiving the gcOUTPUT object pointer.
++*/
++gceSTATUS
++gcSHADER_GetOutput(
++ IN gcSHADER Shader,
++ IN gctUINT Index,
++ OUT gcOUTPUT * Output
++ );
++
++
++/*******************************************************************************
++** gcSHADER_GetOutputByName
++********************************************************************************
++**
++** Get the gcOUTPUT object pointer for this shader by output name.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSTRING name
++** Name of output to retrieve.
++**
++** gctSIZE_T nameLength
++** Length of name to retrieve
++**
++** OUTPUT:
++**
++** gcOUTPUT * Output
++** Pointer to a variable receiving the gcOUTPUT object pointer.
++*/
++gceSTATUS
++gcSHADER_GetOutputByName(
++ IN gcSHADER Shader,
++ IN gctSTRING name,
++ IN gctSIZE_T nameLength,
++ OUT gcOUTPUT * Output
++ );
++
++/*******************************************************************************
++** gcSHADER_ReallocateVariables
++**
++** Reallocate an array of pointers to gcVARIABLE objects.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcSHADER_ReallocateVariables(
++ IN gcSHADER Shader,
++ IN gctSIZE_T Count
++ );
++
++/*******************************************************************************
++** gcSHADER_AddVariable
++********************************************************************************
++**
++** Add a variable to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the variable to add.
++**
++** gcSHADER_TYPE Type
++** Type of the variable to add.
++**
++** gctSIZE_T Length
++** Array length of the variable to add. 'Length' must be at least 1.
++**
++** gctUINT16 TempRegister
++** Temporary register index that holds the variable value.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddVariable(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ IN gctUINT16 TempRegister
++ );
++
++
++/*******************************************************************************
++** gcSHADER_AddVariableEx
++********************************************************************************
++**
++** Add a variable to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctCONST_STRING Name
++** Name of the variable to add.
++**
++** gcSHADER_TYPE Type
++** Type of the variable to add.
++**
++** gctSIZE_T Length
++** Array length of the variable to add. 'Length' must be at least 1.
++**
++** gctUINT16 TempRegister
++** Temporary register index that holds the variable value.
++**
++** gcSHADER_VAR_CATEGORY varCategory
++** Variable category, normal or struct.
++**
++** gctUINT16 numStructureElement
++** If struct, its element number.
++**
++** gctINT16 parent
++** If struct, parent index in gcSHADER.variables.
++**
++** gctINT16 prevSibling
++** If struct, previous sibling index in gcSHADER.variables.
++**
++** OUTPUT:
++**
++** gctINT16* ThisVarIndex
++** Returned value about variable index in gcSHADER.
++*/
++gceSTATUS
++gcSHADER_AddVariableEx(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ IN gctUINT16 TempRegister,
++ IN gcSHADER_VAR_CATEGORY varCategory,
++ IN gctUINT16 numStructureElement,
++ IN gctINT16 parent,
++ IN gctINT16 prevSibling,
++ OUT gctINT16* ThisVarIndex
++ );
++
++/*******************************************************************************
++** gcSHADER_UpdateVariable
++********************************************************************************
++**
++** Update a variable to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Index
++** Index of variable to retrieve.
++**
++** gceVARIABLE_UPDATE_FLAGS flag
++** Flag which property of variable will be updated.
++**
++** gctUINT newValue
++** New value to update.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_UpdateVariable(
++ IN gcSHADER Shader,
++ IN gctUINT Index,
++ IN gceVARIABLE_UPDATE_FLAGS flag,
++ IN gctUINT newValue
++ );
++
++/*******************************************************************************
++** gcSHADER_GetVariableCount
++********************************************************************************
++**
++** Get the number of variables for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Count
++** Pointer to a variable receiving the number of variables.
++*/
++gceSTATUS
++gcSHADER_GetVariableCount(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * Count
++ );
++
++/*******************************************************************************
++** gcSHADER_GetVariable
++********************************************************************************
++**
++** Get the gcVARIABLE object pointer for an indexed variable for this shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Index
++** Index of variable to retrieve.
++**
++** OUTPUT:
++**
++** gcVARIABLE * Variable
++** Pointer to a variable receiving the gcVARIABLE object pointer.
++*/
++gceSTATUS
++gcSHADER_GetVariable(
++ IN gcSHADER Shader,
++ IN gctUINT Index,
++ OUT gcVARIABLE * Variable
++ );
++
++/*******************************************************************************
++** gcSHADER_GetVariableIndexingRange
++********************************************************************************
++**
++** Get the gcVARIABLE indexing range.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcVARIABLE variable
++** Start variable.
++**
++** gctBOOL whole
++**          Indicates whether the maximum indexing range is queried.
++**
++** OUTPUT:
++**
++** gctUINT *Start
++** Pointer to range start (temp register index).
++**
++** gctUINT *End
++** Pointer to range end (temp register index).
++*/
++gceSTATUS
++gcSHADER_GetVariableIndexingRange(
++ IN gcSHADER Shader,
++ IN gcVARIABLE variable,
++ IN gctBOOL whole,
++ OUT gctUINT *Start,
++ OUT gctUINT *End
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOpcode
++********************************************************************************
++**
++** Add an opcode to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_OPCODE Opcode
++** Opcode to add.
++**
++** gctUINT16 TempRegister
++** Temporary register index that acts as the target of the opcode.
++**
++** gctUINT8 Enable
++** Write enable bits for the temporary register that acts as the target
++** of the opcode.
++**
++** gcSL_FORMAT Format
++** Format of the temporary register.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOpcode(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gctUINT16 TempRegister,
++ IN gctUINT8 Enable,
++ IN gcSL_FORMAT Format
++ );
++
++gceSTATUS
++gcSHADER_AddOpcode2(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gcSL_CONDITION Condition,
++ IN gctUINT16 TempRegister,
++ IN gctUINT8 Enable,
++ IN gcSL_FORMAT Format
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOpcodeIndexed
++********************************************************************************
++**
++** Add an opcode to a gcSHADER object that writes to a dynamically indexed
++** target.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_OPCODE Opcode
++** Opcode to add.
++**
++** gctUINT16 TempRegister
++** Temporary register index that acts as the target of the opcode.
++**
++** gctUINT8 Enable
++** Write enable bits for the temporary register that acts as the
++** target of the opcode.
++**
++** gcSL_INDEXED Mode
++** Location of the dynamic index inside the temporary register. Valid
++** values can be:
++**
++** gcSL_INDEXED_X - Use x component of the temporary register.
++** gcSL_INDEXED_Y - Use y component of the temporary register.
++** gcSL_INDEXED_Z - Use z component of the temporary register.
++** gcSL_INDEXED_W - Use w component of the temporary register.
++**
++** gctUINT16 IndexRegister
++** Temporary register index that holds the dynamic index.
++**
++** gcSL_FORMAT Format
++** Format of the temporary register.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOpcodeIndexed(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gctUINT16 TempRegister,
++ IN gctUINT8 Enable,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister,
++ IN gcSL_FORMAT Format
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOpcodeConditionIndexed
++**
++** Add an opcode to a gcSHADER object that writes to a dynamically indexed
++** target.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_OPCODE Opcode
++** Opcode to add.
++**
++** gcSL_CONDITION Condition
++** Condition to check.
++**
++** gctUINT16 TempRegister
++** Temporary register index that acts as the target of the opcode.
++**
++** gctUINT8 Enable
++** Write enable bits for the temporary register that acts as the
++** target of the opcode.
++**
++** gcSL_INDEXED Indexed
++** Location of the dynamic index inside the temporary register. Valid
++** values can be:
++**
++** gcSL_INDEXED_X - Use x component of the temporary register.
++** gcSL_INDEXED_Y - Use y component of the temporary register.
++** gcSL_INDEXED_Z - Use z component of the temporary register.
++** gcSL_INDEXED_W - Use w component of the temporary register.
++**
++** gctUINT16 IndexRegister
++** Temporary register index that holds the dynamic index.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOpcodeConditionIndexed(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gcSL_CONDITION Condition,
++ IN gctUINT16 TempRegister,
++ IN gctUINT8 Enable,
++ IN gcSL_INDEXED Indexed,
++ IN gctUINT16 IndexRegister,
++ IN gcSL_FORMAT Format
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOpcodeConditional
++********************************************************************************
++**
++** Add a conditional opcode to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_OPCODE Opcode
++** Opcode to add.
++**
++** gcSL_CONDITION Condition
++** Condition that needs to evaluate to gcvTRUE in order for the opcode to
++** execute.
++**
++** gctUINT Label
++** Target label if 'Condition' evaluates to gcvTRUE.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOpcodeConditional(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gcSL_CONDITION Condition,
++ IN gctUINT Label
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOpcodeConditionalFormatted
++**
++** Add a conditional jump or call opcode to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_OPCODE Opcode
++** Opcode to add.
++**
++** gcSL_CONDITION Condition
++** Condition that needs to evaluate to gcvTRUE in order for the opcode to
++** execute.
++**
++** gcSL_FORMAT Format
++** Format of conditional operands
++**
++** gctUINT Label
++** Target label if 'Condition' evaluates to gcvTRUE.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOpcodeConditionalFormatted(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gcSL_CONDITION Condition,
++ IN gcSL_FORMAT Format,
++ IN gctUINT Label
++ );
++
++/*******************************************************************************
++** gcSHADER_AddOpcodeConditionalFormattedEnable
++**
++** Add a conditional jump or call opcode to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_OPCODE Opcode
++** Opcode to add.
++**
++** gcSL_CONDITION Condition
++** Condition that needs to evaluate to gcvTRUE in order for the opcode to
++** execute.
++**
++** gcSL_FORMAT Format
++** Format of conditional operands
++**
++** gctUINT8 Enable
++** Write enable value for the target of the opcode.
++**
++** gctUINT Label
++** Target label if 'Condition' evaluates to gcvTRUE.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddOpcodeConditionalFormattedEnable(
++ IN gcSHADER Shader,
++ IN gcSL_OPCODE Opcode,
++ IN gcSL_CONDITION Condition,
++ IN gcSL_FORMAT Format,
++ IN gctUINT8 Enable,
++ IN gctUINT Label
++ );
++
++/*******************************************************************************
++** gcSHADER_AddLabel
++********************************************************************************
++**
++** Define a label at the current instruction of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT Label
++** Label to define.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddLabel(
++ IN gcSHADER Shader,
++ IN gctUINT Label
++ );
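++
++/* Illustrative sketch (not part of the original header): a forward branch
++** pairs gcSHADER_AddOpcodeConditional with a label that is defined later via
++** gcSHADER_AddLabel and resolved by gcSHADER_Pack. The enumerants gcSL_JMP
++** and gcSL_ALWAYS are assumed from the gcSL opcode/condition enumerations.
++*/
++static gceSTATUS
++_ExampleForwardJump(
++    IN gcSHADER Shader,
++    IN gctUINT Label
++    )
++{
++    gceSTATUS status;
++
++    /* Unconditional jump to 'Label'. */
++    status = gcSHADER_AddOpcodeConditional(Shader, gcSL_JMP, gcSL_ALWAYS, Label);
++    if (status != gcvSTATUS_OK)
++    {
++        return status;
++    }
++
++    /* ... instructions that the jump skips ... */
++
++    /* Bind 'Label' to the current instruction. */
++    return gcSHADER_AddLabel(Shader, Label);
++}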
++
++/*******************************************************************************
++** gcSHADER_AddSource
++********************************************************************************
++**
++** Add a source operand to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_TYPE Type
++** Type of the source operand.
++**
++** gctUINT16 SourceIndex
++** Index of the source operand.
++**
++** gctUINT8 Swizzle
++** x, y, z, and w swizzle values packed into one 8-bit value.
++**
++** gcSL_FORMAT Format
++** Format of the source operand.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSource(
++ IN gcSHADER Shader,
++ IN gcSL_TYPE Type,
++ IN gctUINT16 SourceIndex,
++ IN gctUINT8 Swizzle,
++ IN gcSL_FORMAT Format
++ );
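++
++/* Illustrative sketch (not part of the original header): one gcSL instruction
++** is emitted as a gcSHADER_AddOpcode call for the target followed by one
++** gcSHADER_AddSource* call per operand. The enumerants used below (gcSL_ADD,
++** gcSL_ENABLE_XYZW, gcSL_SWIZZLE_XYZW, gcSL_TEMP and gcSL_FLOAT) are assumed
++** from the gcSL enumerations defined elsewhere in this header family.
++**
++** Emits: temp(2).xyzw = temp(0).xyzw + temp(1).xyzw
++*/
++static gceSTATUS
++_ExampleEmitAdd(
++    IN gcSHADER Shader
++    )
++{
++    gceSTATUS status;
++
++    /* Target: temporary register 2, all four components enabled. */
++    status = gcSHADER_AddOpcode(Shader, gcSL_ADD, 2, gcSL_ENABLE_XYZW, gcSL_FLOAT);
++    if (status != gcvSTATUS_OK)
++    {
++        return status;
++    }
++
++    /* Source 0: temporary register 0. */
++    status = gcSHADER_AddSource(Shader, gcSL_TEMP, 0, gcSL_SWIZZLE_XYZW, gcSL_FLOAT);
++    if (status != gcvSTATUS_OK)
++    {
++        return status;
++    }
++
++    /* Source 1: temporary register 1. */
++    return gcSHADER_AddSource(Shader, gcSL_TEMP, 1, gcSL_SWIZZLE_XYZW, gcSL_FLOAT);
++}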
++
++/*******************************************************************************
++** gcSHADER_AddSourceIndexed
++********************************************************************************
++**
++** Add a dynamically indexed source operand to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcSL_TYPE Type
++** Type of the source operand.
++**
++** gctUINT16 SourceIndex
++** Index of the source operand.
++**
++** gctUINT8 Swizzle
++** x, y, z, and w swizzle values packed into one 8-bit value.
++**
++** gcSL_INDEXED Mode
++** Addressing mode for the index.
++**
++** gctUINT16 IndexRegister
++** Temporary register index that holds the dynamic index.
++**
++** gcSL_FORMAT Format
++** Format of the source operand.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceIndexed(
++ IN gcSHADER Shader,
++ IN gcSL_TYPE Type,
++ IN gctUINT16 SourceIndex,
++ IN gctUINT8 Swizzle,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister,
++ IN gcSL_FORMAT Format
++ );
++
++/*******************************************************************************
++** gcSHADER_AddSourceAttribute
++********************************************************************************
++**
++** Add an attribute as a source operand to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcATTRIBUTE Attribute
++** Pointer to a gcATTRIBUTE object.
++**
++** gctUINT8 Swizzle
++** x, y, z, and w swizzle values packed into one 8-bit value.
++**
++** gctINT Index
++** Static index into the attribute in case the attribute is a matrix
++** or array.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceAttribute(
++ IN gcSHADER Shader,
++ IN gcATTRIBUTE Attribute,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index
++ );
++
++/*******************************************************************************
++** gcSHADER_AddSourceAttributeIndexed
++********************************************************************************
++**
++** Add an indexed attribute as a source operand to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcATTRIBUTE Attribute
++** Pointer to a gcATTRIBUTE object.
++**
++** gctUINT8 Swizzle
++** x, y, z, and w swizzle values packed into one 8-bit value.
++**
++** gctINT Index
++** Static index into the attribute in case the attribute is a matrix
++** or array.
++**
++** gcSL_INDEXED Mode
++** Addressing mode of the dynamic index.
++**
++** gctUINT16 IndexRegister
++** Temporary register index that holds the dynamic index.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceAttributeIndexed(
++ IN gcSHADER Shader,
++ IN gcATTRIBUTE Attribute,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister
++ );
++
++/*******************************************************************************
++** gcSHADER_AddSourceUniform
++********************************************************************************
++**
++** Add a uniform as a source operand to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gctUINT8 Swizzle
++** x, y, z, and w swizzle values packed into one 8-bit value.
++**
++** gctINT Index
++** Static index into the uniform in case the uniform is a matrix or
++** array.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceUniform(
++ IN gcSHADER Shader,
++ IN gcUNIFORM Uniform,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index
++ );
++
++/*******************************************************************************
++** gcSHADER_AddSourceUniformIndexed
++********************************************************************************
++**
++** Add an indexed uniform as a source operand to a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gctUINT8 Swizzle
++** x, y, z, and w swizzle values packed into one 8-bit value.
++**
++** gctINT Index
++** Static index into the uniform in case the uniform is a matrix or
++** array.
++**
++** gcSL_INDEXED Mode
++** Addressing mode of the dynamic index.
++**
++** gctUINT16 IndexRegister
++** Temporary register index that holds the dynamic index.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceUniformIndexed(
++ IN gcSHADER Shader,
++ IN gcUNIFORM Uniform,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister
++ );
++
++gceSTATUS
++gcSHADER_AddSourceSamplerIndexed(
++ IN gcSHADER Shader,
++ IN gctUINT8 Swizzle,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister
++ );
++
++gceSTATUS
++gcSHADER_AddSourceAttributeFormatted(
++ IN gcSHADER Shader,
++ IN gcATTRIBUTE Attribute,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index,
++ IN gcSL_FORMAT Format
++ );
++
++gceSTATUS
++gcSHADER_AddSourceAttributeIndexedFormatted(
++ IN gcSHADER Shader,
++ IN gcATTRIBUTE Attribute,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister,
++ IN gcSL_FORMAT Format
++ );
++
++gceSTATUS
++gcSHADER_AddSourceUniformFormatted(
++ IN gcSHADER Shader,
++ IN gcUNIFORM Uniform,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index,
++ IN gcSL_FORMAT Format
++ );
++
++gceSTATUS
++gcSHADER_AddSourceUniformIndexedFormatted(
++ IN gcSHADER Shader,
++ IN gcUNIFORM Uniform,
++ IN gctUINT8 Swizzle,
++ IN gctINT Index,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister,
++ IN gcSL_FORMAT Format
++ );
++
++gceSTATUS
++gcSHADER_AddSourceSamplerIndexedFormatted(
++ IN gcSHADER Shader,
++ IN gctUINT8 Swizzle,
++ IN gcSL_INDEXED Mode,
++ IN gctUINT16 IndexRegister,
++ IN gcSL_FORMAT Format
++ );
++
++/*******************************************************************************
++** gcSHADER_AddSourceConstant
++********************************************************************************
++**
++** Add a constant floating point value as a source operand to a gcSHADER
++** object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctFLOAT Constant
++** Floating point constant.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceConstant(
++ IN gcSHADER Shader,
++ IN gctFLOAT Constant
++ );
++
++/*******************************************************************************
++** gcSHADER_AddSourceConstantFormatted
++********************************************************************************
++**
++** Add a constant value as a source operand to a gcSHADER
++** object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** void * Constant
++** Pointer to constant.
++**
++**      gcSL_FORMAT Format
++**          Format of the constant pointed to by 'Constant'.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_AddSourceConstantFormatted(
++ IN gcSHADER Shader,
++ IN void *Constant,
++ IN gcSL_FORMAT Format
++ );
++
++/*******************************************************************************
++** gcSHADER_Pack
++********************************************************************************
++**
++** Pack a dynamically created gcSHADER object by trimming the allocated arrays
++** and resolving all the labeling.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_Pack(
++ IN gcSHADER Shader
++ );
++
++/*******************************************************************************
++** gcSHADER_SetOptimizationOption
++********************************************************************************
++**
++** Set optimization option of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctUINT OptimizationOption
++** Optimization option. Can be one of the following:
++**
++** 0 - No optimization.
++** 1 - Full optimization.
++** Other value - For optimizer testing.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcSHADER_SetOptimizationOption(
++ IN gcSHADER Shader,
++ IN gctUINT OptimizationOption
++ );
++
++/*******************************************************************************
++** gcSHADER_ReallocateFunctions
++**
++** Reallocate an array of pointers to gcFUNCTION objects.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcSHADER_ReallocateFunctions(
++ IN gcSHADER Shader,
++ IN gctSIZE_T Count
++ );
++
++gceSTATUS
++gcSHADER_AddFunction(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ OUT gcFUNCTION * Function
++ );
++
++gceSTATUS
++gcSHADER_ReallocateKernelFunctions(
++ IN gcSHADER Shader,
++ IN gctSIZE_T Count
++ );
++
++gceSTATUS
++gcSHADER_AddKernelFunction(
++ IN gcSHADER Shader,
++ IN gctCONST_STRING Name,
++ OUT gcKERNEL_FUNCTION * KernelFunction
++ );
++
++gceSTATUS
++gcSHADER_BeginFunction(
++ IN gcSHADER Shader,
++ IN gcFUNCTION Function
++ );
++
++gceSTATUS
++gcSHADER_EndFunction(
++ IN gcSHADER Shader,
++ IN gcFUNCTION Function
++ );
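++
++/* Illustrative sketch (not part of the original header): a function body is
++** bracketed by the Begin/End calls around its emitted instructions. The
++** function name is made up for the example; error handling is abbreviated.
++*/
++static gceSTATUS
++_ExampleAddHelperFunction(
++    IN gcSHADER Shader
++    )
++{
++    gcFUNCTION function = gcvNULL;
++    gceSTATUS status;
++
++    status = gcSHADER_AddFunction(Shader, "helper", &function);
++    if (status != gcvSTATUS_OK)
++    {
++        return status;
++    }
++
++    status = gcSHADER_BeginFunction(Shader, function);
++    if (status != gcvSTATUS_OK)
++    {
++        return status;
++    }
++
++    /* ... emit the body with gcSHADER_AddOpcode / gcSHADER_AddSource ... */
++
++    return gcSHADER_EndFunction(Shader, function);
++}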
++
++gceSTATUS
++gcSHADER_BeginKernelFunction(
++ IN gcSHADER Shader,
++ IN gcKERNEL_FUNCTION KernelFunction
++ );
++
++gceSTATUS
++gcSHADER_EndKernelFunction(
++ IN gcSHADER Shader,
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctSIZE_T LocalMemorySize
++ );
++
++gceSTATUS
++gcSHADER_SetMaxKernelFunctionArgs(
++ IN gcSHADER Shader,
++ IN gctUINT32 MaxKernelFunctionArgs
++ );
++
++/*******************************************************************************
++** gcSHADER_SetConstantMemorySize
++**
++** Set the constant memory address space size of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T ConstantMemorySize
++** Constant memory size in bytes
++**
++** gctCHAR *ConstantMemoryBuffer
++** Constant memory buffer
++*/
++gceSTATUS
++gcSHADER_SetConstantMemorySize(
++ IN gcSHADER Shader,
++ IN gctSIZE_T ConstantMemorySize,
++ IN gctCHAR * ConstantMemoryBuffer
++ );
++
++/*******************************************************************************
++** gcSHADER_GetConstantMemorySize
++**
++** Get the constant memory address space size of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * ConstantMemorySize
++** Pointer to a variable receiving constant memory size in bytes
++**
++** gctCHAR **ConstantMemoryBuffer.
++** Pointer to a variable for returned shader constant memory buffer.
++*/
++gceSTATUS
++gcSHADER_GetConstantMemorySize(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * ConstantMemorySize,
++ OUT gctCHAR ** ConstantMemoryBuffer
++ );
++
++/*******************************************************************************
++** gcSHADER_SetPrivateMemorySize
++**
++** Set the private memory address space size of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T PrivateMemorySize
++** Private memory size in bytes
++*/
++gceSTATUS
++gcSHADER_SetPrivateMemorySize(
++ IN gcSHADER Shader,
++ IN gctSIZE_T PrivateMemorySize
++ );
++
++/*******************************************************************************
++** gcSHADER_GetPrivateMemorySize
++**
++** Get the private memory address space size of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * PrivateMemorySize
++** Pointer to a variable receiving private memory size in bytes
++*/
++gceSTATUS
++gcSHADER_GetPrivateMemorySize(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * PrivateMemorySize
++ );
++
++/*******************************************************************************
++** gcSHADER_SetLocalMemorySize
++**
++** Set the local memory address space size of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** gctSIZE_T LocalMemorySize
++** Local memory size in bytes
++*/
++gceSTATUS
++gcSHADER_SetLocalMemorySize(
++ IN gcSHADER Shader,
++ IN gctSIZE_T LocalMemorySize
++ );
++
++/*******************************************************************************
++** gcSHADER_GetLocalMemorySize
++**
++** Get the local memory address space size of a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * LocalMemorySize
++** Pointer to a variable receiving local memory size in bytes
++*/
++gceSTATUS
++gcSHADER_GetLocalMemorySize(
++ IN gcSHADER Shader,
++ OUT gctSIZE_T * LocalMemorySize
++ );
++
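++/* A minimal usage sketch (illustrative only; 'shader' is a hypothetical
++** gcSHADER object and error handling is elided). The Set/Get pairs above
++** mirror each other:
++**
++**     gctSIZE_T localSize;
++**     gceSTATUS status;
++**
++**     status = gcSHADER_SetLocalMemorySize(shader, 1024);
++**     status = gcSHADER_GetLocalMemorySize(shader, &localSize);
++*/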
++
++/*******************************************************************************
++** gcSHADER_CheckValidity
++**
++** Check validity for a gcSHADER object.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object.
++**
++*/
++gceSTATUS
++gcSHADER_CheckValidity(
++ IN gcSHADER Shader
++ );
++
++#if gcdUSE_WCLIP_PATCH
++gceSTATUS
++gcATTRIBUTE_IsPosition(
++ IN gcATTRIBUTE Attribute,
++ OUT gctBOOL * IsPosition
++ );
++#endif
++
++/*******************************************************************************
++** gcATTRIBUTE_GetType
++********************************************************************************
++**
++** Get the type and array length of a gcATTRIBUTE object.
++**
++** INPUT:
++**
++** gcATTRIBUTE Attribute
++** Pointer to a gcATTRIBUTE object.
++**
++** OUTPUT:
++**
++** gcSHADER_TYPE * Type
++** Pointer to a variable receiving the type of the attribute. 'Type'
++** can be gcvNULL, in which case no type will be returned.
++**
++** gctSIZE_T * ArrayLength
++** Pointer to a variable receiving the length of the array if the
++** attribute was declared as an array. If the attribute was not
++** declared as an array, the array length will be 1. 'ArrayLength' can
++** be gcvNULL, in which case no array length will be returned.
++*/
++gceSTATUS
++gcATTRIBUTE_GetType(
++ IN gcATTRIBUTE Attribute,
++ OUT gcSHADER_TYPE * Type,
++ OUT gctSIZE_T * ArrayLength
++ );
++
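++/* A minimal usage sketch for the query above (illustrative only;
++** 'attribute' is a hypothetical gcATTRIBUTE handle and error handling
++** is elided):
++**
++**     gcSHADER_TYPE type;
++**     gctSIZE_T arrayLength;
++**     gceSTATUS status;
++**
++**     status = gcATTRIBUTE_GetType(attribute, &type, &arrayLength);
++*/
++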
++/*******************************************************************************
++** gcATTRIBUTE_GetName
++********************************************************************************
++**
++** Get the name of a gcATTRIBUTE object.
++**
++** INPUT:
++**
++** gcATTRIBUTE Attribute
++** Pointer to a gcATTRIBUTE object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Length
++** Pointer to a variable receiving the length of the attribute name.
++** 'Length' can be gcvNULL, in which case no length will be returned.
++**
++** gctCONST_STRING * Name
++** Pointer to a variable receiving the pointer to the attribute name.
++** 'Name' can be gcvNULL, in which case no name will be returned.
++*/
++gceSTATUS
++gcATTRIBUTE_GetName(
++ IN gcATTRIBUTE Attribute,
++ OUT gctSIZE_T * Length,
++ OUT gctCONST_STRING * Name
++ );
++
++/*******************************************************************************
++** gcATTRIBUTE_IsEnabled
++********************************************************************************
++**
++** Query the enabled state of a gcATTRIBUTE object.
++**
++** INPUT:
++**
++** gcATTRIBUTE Attribute
++** Pointer to a gcATTRIBUTE object.
++**
++** OUTPUT:
++**
++** gctBOOL * Enabled
++** Pointer to a variable receiving the enabled state of the attribute.
++*/
++gceSTATUS
++gcATTRIBUTE_IsEnabled(
++ IN gcATTRIBUTE Attribute,
++ OUT gctBOOL * Enabled
++ );
++
++gceSTATUS
++gcATTRIBUTE_GetIndex(
++ IN gcATTRIBUTE Attribute,
++ OUT gctUINT16 * Index
++ );
++
++/*******************************************************************************
++** gcUNIFORM_GetType
++********************************************************************************
++**
++** Get the type and array length of a gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** gcSHADER_TYPE * Type
++** Pointer to a variable receiving the type of the uniform. 'Type' can
++** be gcvNULL, in which case no type will be returned.
++**
++** gctSIZE_T * ArrayLength
++** Pointer to a variable receiving the length of the array if the
++** uniform was declared as an array. If the uniform was not declared
++** as an array, the array length will be 1. 'ArrayLength' can be gcvNULL,
++** in which case no array length will be returned.
++*/
++gceSTATUS
++gcUNIFORM_GetType(
++ IN gcUNIFORM Uniform,
++ OUT gcSHADER_TYPE * Type,
++ OUT gctSIZE_T * ArrayLength
++ );
++
++/*******************************************************************************
++** gcUNIFORM_GetTypeEx
++********************************************************************************
++**
++** Get the type and array length of a gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** gcSHADER_TYPE * Type
++** Pointer to a variable receiving the type of the uniform. 'Type' can
++** be gcvNULL, in which case no type will be returned.
++**
++** gcSHADER_PRECISION * Precision
++** Pointer to a variable receiving the precision of the uniform. 'Precision' can
++** be gcvNULL, in which case no precision will be returned.
++**
++** gctSIZE_T * ArrayLength
++** Pointer to a variable receiving the length of the array if the
++** uniform was declared as an array. If the uniform was not declared
++** as an array, the array length will be 1. 'ArrayLength' can be gcvNULL,
++** in which case no array length will be returned.
++*/
++gceSTATUS
++gcUNIFORM_GetTypeEx(
++ IN gcUNIFORM Uniform,
++ OUT gcSHADER_TYPE * Type,
++ OUT gcSHADER_PRECISION * Precision,
++ OUT gctSIZE_T * ArrayLength
++ );
++
++/*******************************************************************************
++** gcUNIFORM_GetFlags
++********************************************************************************
++**
++** Get the flags of a gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** gceUNIFORM_FLAGS * Flags
++** Pointer to a variable receiving the flags of the uniform.
++**
++*/
++gceSTATUS
++gcUNIFORM_GetFlags(
++ IN gcUNIFORM Uniform,
++ OUT gceUNIFORM_FLAGS * Flags
++ );
++
++/*******************************************************************************
++** gcUNIFORM_SetFlags
++********************************************************************************
++**
++** Set the flags of a gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gceUNIFORM_FLAGS Flags
++** Flags of the uniform to be set.
++**
++** OUTPUT:
++** Nothing.
++**
++*/
++gceSTATUS
++gcUNIFORM_SetFlags(
++ IN gcUNIFORM Uniform,
++ IN gceUNIFORM_FLAGS Flags
++ );
++
++/*******************************************************************************
++** gcUNIFORM_GetName
++********************************************************************************
++**
++** Get the name of a gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Length
++** Pointer to a variable receiving the length of the uniform name.
++** 'Length' can be gcvNULL, in which case no length will be returned.
++**
++** gctCONST_STRING * Name
++** Pointer to a variable receiving the pointer to the uniform name.
++** 'Name' can be gcvNULL, in which case no name will be returned.
++*/
++gceSTATUS
++gcUNIFORM_GetName(
++ IN gcUNIFORM Uniform,
++ OUT gctSIZE_T * Length,
++ OUT gctCONST_STRING * Name
++ );
++
++/*******************************************************************************
++** gcUNIFORM_GetSampler
++********************************************************************************
++**
++** Get the physical sampler number for a sampler gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** gctUINT32 * Sampler
++** Pointer to a variable receiving the physical sampler.
++*/
++gceSTATUS
++gcUNIFORM_GetSampler(
++ IN gcUNIFORM Uniform,
++ OUT gctUINT32 * Sampler
++ );
++
++gceSTATUS
++gcUNIFORM_GetIndex(
++ IN gcUNIFORM Uniform,
++ OUT gctUINT16 * Index
++ );
++
++/*******************************************************************************
++** gcUNIFORM_GetFormat
++**
++** Get the format and pointer flag of a gcUNIFORM object.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** gcSL_FORMAT * Format
++** Pointer to a variable receiving the format of each element of the
++** uniform. 'Format' can be gcvNULL, in which case no format will be
++** returned.
++**
++** gctBOOL * IsPointer
++** Pointer to a variable receiving whether the uniform is a pointer.
++** 'IsPointer' can be gcvNULL, in which case no pointer flag will be
++** returned.
++*/
++gceSTATUS
++gcUNIFORM_GetFormat(
++ IN gcUNIFORM Uniform,
++ OUT gcSL_FORMAT * Format,
++ OUT gctBOOL * IsPointer
++ );
++
++/*******************************************************************************
++** gcUNIFORM_SetFormat
++**
++** Set the format and isPointer of a uniform.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gcSL_FORMAT Format
++** Format of each element of the uniform.
++**
++** gctBOOL IsPointer
++** Whether the uniform is a pointer.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcUNIFORM_SetFormat(
++ IN gcUNIFORM Uniform,
++ IN gcSL_FORMAT Format,
++ IN gctBOOL IsPointer
++ );
++
++/*******************************************************************************
++** gcUNIFORM_SetValue
++********************************************************************************
++**
++** Set the value of a uniform in integer.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gctSIZE_T Count
++** Number of entries to program if the uniform has been declared as an
++** array.
++**
++** const gctINT * Value
++** Pointer to a buffer holding the integer values for the uniform.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcUNIFORM_SetValue(
++ IN gcUNIFORM Uniform,
++ IN gctSIZE_T Count,
++ IN const gctINT * Value
++ );
++
++/*******************************************************************************
++** gcUNIFORM_SetValueX
++********************************************************************************
++**
++** Set the value of a uniform in fixed point.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gctSIZE_T Count
++** Number of entries to program if the uniform has been declared as an
++** array.
++**
++** const gctFIXED_POINT * Value
++** Pointer to a buffer holding the fixed point values for the uniform.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcUNIFORM_SetValueX(
++ IN gcUNIFORM Uniform,
++ IN gctSIZE_T Count,
++ IN gctFIXED_POINT * Value
++ );
++
++/*******************************************************************************
++** gcUNIFORM_SetValueF
++********************************************************************************
++**
++** Set the value of a uniform in floating point.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** gctSIZE_T Count
++** Number of entries to program if the uniform has been declared as an
++** array.
++**
++** const gctFLOAT * Value
++** Pointer to a buffer holding the floating point values for the
++** uniform.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcUNIFORM_SetValueF(
++ IN gcUNIFORM Uniform,
++ IN gctSIZE_T Count,
++ IN const gctFLOAT * Value
++ );
++
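++/* A minimal usage sketch (illustrative only; 'color' is a hypothetical
++** vec4 uniform and error handling is elided). Note that Count is the
++** number of array entries, not the number of floats:
++**
++**     gctFLOAT value[4] = { 1.0f, 0.0f, 0.0f, 1.0f };
++**     gceSTATUS status;
++**
++**     status = gcUNIFORM_SetValueF(color, 1, value);
++*/
++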
++/*******************************************************************************
++** gcUNIFORM_ProgramF
++**
++** Set the value of a uniform in floating point.
++**
++** INPUT:
++**
++** gctUINT32 Address
++** Address of Uniform.
++**
++** gctSIZE_T Row, Col
++**
++** const gctFLOAT * Value
++** Pointer to a buffer holding the floating point values for the
++** uniform.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gcUNIFORM_ProgramF(
++ IN gctUINT32 Address,
++ IN gctSIZE_T Row,
++ IN gctSIZE_T Col,
++ IN const gctFLOAT * Value
++ );
++
++/*******************************************************************************
++** gcUNIFORM_GetModelViewProjMatrix
++********************************************************************************
++**
++** Get the modelViewProjMatrix uniform ID, if present.
++**
++** INPUT:
++**
++** gcUNIFORM Uniform
++** Pointer to a gcUNIFORM object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gctUINT
++gcUNIFORM_GetModelViewProjMatrix(
++ IN gcUNIFORM Uniform
++ );
++
++/*******************************************************************************
++** gcOUTPUT_GetType
++********************************************************************************
++**
++** Get the type and array length of a gcOUTPUT object.
++**
++** INPUT:
++**
++** gcOUTPUT Output
++** Pointer to a gcOUTPUT object.
++**
++** OUTPUT:
++**
++** gcSHADER_TYPE * Type
++** Pointer to a variable receiving the type of the output. 'Type' can
++** be gcvNULL, in which case no type will be returned.
++**
++** gctSIZE_T * ArrayLength
++** Pointer to a variable receiving the length of the array if the
++** output was declared as an array. If the output was not declared
++** as an array, the array length will be 1. 'ArrayLength' can be gcvNULL,
++** in which case no array length will be returned.
++*/
++gceSTATUS
++gcOUTPUT_GetType(
++ IN gcOUTPUT Output,
++ OUT gcSHADER_TYPE * Type,
++ OUT gctSIZE_T * ArrayLength
++ );
++
++/*******************************************************************************
++** gcOUTPUT_GetIndex
++********************************************************************************
++**
++** Get the index of a gcOUTPUT object.
++**
++** INPUT:
++**
++** gcOUTPUT Output
++** Pointer to a gcOUTPUT object.
++**
++** OUTPUT:
++**
++** gctUINT * Index
++** Pointer to a variable receiving the temporary register index of the
++** output. 'Index' can be gcvNULL, in which case no index will be
++** returned.
++*/
++gceSTATUS
++gcOUTPUT_GetIndex(
++ IN gcOUTPUT Output,
++ OUT gctUINT * Index
++ );
++
++/*******************************************************************************
++** gcOUTPUT_GetName
++********************************************************************************
++**
++** Get the name of a gcOUTPUT object.
++**
++** INPUT:
++**
++** gcOUTPUT Output
++** Pointer to a gcOUTPUT object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Length
++** Pointer to a variable receiving the length of the output name.
++** 'Length' can be gcvNULL, in which case no length will be returned.
++**
++** gctCONST_STRING * Name
++** Pointer to a variable receiving the pointer to the output name.
++** 'Name' can be gcvNULL, in which case no name will be returned.
++*/
++gceSTATUS
++gcOUTPUT_GetName(
++ IN gcOUTPUT Output,
++ OUT gctSIZE_T * Length,
++ OUT gctCONST_STRING * Name
++ );
++
++/*******************************************************************************
++*********************************************************** F U N C T I O N S **
++*******************************************************************************/
++
++/*******************************************************************************
++** gcFUNCTION_ReallocateArguments
++**
++** Reallocate an array of gcsFUNCTION_ARGUMENT objects.
++**
++** INPUT:
++**
++** gcFUNCTION Function
++** Pointer to a gcFUNCTION object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcFUNCTION_ReallocateArguments(
++ IN gcFUNCTION Function,
++ IN gctSIZE_T Count
++ );
++
++gceSTATUS
++gcFUNCTION_AddArgument(
++ IN gcFUNCTION Function,
++ IN gctUINT16 TempIndex,
++ IN gctUINT8 Enable,
++ IN gctUINT8 Qualifier
++ );
++
++gceSTATUS
++gcFUNCTION_GetArgument(
++ IN gcFUNCTION Function,
++ IN gctUINT16 Index,
++ OUT gctUINT16_PTR Temp,
++ OUT gctUINT8_PTR Enable,
++ OUT gctUINT8_PTR Swizzle
++ );
++
++gceSTATUS
++gcFUNCTION_GetLabel(
++ IN gcFUNCTION Function,
++ OUT gctUINT_PTR Label
++ );
++
++/*******************************************************************************
++************************* K E R N E L P R O P E R T Y F U N C T I O N S **
++*******************************************************************************/
++/*******************************************************************************/
++gceSTATUS
++gcKERNEL_FUNCTION_AddKernelFunctionProperties(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctINT propertyType,
++ IN gctSIZE_T propertySize,
++ IN gctINT * values
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetPropertyCount(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ OUT gctSIZE_T * Count
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetProperty(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctUINT Index,
++ OUT gctSIZE_T * propertySize,
++ OUT gctINT * propertyType,
++ OUT gctINT * propertyValues
++ );
++
++
++/*******************************************************************************
++*******************************I M A G E S A M P L E R F U N C T I O N S **
++*******************************************************************************/
++/*******************************************************************************
++** gcKERNEL_FUNCTION_ReallocateImageSamplers
++**
++** Reallocate an array of pointers to image sampler pair.
++**
++** INPUT:
++**
++** gcKERNEL_FUNCTION KernelFunction
++** Pointer to a gcKERNEL_FUNCTION object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcKERNEL_FUNCTION_ReallocateImageSamplers(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctSIZE_T Count
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_AddImageSampler(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctUINT8 ImageNum,
++ IN gctBOOL IsConstantSamplerType,
++ IN gctUINT32 SamplerType
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetImageSamplerCount(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ OUT gctSIZE_T * Count
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetImageSampler(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctUINT Index,
++ OUT gctUINT8 *ImageNum,
++ OUT gctBOOL *IsConstantSamplerType,
++ OUT gctUINT32 *SamplerType
++ );
++
++/*******************************************************************************
++*********************************************K E R N E L F U N C T I O N S **
++*******************************************************************************/
++
++/*******************************************************************************
++** gcKERNEL_FUNCTION_ReallocateArguments
++**
++** Reallocate an array of gcsFUNCTION_ARGUMENT objects.
++**
++** INPUT:
++**
++** gcKERNEL_FUNCTION Function
++** Pointer to a gcKERNEL_FUNCTION object.
++**
++** gctSIZE_T Count
++** Array count to reallocate. 'Count' must be at least 1.
++*/
++gceSTATUS
++gcKERNEL_FUNCTION_ReallocateArguments(
++ IN gcKERNEL_FUNCTION Function,
++ IN gctSIZE_T Count
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_AddArgument(
++ IN gcKERNEL_FUNCTION Function,
++ IN gctUINT16 TempIndex,
++ IN gctUINT8 Enable,
++ IN gctUINT8 Qualifier
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetArgument(
++ IN gcKERNEL_FUNCTION Function,
++ IN gctUINT16 Index,
++ OUT gctUINT16_PTR Temp,
++ OUT gctUINT8_PTR Enable,
++ OUT gctUINT8_PTR Swizzle
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetLabel(
++ IN gcKERNEL_FUNCTION Function,
++ OUT gctUINT_PTR Label
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetName(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ OUT gctSIZE_T * Length,
++ OUT gctCONST_STRING * Name
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_ReallocateUniformArguments(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctSIZE_T Count
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_AddUniformArgument(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctCONST_STRING Name,
++ IN gcSHADER_TYPE Type,
++ IN gctSIZE_T Length,
++ OUT gcUNIFORM * UniformArgument
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetUniformArgumentCount(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ OUT gctSIZE_T * Count
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_GetUniformArgument(
++ IN gcKERNEL_FUNCTION KernelFunction,
++ IN gctUINT Index,
++ OUT gcUNIFORM * UniformArgument
++ );
++
++gceSTATUS
++gcKERNEL_FUNCTION_SetCodeEnd(
++ IN gcKERNEL_FUNCTION KernelFunction
++ );
++
++/*******************************************************************************
++** gcCompileShader
++********************************************************************************
++**
++** Compile a shader.
++**
++** INPUT:
++**
++** gcoHAL Hal
++** Pointer to a gcoHAL object.
++**
++** gctINT ShaderType
++** Shader type to compile. Can be one of the following values:
++**
++** gcSHADER_TYPE_VERTEX
++** Compile a vertex shader.
++**
++** gcSHADER_TYPE_FRAGMENT
++** Compile a fragment shader.
++**
++** gctSIZE_T SourceSize
++** Size of the source buffer in bytes.
++**
++** gctCONST_STRING Source
++** Pointer to the buffer containing the shader source code.
++**
++** OUTPUT:
++**
++** gcSHADER * Binary
++** Pointer to a variable receiving the pointer to a gcSHADER object
++** containing the compiled shader code.
++**
++** gctSTRING * Log
++** Pointer to a variable receiving a string pointer containing the
++** compile log.
++*/
++gceSTATUS
++gcCompileShader(
++ IN gcoHAL Hal,
++ IN gctINT ShaderType,
++ IN gctSIZE_T SourceSize,
++ IN gctCONST_STRING Source,
++ OUT gcSHADER * Binary,
++ OUT gctSTRING * Log
++ );
++
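++/* A minimal usage sketch (illustrative only; 'hal', 'source' and
++** 'sourceSize' are hypothetical and error handling is elided):
++**
++**     gcSHADER binary;
++**     gctSTRING log = gcvNULL;
++**     gceSTATUS status;
++**
++**     status = gcCompileShader(hal, gcSHADER_TYPE_FRAGMENT,
++**                              sourceSize, source, &binary, &log);
++*/
++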
++/*******************************************************************************
++** gcOptimizeShader
++********************************************************************************
++**
++** Optimize a shader.
++**
++** INPUT:
++**
++** gcSHADER Shader
++** Pointer to a gcSHADER object holding information about the compiled
++** shader.
++**
++** gctFILE LogFile
++** Pointer to an open FILE object.
++*/
++gceSTATUS
++gcOptimizeShader(
++ IN gcSHADER Shader,
++ IN gctFILE LogFile
++ );
++
++/*******************************************************************************
++** gcLinkShaders
++********************************************************************************
++**
++** Link two shaders and generate a hardware-specific state buffer by compiling
++** the compiler generated code through the resource allocator and code
++** generator.
++**
++** INPUT:
++**
++** gcSHADER VertexShader
++** Pointer to a gcSHADER object holding information about the compiled
++** vertex shader.
++**
++** gcSHADER FragmentShader
++** Pointer to a gcSHADER object holding information about the compiled
++** fragment shader.
++**
++** gceSHADER_FLAGS Flags
++** Compiler flags. Can be any of the following:
++**
++** gcvSHADER_DEAD_CODE - Dead code elimination.
++** gcvSHADER_RESOURCE_USAGE - Resource usage optimization.
++** gcvSHADER_OPTIMIZER - Full optimization.
++** gcvSHADER_USE_GL_Z - Use OpenGL ES Z coordinate.
++** gcvSHADER_USE_GL_POSITION - Use OpenGL ES gl_Position.
++** gcvSHADER_USE_GL_FACE - Use OpenGL ES gl_FaceForward.
++**
++** OUTPUT:
++**
++** gctSIZE_T * StateBufferSize
++** Pointer to a variable receiving the number of bytes in the buffer
++** returned in 'StateBuffer'.
++**
++** gctPOINTER * StateBuffer
++** Pointer to a variable receiving a buffer pointer that contains the
++** states required to download the shaders into the hardware.
++**
++** gcsHINT_PTR * Hints
++** Pointer to a variable receiving a gcsHINT structure pointer that
++** contains information required when loading the shader states.
++*/
++gceSTATUS
++gcLinkShaders(
++ IN gcSHADER VertexShader,
++ IN gcSHADER FragmentShader,
++ IN gceSHADER_FLAGS Flags,
++ OUT gctSIZE_T * StateBufferSize,
++ OUT gctPOINTER * StateBuffer,
++ OUT gcsHINT_PTR * Hints,
++ OUT gcMACHINECODE_PTR *ppVsMachineCode,
++ OUT gcMACHINECODE_PTR *ppFsMachineCode
++ );
++
++/*******************************************************************************
++** gcLoadShaders
++********************************************************************************
++**
++** Load a pre-compiled and pre-linked shader program into the hardware.
++**
++** INPUT:
++**
++** gcoHAL Hal
++** Pointer to a gcoHAL object.
++**
++** gctSIZE_T StateBufferSize
++** The number of bytes in the 'StateBuffer'.
++**
++** gctPOINTER StateBuffer
++** Pointer to the states that make up the shader program.
++**
++** gcsHINT_PTR Hints
++** Pointer to a gcsHINT structure that contains information required
++** when loading the shader states.
++*/
++gceSTATUS
++gcLoadShaders(
++ IN gcoHAL Hal,
++ IN gctSIZE_T StateBufferSize,
++ IN gctPOINTER StateBuffer,
++ IN gcsHINT_PTR Hints
++ );
++
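++/* A minimal usage sketch chaining gcLinkShaders and gcLoadShaders
++** (illustrative only; 'vs' and 'fs' are the results of two prior
++** gcCompileShader calls, 'hal' is the gcoHAL object, and error
++** handling is elided):
++**
++**     gctSIZE_T stateSize;
++**     gctPOINTER states;
++**     gcsHINT_PTR hints;
++**     gcMACHINECODE_PTR vsCode, fsCode;
++**     gceSTATUS status;
++**
++**     status = gcLinkShaders(vs, fs, gcvSHADER_OPTIMIZER,
++**                            &stateSize, &states, &hints,
++**                            &vsCode, &fsCode);
++**     status = gcLoadShaders(hal, stateSize, states, hints);
++*/
++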
++gceSTATUS
++gcRecompileShaders(
++ IN gcoHAL Hal,
++ IN gcMACHINECODE_PTR pVsMachineCode,
++ IN gcMACHINECODE_PTR pPsMachineCode,
++ /*Recompile variables*/
++ IN OUT gctPOINTER *ppRecompileStateBuffer,
++ IN OUT gctSIZE_T *pRecompileStateBufferSize,
++ IN OUT gcsHINT_PTR *ppRecompileHints,
++    /* native state */
++ IN gctPOINTER pNativeStateBuffer,
++ IN gctSIZE_T nativeStateBufferSize,
++ IN gcsHINT_PTR pNativeHints,
++    /* NPOT info */
++ IN gctUINT32 Samplers,
++ IN gctUINT32 *SamplerWrapS,
++ IN gctUINT32 *SamplerWrapT
++ );
++
++gceSTATUS
++gcRecompileDepthBias(
++ IN gcoHAL Hal,
++ IN gcMACHINECODE_PTR pVsMachineCode,
++ /*Recompile variables*/
++ IN OUT gctPOINTER *ppRecompileStateBuffer,
++ IN OUT gctSIZE_T *pRecompileStateBufferSize,
++ IN OUT gcsHINT_PTR *ppRecompileHints,
++    /* native state */
++ IN gctPOINTER pNativeStateBuffer,
++ IN gctSIZE_T nativeStateBufferSize,
++ IN gcsHINT_PTR pNativeHints,
++ OUT gctINT * uniformAddr,
++ OUT gctINT * uniformChannel
++ );
++
++/*******************************************************************************
++** gcSaveProgram
++********************************************************************************
++**
++** Save pre-compiled shaders and pre-linked programs to a binary file.
++**
++** INPUT:
++**
++** gcSHADER VertexShader
++** Pointer to vertex shader object.
++**
++** gcSHADER FragmentShader
++** Pointer to fragment shader object.
++**
++** gctSIZE_T ProgramBufferSize
++** Number of bytes in 'ProgramBuffer'.
++**
++** gctPOINTER ProgramBuffer
++** Pointer to buffer containing the program states.
++**
++** gcsHINT_PTR Hints
++** Pointer to HINTS structure for program states.
++**
++** OUTPUT:
++**
++** gctPOINTER * Binary
++** Pointer to a variable receiving the binary data to be saved.
++**
++** gctSIZE_T * BinarySize
++** Pointer to a variable receiving the number of bytes inside 'Binary'.
++*/
++gceSTATUS
++gcSaveProgram(
++ IN gcSHADER VertexShader,
++ IN gcSHADER FragmentShader,
++ IN gctSIZE_T ProgramBufferSize,
++ IN gctPOINTER ProgramBuffer,
++ IN gcsHINT_PTR Hints,
++ OUT gctPOINTER * Binary,
++ OUT gctSIZE_T * BinarySize
++ );
++
++/*******************************************************************************
++** gcLoadProgram
++********************************************************************************
++**
++** Load pre-compiled shaders and pre-linked programs from a binary file.
++**
++** INPUT:
++**
++** gctPOINTER Binary
++** Pointer to the binary data loaded.
++**
++** gctSIZE_T BinarySize
++** Number of bytes in 'Binary'.
++**
++** OUTPUT:
++**
++** gcSHADER VertexShader
++** Pointer to a vertex shader object.
++**
++** gcSHADER FragmentShader
++** Pointer to a fragment shader object.
++**
++** gctSIZE_T * ProgramBufferSize
++** Pointer to a variable receiving the number of bytes in the buffer
++** returned in 'ProgramBuffer'.
++**
++** gctPOINTER * ProgramBuffer
++** Pointer to a variable receiving a buffer pointer that contains the
++** states required to download the shaders into the hardware.
++**
++** gcsHINT_PTR * Hints
++** Pointer to a variable receiving a gcsHINT structure pointer that
++** contains information required when loading the shader states.
++*/
++gceSTATUS
++gcLoadProgram(
++ IN gctPOINTER Binary,
++ IN gctSIZE_T BinarySize,
++ OUT gcSHADER VertexShader,
++ OUT gcSHADER FragmentShader,
++ OUT gctSIZE_T * ProgramBufferSize,
++ OUT gctPOINTER * ProgramBuffer,
++ OUT gcsHINT_PTR * Hints
++ );
++
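++/* A minimal round-trip sketch pairing gcSaveProgram with gcLoadProgram
++** (illustrative only; 'vs', 'fs', 'stateSize', 'states' and 'hints' are
++** the outputs of a prior link, 'vs2' and 'fs2' are previously
++** constructed gcSHADER objects, and error handling is elided):
++**
++**     gctPOINTER blob, states2;
++**     gctSIZE_T blobSize, stateSize2;
++**     gcsHINT_PTR hints2;
++**     gceSTATUS status;
++**
++**     status = gcSaveProgram(vs, fs, stateSize, states, hints,
++**                            &blob, &blobSize);
++**     status = gcLoadProgram(blob, blobSize, vs2, fs2,
++**                            &stateSize2, &states2, &hints2);
++*/
++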
++/*******************************************************************************
++** gcCompileKernel
++********************************************************************************
++**
++** Compile an OpenCL kernel shader.
++**
++** INPUT:
++**
++** gcoHAL Hal
++** Pointer to a gcoHAL object.
++**
++** gctSIZE_T SourceSize
++** Size of the source buffer in bytes.
++**
++** gctCONST_STRING Source
++** Pointer to the buffer containing the shader source code.
++**
++** OUTPUT:
++**
++** gcSHADER * Binary
++** Pointer to a variable receiving the pointer to a gcSHADER object
++** containing the compiled shader code.
++**
++** gctSTRING * Log
++** Pointer to a variable receiving a string pointer containing the
++** compile log.
++*/
++gceSTATUS
++gcCompileKernel(
++ IN gcoHAL Hal,
++ IN gctSIZE_T SourceSize,
++ IN gctCONST_STRING Source,
++ IN gctCONST_STRING Options,
++ OUT gcSHADER * Binary,
++ OUT gctSTRING * Log
++ );
++
++/*******************************************************************************
++** gcLinkKernel
++********************************************************************************
++**
++** Link an OpenCL kernel and generate a hardware-specific state buffer by compiling
++** the compiler generated code through the resource allocator and code
++** generator.
++**
++** INPUT:
++**
++** gcSHADER Kernel
++** Pointer to a gcSHADER object holding information about the compiled
++** OpenCL kernel.
++**
++** gceSHADER_FLAGS Flags
++** Compiler flags. Can be any of the following:
++**
++** gcvSHADER_DEAD_CODE - Dead code elimination.
++** gcvSHADER_RESOURCE_USAGE - Resource usage optimization.
++** gcvSHADER_OPTIMIZER - Full optimization.
++** gcvSHADER_USE_GL_Z - Use OpenGL ES Z coordinate.
++** gcvSHADER_USE_GL_POSITION - Use OpenGL ES gl_Position.
++** gcvSHADER_USE_GL_FACE - Use OpenGL ES gl_FaceForward.
++**
++** OUTPUT:
++**
++** gctSIZE_T * StateBufferSize
++** Pointer to a variable receiving the number of bytes in the buffer
++** returned in 'StateBuffer'.
++**
++** gctPOINTER * StateBuffer
++** Pointer to a variable receiving a buffer pointer that contains the
++** states required to download the shaders into the hardware.
++**
++** gcsHINT_PTR * Hints
++** Pointer to a variable receiving a gcsHINT structure pointer that
++** contains information required when loading the shader states.
++*/
++gceSTATUS
++gcLinkKernel(
++ IN gcSHADER Kernel,
++ IN gceSHADER_FLAGS Flags,
++ OUT gctSIZE_T * StateBufferSize,
++ OUT gctPOINTER * StateBuffer,
++ OUT gcsHINT_PTR * Hints
++ );
++
++/*******************************************************************************
++** gcLoadKernel
++********************************************************************************
++**
++** Load a pre-compiled and pre-linked kernel program into the hardware.
++**
++** INPUT:
++**
++** gctSIZE_T StateBufferSize
++** The number of bytes in the 'StateBuffer'.
++**
++** gctPOINTER StateBuffer
++** Pointer to the states that make up the shader program.
++**
++** gcsHINT_PTR Hints
++** Pointer to a gcsHINT structure that contains information required
++** when loading the shader states.
++*/
++gceSTATUS
++gcLoadKernel(
++ IN gctSIZE_T StateBufferSize,
++ IN gctPOINTER StateBuffer,
++ IN gcsHINT_PTR Hints
++ );
++
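++/* A minimal usage sketch of the OpenCL path: compile, link and load a
++** kernel (illustrative only; 'hal', 'source', 'sourceSize' and 'options'
++** are hypothetical and error handling is elided):
++**
++**     gcSHADER kernel;
++**     gctSTRING log = gcvNULL;
++**     gctSIZE_T stateSize;
++**     gctPOINTER states;
++**     gcsHINT_PTR hints;
++**     gceSTATUS status;
++**
++**     status = gcCompileKernel(hal, sourceSize, source, options,
++**                              &kernel, &log);
++**     status = gcLinkKernel(kernel, gcvSHADER_OPTIMIZER,
++**                           &stateSize, &states, &hints);
++**     status = gcLoadKernel(stateSize, states, hints);
++*/
++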
++gceSTATUS
++gcInvokeThreadWalker(
++ IN gcsTHREAD_WALKER_INFO_PTR Info
++ );
++
++void
++gcTYPE_GetTypeInfo(
++ IN gcSHADER_TYPE Type,
++ OUT gctINT * Components,
++ OUT gctINT * Rows,
++ OUT gctCONST_STRING * Name
++ );
++
++gctBOOL
++gcOPT_doVaryingPackingForShader(
++ IN gcSHADER Shader
++ );
++
++gceSTATUS
++gcSHADER_PatchNPOTForMachineCode(
++ IN gcSHADER_KIND shaderType,
++ IN gcMACHINECODE_PTR pMachineCode,
++ IN gcNPOT_PATCH_PARAM_PTR pPatchParam,
++ IN gctUINT countOfPatchParam,
++ IN gctUINT hwSupportedInstCount,
++ OUT gctPOINTER* ppCmdBuffer,
++ OUT gctUINT32* pByteSizeOfCmdBuffer,
++    IN OUT gcsHINT_PTR pHints /* The user must copy the original hints into this one, then pass it in */
++ );
++
++gceSTATUS
++gcSHADER_PatchZBiasForMachineCodeVS(
++ IN gcMACHINECODE_PTR pMachineCode,
++ IN OUT gcZBIAS_PATCH_PARAM_PTR pPatchParam,
++ IN gctUINT hwSupportedInstCount,
++ OUT gctPOINTER* ppCmdBuffer,
++ OUT gctUINT32* pByteSizeOfCmdBuffer,
++    IN OUT gcsHINT_PTR pHints /* The user must copy the original hints into this one, then pass it in */
++ );
++
++gceSTATUS
++gcSHADER_InsertList(
++ IN gcSHADER Shader,
++ IN gcSHADER_LIST * Root,
++ IN gctINT Index,
++ IN gctINT Data0,
++ IN gctINT Data1
++ );
++
++gceSTATUS
++gcSHADER_UpdateList(
++ IN gcSHADER Shader,
++ IN gcSHADER_LIST Root,
++ IN gctINT Index,
++ IN gctINT NewIndex
++ );
++
++gceSTATUS
++gcSHADER_DeleteList(
++ IN gcSHADER Shader,
++ IN gcSHADER_LIST * Root,
++ IN gctINT Index
++ );
++
++gceSTATUS
++gcSHADER_FindList(
++ IN gcSHADER Shader,
++ IN gcSHADER_LIST Root,
++ IN gctINT Index,
++ IN gcSHADER_LIST * List
++ );
++
++gceSTATUS
++gcSHADER_InsertWClipList(
++ IN gcSHADER Shader,
++ IN gctINT Index,
++ IN gctINT Data0,
++ IN gctINT Data1
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* VIVANTE_NO_3D */
++#endif /* __gc_hal_compiler_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_driver.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_driver.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_driver.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_driver.h 2015-07-27 23:13:06.210822785 +0200
+@@ -0,0 +1,1051 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_driver_h_
++#define __gc_hal_driver_h_
++
++#include "gc_hal_enum.h"
++#include "gc_hal_types.h"
++
++#if gcdENABLE_VG
++#include "gc_hal_driver_vg.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++******************************* I/O Control Codes ******************************
++\******************************************************************************/
++
++#define gcvHAL_CLASS "galcore"
++#define IOCTL_GCHAL_INTERFACE 30000
++#define IOCTL_GCHAL_KERNEL_INTERFACE 30001
++#define IOCTL_GCHAL_TERMINATE 30002
++
++/******************************************************************************\
++********************************* Command Codes ********************************
++\******************************************************************************/
++
++typedef enum _gceHAL_COMMAND_CODES
++{
++ /* Generic query. */
++ gcvHAL_QUERY_VIDEO_MEMORY,
++ gcvHAL_QUERY_CHIP_IDENTITY,
++
++ /* Contiguous memory. */
++ gcvHAL_ALLOCATE_NON_PAGED_MEMORY,
++ gcvHAL_FREE_NON_PAGED_MEMORY,
++ gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY,
++ gcvHAL_FREE_CONTIGUOUS_MEMORY,
++
++ /* Video memory allocation. */
++ gcvHAL_ALLOCATE_VIDEO_MEMORY, /* Enforced alignment. */
++ gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY, /* No alignment. */
++ gcvHAL_FREE_VIDEO_MEMORY,
++
++ /* Physical-to-logical mapping. */
++ gcvHAL_MAP_MEMORY,
++ gcvHAL_UNMAP_MEMORY,
++
++ /* Logical-to-physical mapping. */
++ gcvHAL_MAP_USER_MEMORY,
++ gcvHAL_UNMAP_USER_MEMORY,
++
++ /* Surface lock/unlock. */
++ gcvHAL_LOCK_VIDEO_MEMORY,
++ gcvHAL_UNLOCK_VIDEO_MEMORY,
++
++ /* Event queue. */
++ gcvHAL_EVENT_COMMIT,
++
++ gcvHAL_USER_SIGNAL,
++ gcvHAL_SIGNAL,
++ gcvHAL_WRITE_DATA,
++
++ gcvHAL_COMMIT,
++ gcvHAL_STALL,
++
++ gcvHAL_READ_REGISTER,
++ gcvHAL_WRITE_REGISTER,
++
++ gcvHAL_GET_PROFILE_SETTING,
++ gcvHAL_SET_PROFILE_SETTING,
++
++ gcvHAL_READ_ALL_PROFILE_REGISTERS,
++ gcvHAL_PROFILE_REGISTERS_2D,
++#if VIVANTE_PROFILER_PERDRAW
++ gcvHAL_READ_PROFILER_REGISTER_SETTING,
++#endif
++
++ /* Power management. */
++ gcvHAL_SET_POWER_MANAGEMENT_STATE,
++ gcvHAL_QUERY_POWER_MANAGEMENT_STATE,
++
++ gcvHAL_GET_BASE_ADDRESS,
++
++ gcvHAL_SET_IDLE, /* reserved */
++
++ /* Queries. */
++ gcvHAL_QUERY_KERNEL_SETTINGS,
++
++ /* Reset. */
++ gcvHAL_RESET,
++
++ /* Map physical address into handle. */
++ gcvHAL_MAP_PHYSICAL,
++
++ /* Debugger stuff. */
++ gcvHAL_DEBUG,
++
++ /* Cache stuff. */
++ gcvHAL_CACHE,
++
++ /* TimeStamp */
++ gcvHAL_TIMESTAMP,
++
++ /* Database. */
++ gcvHAL_DATABASE,
++
++ /* Version. */
++ gcvHAL_VERSION,
++
++ /* Chip info */
++ gcvHAL_CHIP_INFO,
++
++ /* Process attaching/detaching. */
++ gcvHAL_ATTACH,
++ gcvHAL_DETACH,
++
++ /* Composition. */
++ gcvHAL_COMPOSE,
++
++ /* Set timeOut value */
++ gcvHAL_SET_TIMEOUT,
++
++ /* Frame database. */
++ gcvHAL_GET_FRAME_INFO,
++
++ /* Shared info for each process */
++ gcvHAL_GET_SHARED_INFO,
++ gcvHAL_SET_SHARED_INFO,
++ gcvHAL_QUERY_COMMAND_BUFFER,
++
++ gcvHAL_COMMIT_DONE,
++
++ /* GPU and event dump */
++ gcvHAL_DUMP_GPU_STATE,
++ gcvHAL_DUMP_EVENT,
++
++ /* Virtual command buffer. */
++ gcvHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER,
++ gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER,
++
++ /* FSCALE_VAL. */
++ gcvHAL_SET_FSCALE_VALUE,
++ gcvHAL_GET_FSCALE_VALUE,
++
++ /* Reset time stamp. */
++ gcvHAL_QUERY_RESET_TIME_STAMP,
++
++ /* Sync point operations. */
++ gcvHAL_SYNC_POINT,
++
++ /* Create native fence and return its fd. */
++ gcvHAL_CREATE_NATIVE_FENCE,
++
++ /* Video memory database */
++ gcvHAL_VIDMEM_DATABASE,
++}
++gceHAL_COMMAND_CODES;
++
++/******************************************************************************\
++****************************** Interface Structure *****************************
++\******************************************************************************/
++
++#define gcdMAX_PROFILE_FILE_NAME 128
++
++/* Kernel settings. */
++typedef struct _gcsKERNEL_SETTINGS
++{
++    /* Real-time signal used between kernel and user. */
++ gctINT signal;
++}
++gcsKERNEL_SETTINGS;
++
++
++/* gcvHAL_QUERY_CHIP_IDENTITY */
++typedef struct _gcsHAL_QUERY_CHIP_IDENTITY * gcsHAL_QUERY_CHIP_IDENTITY_PTR;
++typedef struct _gcsHAL_QUERY_CHIP_IDENTITY
++{
++
++ /* Chip model. */
++ gceCHIPMODEL chipModel;
++
++ /* Revision value.*/
++ gctUINT32 chipRevision;
++
++ /* Supported feature fields. */
++ gctUINT32 chipFeatures;
++
++ /* Supported minor feature fields. */
++ gctUINT32 chipMinorFeatures;
++
++ /* Supported minor feature 1 fields. */
++ gctUINT32 chipMinorFeatures1;
++
++ /* Supported minor feature 2 fields. */
++ gctUINT32 chipMinorFeatures2;
++
++ /* Supported minor feature 3 fields. */
++ gctUINT32 chipMinorFeatures3;
++
++ /* Supported minor feature 4 fields. */
++ gctUINT32 chipMinorFeatures4;
++
++ /* Number of streams supported. */
++ gctUINT32 streamCount;
++
++ /* Total number of temporary registers per thread. */
++ gctUINT32 registerMax;
++
++ /* Maximum number of threads. */
++ gctUINT32 threadCount;
++
++ /* Number of shader cores. */
++ gctUINT32 shaderCoreCount;
++
++ /* Size of the vertex cache. */
++ gctUINT32 vertexCacheSize;
++
++ /* Number of entries in the vertex output buffer. */
++ gctUINT32 vertexOutputBufferSize;
++
++ /* Number of pixel pipes. */
++ gctUINT32 pixelPipes;
++
++ /* Number of instructions. */
++ gctUINT32 instructionCount;
++
++ /* Number of constants. */
++ gctUINT32 numConstants;
++
++ /* Buffer size */
++ gctUINT32 bufferSize;
++
++ /* Number of varyings */
++ gctUINT32 varyingsCount;
++
++ /* Supertile layout style in hardware */
++ gctUINT32 superTileMode;
++
++ /* Special control bits for 2D chip. */
++ gctUINT32 chip2DControl;
++}
++gcsHAL_QUERY_CHIP_IDENTITY;
++
++/* gcvHAL_COMPOSE. */
++typedef struct _gcsHAL_COMPOSE * gcsHAL_COMPOSE_PTR;
++typedef struct _gcsHAL_COMPOSE
++{
++ /* Composition state buffer. */
++ gctUINT64 physical;
++ gctUINT64 logical;
++ gctUINT offset;
++ gctUINT size;
++
++ /* Composition end signal. */
++ gctUINT64 process;
++ gctUINT64 signal;
++
++ /* User signals. */
++ gctUINT64 userProcess;
++ gctUINT64 userSignal1;
++ gctUINT64 userSignal2;
++
++#if defined(__QNXNTO__)
++ /* Client pulse side-channel connection ID. */
++ gctINT32 coid;
++
++ /* Set by server. */
++ gctINT32 rcvid;
++#endif
++}
++gcsHAL_COMPOSE;
++
++
++typedef struct _gcsHAL_INTERFACE
++{
++ /* Command code. */
++ gceHAL_COMMAND_CODES command;
++
++ /* Hardware type. */
++ gceHARDWARE_TYPE hardwareType;
++
++ /* Status value. */
++ gceSTATUS status;
++
++ /* Handle to this interface channel. */
++ gctUINT64 handle;
++
++ /* Pid of the client. */
++ gctUINT32 pid;
++
++ /* Union of command structures. */
++ union _u
++ {
++ /* gcvHAL_GET_BASE_ADDRESS */
++ struct _gcsHAL_GET_BASE_ADDRESS
++ {
++ /* Physical memory address of internal memory. */
++ OUT gctUINT32 baseAddress;
++ }
++ GetBaseAddress;
++
++ /* gcvHAL_QUERY_VIDEO_MEMORY */
++ struct _gcsHAL_QUERY_VIDEO_MEMORY
++ {
++ /* Physical memory address of internal memory. Just a name. */
++ OUT gctUINT32 internalPhysical;
++
++ /* Size in bytes of internal memory. */
++ OUT gctUINT64 internalSize;
++
++ /* Physical memory address of external memory. Just a name. */
++ OUT gctUINT32 externalPhysical;
++
++ /* Size in bytes of external memory.*/
++ OUT gctUINT64 externalSize;
++
++ /* Physical memory address of contiguous memory. Just a name. */
++ OUT gctUINT32 contiguousPhysical;
++
++ /* Size in bytes of contiguous memory.*/
++ OUT gctUINT64 contiguousSize;
++ }
++ QueryVideoMemory;
++
++ /* gcvHAL_QUERY_CHIP_IDENTITY */
++ gcsHAL_QUERY_CHIP_IDENTITY QueryChipIdentity;
++
++ /* gcvHAL_MAP_MEMORY */
++ struct _gcsHAL_MAP_MEMORY
++ {
++ /* Physical memory address to map. Just a name on Linux/Qnx. */
++ IN gctUINT32 physical;
++
++ /* Number of bytes in physical memory to map. */
++ IN gctUINT64 bytes;
++
++ /* Address of mapped memory. */
++ OUT gctUINT64 logical;
++ }
++ MapMemory;
++
++ /* gcvHAL_UNMAP_MEMORY */
++ struct _gcsHAL_UNMAP_MEMORY
++ {
++ /* Physical memory address to unmap. Just a name on Linux/Qnx. */
++ IN gctUINT32 physical;
++
++ /* Number of bytes in physical memory to unmap. */
++ IN gctUINT64 bytes;
++
++ /* Address of mapped memory to unmap. */
++ IN gctUINT64 logical;
++ }
++ UnmapMemory;
++
++ /* gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY */
++ struct _gcsHAL_ALLOCATE_LINEAR_VIDEO_MEMORY
++ {
++ /* Number of bytes to allocate. */
++ IN OUT gctUINT bytes;
++
++ /* Buffer alignment. */
++ IN gctUINT alignment;
++
++ /* Type of allocation. */
++ IN gceSURF_TYPE type;
++
++ /* Memory pool to allocate from. */
++ IN OUT gcePOOL pool;
++
++ /* Allocated video memory in gcuVIDMEM_NODE. */
++ OUT gctUINT64 node;
++ }
++ AllocateLinearVideoMemory;
++
++ /* gcvHAL_ALLOCATE_VIDEO_MEMORY */
++ struct _gcsHAL_ALLOCATE_VIDEO_MEMORY
++ {
++ /* Width of rectangle to allocate. */
++ IN OUT gctUINT width;
++
++ /* Height of rectangle to allocate. */
++ IN OUT gctUINT height;
++
++ /* Depth of rectangle to allocate. */
++ IN gctUINT depth;
++
++            /* Format of rectangle to allocate, in gceSURF_FORMAT. */
++ IN gceSURF_FORMAT format;
++
++ /* Type of allocation. */
++ IN gceSURF_TYPE type;
++
++ /* Memory pool to allocate from. */
++ IN OUT gcePOOL pool;
++
++ /* Allocated video memory in gcuVIDMEM_NODE. */
++ OUT gctUINT64 node;
++ }
++ AllocateVideoMemory;
++
++ /* gcvHAL_FREE_VIDEO_MEMORY */
++ struct _gcsHAL_FREE_VIDEO_MEMORY
++ {
++ /* Allocated video memory in gcuVIDMEM_NODE. */
++ IN gctUINT64 node;
++
++#ifdef __QNXNTO__
++/* TODO: This is part of the unlock - why is it here? */
++ /* Mapped logical address to unmap in user space. */
++ OUT gctUINT64 memory;
++
++ /* Number of bytes to allocated. */
++ OUT gctUINT64 bytes;
++#endif
++ }
++ FreeVideoMemory;
++
++ /* gcvHAL_LOCK_VIDEO_MEMORY */
++ struct _gcsHAL_LOCK_VIDEO_MEMORY
++ {
++            /* Allocated video memory in gcuVIDMEM_NODE. */
++ IN gctUINT64 node;
++
++ /* Cache configuration. */
++            /* Only gcvPOOL_CONTIGUOUS and gcvPOOL_VIRTUAL
++            ** can be configured */
++ IN gctBOOL cacheable;
++
++ /* Hardware specific address. */
++ OUT gctUINT32 address;
++
++ /* Mapped logical address. */
++ OUT gctUINT64 memory;
++ }
++ LockVideoMemory;
++
++ /* gcvHAL_UNLOCK_VIDEO_MEMORY */
++ struct _gcsHAL_UNLOCK_VIDEO_MEMORY
++ {
++ /* Allocated video memory in gcuVIDMEM_NODE. */
++ IN gctUINT64 node;
++
++ /* Type of surface. */
++ IN gceSURF_TYPE type;
++
++            /* Flag to unlock surface asynchronously. */
++ IN OUT gctBOOL asynchroneous;
++ }
++ UnlockVideoMemory;
++
++ /* gcvHAL_ALLOCATE_NON_PAGED_MEMORY */
++ struct _gcsHAL_ALLOCATE_NON_PAGED_MEMORY
++ {
++ /* Number of bytes to allocate. */
++ IN OUT gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ OUT gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ OUT gctUINT64 logical;
++ }
++ AllocateNonPagedMemory;
++
++ /* gcvHAL_FREE_NON_PAGED_MEMORY */
++ struct _gcsHAL_FREE_NON_PAGED_MEMORY
++ {
++ /* Number of bytes allocated. */
++ IN gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ IN gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ IN gctUINT64 logical;
++ }
++ FreeNonPagedMemory;
++
++        /* gcvHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER */
++ struct _gcsHAL_ALLOCATE_VIRTUAL_COMMAND_BUFFER
++ {
++ /* Number of bytes to allocate. */
++ IN OUT gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ OUT gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ OUT gctUINT64 logical;
++ }
++ AllocateVirtualCommandBuffer;
++
++        /* gcvHAL_FREE_VIRTUAL_COMMAND_BUFFER */
++ struct _gcsHAL_FREE_VIRTUAL_COMMAND_BUFFER
++ {
++ /* Number of bytes allocated. */
++ IN gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ IN gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ IN gctUINT64 logical;
++ }
++ FreeVirtualCommandBuffer;
++
++ /* gcvHAL_EVENT_COMMIT. */
++ struct _gcsHAL_EVENT_COMMIT
++ {
++ /* Event queue in gcsQUEUE. */
++ IN gctUINT64 queue;
++ }
++ Event;
++
++ /* gcvHAL_COMMIT */
++ struct _gcsHAL_COMMIT
++ {
++ /* Context buffer object gckCONTEXT. */
++ IN gctUINT64 context;
++
++ /* Command buffer gcoCMDBUF. */
++ IN gctUINT64 commandBuffer;
++
++ /* State delta buffer in gcsSTATE_DELTA. */
++ gctUINT64 delta;
++
++ /* Event queue in gcsQUEUE. */
++ IN gctUINT64 queue;
++ }
++ Commit;
++
++ /* gcvHAL_MAP_USER_MEMORY */
++ struct _gcsHAL_MAP_USER_MEMORY
++ {
++ /* Base address of user memory to map. */
++ IN gctUINT64 memory;
++
++ /* Physical address of user memory to map. */
++ IN gctUINT32 physical;
++
++ /* Size of user memory in bytes to map. */
++ IN gctUINT64 size;
++
++ /* Info record required by gcvHAL_UNMAP_USER_MEMORY. Just a name. */
++ OUT gctUINT32 info;
++
++ /* Physical address of mapped memory. */
++ OUT gctUINT32 address;
++ }
++ MapUserMemory;
++
++ /* gcvHAL_UNMAP_USER_MEMORY */
++ struct _gcsHAL_UNMAP_USER_MEMORY
++ {
++ /* Base address of user memory to unmap. */
++ IN gctUINT64 memory;
++
++ /* Size of user memory in bytes to unmap. */
++ IN gctUINT64 size;
++
++ /* Info record returned by gcvHAL_MAP_USER_MEMORY. Just a name. */
++ IN gctUINT32 info;
++
++ /* Physical address of mapped memory as returned by
++ gcvHAL_MAP_USER_MEMORY. */
++ IN gctUINT32 address;
++ }
++ UnmapUserMemory;
++#if !USE_NEW_LINUX_SIGNAL
++ /* gcsHAL_USER_SIGNAL */
++ struct _gcsHAL_USER_SIGNAL
++ {
++ /* Command. */
++ gceUSER_SIGNAL_COMMAND_CODES command;
++
++ /* Signal ID. */
++ IN OUT gctINT id;
++
++ /* Reset mode. */
++ IN gctBOOL manualReset;
++
++        /* Wait timeout. */
++ IN gctUINT32 wait;
++
++ /* State. */
++ IN gctBOOL state;
++ }
++ UserSignal;
++#endif
++
++ /* gcvHAL_SIGNAL. */
++ struct _gcsHAL_SIGNAL
++ {
++ /* Signal handle to signal gctSIGNAL. */
++ IN gctUINT64 signal;
++
++ /* Reserved gctSIGNAL. */
++ IN gctUINT64 auxSignal;
++
++ /* Process owning the signal gctHANDLE. */
++ IN gctUINT64 process;
++
++#if defined(__QNXNTO__)
++ /* Client pulse side-channel connection ID. Set by client in gcoOS_CreateSignal. */
++ IN gctINT32 coid;
++
++ /* Set by server. */
++ IN gctINT32 rcvid;
++#endif
++ /* Event generated from where of pipeline */
++ IN gceKERNEL_WHERE fromWhere;
++ }
++ Signal;
++
++ /* gcvHAL_WRITE_DATA. */
++ struct _gcsHAL_WRITE_DATA
++ {
++ /* Address to write data to. */
++ IN gctUINT32 address;
++
++ /* Data to write. */
++ IN gctUINT32 data;
++ }
++ WriteData;
++
++ /* gcvHAL_ALLOCATE_CONTIGUOUS_MEMORY */
++ struct _gcsHAL_ALLOCATE_CONTIGUOUS_MEMORY
++ {
++ /* Number of bytes to allocate. */
++ IN OUT gctUINT64 bytes;
++
++ /* Hardware address of allocation. */
++ OUT gctUINT32 address;
++
++ /* Physical address of allocation. Just a name. */
++ OUT gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ OUT gctUINT64 logical;
++ }
++ AllocateContiguousMemory;
++
++ /* gcvHAL_FREE_CONTIGUOUS_MEMORY */
++ struct _gcsHAL_FREE_CONTIGUOUS_MEMORY
++ {
++ /* Number of bytes allocated. */
++ IN gctUINT64 bytes;
++
++ /* Physical address of allocation. Just a name. */
++ IN gctUINT32 physical;
++
++ /* Logical address of allocation. */
++ IN gctUINT64 logical;
++ }
++ FreeContiguousMemory;
++
++ /* gcvHAL_READ_REGISTER */
++ struct _gcsHAL_READ_REGISTER
++ {
++            /* Logical address of register to read from. */
++ IN gctUINT32 address;
++
++ /* Data read. */
++ OUT gctUINT32 data;
++ }
++ ReadRegisterData;
++
++ /* gcvHAL_WRITE_REGISTER */
++ struct _gcsHAL_WRITE_REGISTER
++ {
++ /* Logical address of memory to write data to. */
++ IN gctUINT32 address;
++
++            /* Data to write. */
++ IN gctUINT32 data;
++ }
++ WriteRegisterData;
++
++#if VIVANTE_PROFILER
++ /* gcvHAL_GET_PROFILE_SETTING */
++ struct _gcsHAL_GET_PROFILE_SETTING
++ {
++ /* Enable profiling */
++ OUT gctBOOL enable;
++
++ /* The profile file name */
++ OUT gctCHAR fileName[gcdMAX_PROFILE_FILE_NAME];
++ }
++ GetProfileSetting;
++
++ /* gcvHAL_SET_PROFILE_SETTING */
++ struct _gcsHAL_SET_PROFILE_SETTING
++ {
++ /* Enable profiling */
++ IN gctBOOL enable;
++
++ /* The profile file name */
++ IN gctCHAR fileName[gcdMAX_PROFILE_FILE_NAME];
++ }
++ SetProfileSetting;
++
++#if VIVANTE_PROFILER_PERDRAW
++ /* gcvHAL_READ_PROFILER_REGISTER_SETTING */
++ struct _gcsHAL_READ_PROFILER_REGISTER_SETTING
++ {
++            /* Should clear the register. */
++ IN gctBOOL bclear;
++ }
++ SetProfilerRegisterClear;
++#endif
++
++ /* gcvHAL_READ_ALL_PROFILE_REGISTERS */
++ struct _gcsHAL_READ_ALL_PROFILE_REGISTERS
++ {
++#if VIVANTE_PROFILER_CONTEXT
++ /* Context buffer object gckCONTEXT. Just a name. */
++ IN gctUINT32 context;
++#endif
++ /* Data read. */
++ OUT gcsPROFILER_COUNTERS counters;
++ }
++ RegisterProfileData;
++
++ /* gcvHAL_PROFILE_REGISTERS_2D */
++ struct _gcsHAL_PROFILE_REGISTERS_2D
++ {
++ /* Data read in gcs2D_PROFILE. */
++ OUT gctUINT64 hwProfile2D;
++ }
++ RegisterProfileData2D;
++#endif
++ /* Power management. */
++ /* gcvHAL_SET_POWER_MANAGEMENT_STATE */
++ struct _gcsHAL_SET_POWER_MANAGEMENT
++ {
++            /* Power management state to set. */
++ IN gceCHIPPOWERSTATE state;
++ }
++ SetPowerManagement;
++
++ /* gcvHAL_QUERY_POWER_MANAGEMENT_STATE */
++ struct _gcsHAL_QUERY_POWER_MANAGEMENT
++ {
++ /* Data read. */
++ OUT gceCHIPPOWERSTATE state;
++
++ /* Idle query. */
++ OUT gctBOOL isIdle;
++ }
++ QueryPowerManagement;
++
++ /* gcvHAL_QUERY_KERNEL_SETTINGS */
++ struct _gcsHAL_QUERY_KERNEL_SETTINGS
++ {
++ /* Settings.*/
++ OUT gcsKERNEL_SETTINGS settings;
++ }
++ QueryKernelSettings;
++
++ /* gcvHAL_MAP_PHYSICAL */
++ struct _gcsHAL_MAP_PHYSICAL
++ {
++ /* gcvTRUE to map, gcvFALSE to unmap. */
++ IN gctBOOL map;
++
++ /* Physical address. */
++ IN OUT gctUINT64 physical;
++ }
++ MapPhysical;
++
++ /* gcvHAL_DEBUG */
++ struct _gcsHAL_DEBUG
++ {
++ /* If gcvTRUE, set the debug information. */
++ IN gctBOOL set;
++ IN gctUINT32 level;
++ IN gctUINT32 zones;
++ IN gctBOOL enable;
++
++ IN gceDEBUG_MESSAGE_TYPE type;
++ IN gctUINT32 messageSize;
++
++ /* Message to print if not empty. */
++ IN gctCHAR message[80];
++ }
++ Debug;
++
++ /* gcvHAL_CACHE */
++ struct _gcsHAL_CACHE
++ {
++ IN gceCACHEOPERATION operation;
++ /* gctHANDLE */
++ IN gctUINT64 process;
++ IN gctUINT64 logical;
++ IN gctUINT64 bytes;
++ /* gcuVIDMEM_NODE_PTR */
++ IN gctUINT64 node;
++ }
++ Cache;
++
++ /* gcvHAL_TIMESTAMP */
++ struct _gcsHAL_TIMESTAMP
++ {
++ /* Timer select. */
++ IN gctUINT32 timer;
++
++ /* Timer request type (0-stop, 1-start, 2-send delta). */
++ IN gctUINT32 request;
++
++ /* Result of delta time in microseconds. */
++ OUT gctINT32 timeDelta;
++ }
++ TimeStamp;
++
++ /* gcvHAL_DATABASE */
++ struct _gcsHAL_DATABASE
++ {
++ /* Set to gcvTRUE if you want to query a particular process ID.
++ ** Set to gcvFALSE to query the last detached process. */
++ IN gctBOOL validProcessID;
++
++ /* Process ID to query. */
++ IN gctUINT32 processID;
++
++ /* Information. */
++ OUT gcuDATABASE_INFO vidMem;
++ OUT gcuDATABASE_INFO nonPaged;
++ OUT gcuDATABASE_INFO contiguous;
++ OUT gcuDATABASE_INFO gpuIdle;
++ }
++ Database;
++
++ /* gcvHAL_VIDMEM_DATABASE */
++ struct _gcsHAL_VIDMEM_DATABASE
++ {
++ /* Set to gcvTRUE if you want to query a particular process ID.
++ ** Set to gcvFALSE to query the last detached process. */
++ IN gctBOOL validProcessID;
++
++ /* Process ID to query. */
++ IN gctUINT32 processID;
++
++ /* Information. */
++ OUT gcuDATABASE_INFO vidMemResv;
++ OUT gcuDATABASE_INFO vidMemCont;
++ OUT gcuDATABASE_INFO vidMemVirt;
++ }
++ VidMemDatabase;
++
++ /* gcvHAL_VERSION */
++ struct _gcsHAL_VERSION
++ {
++ /* Major version: N.n.n. */
++ OUT gctINT32 major;
++
++ /* Minor version: n.N.n. */
++ OUT gctINT32 minor;
++
++ /* Patch version: n.n.N. */
++ OUT gctINT32 patch;
++
++ /* Build version. */
++ OUT gctUINT32 build;
++ }
++ Version;
++
++ /* gcvHAL_CHIP_INFO */
++ struct _gcsHAL_CHIP_INFO
++ {
++ /* Chip count. */
++ OUT gctINT32 count;
++
++ /* Chip types. */
++ OUT gceHARDWARE_TYPE types[gcdCHIP_COUNT];
++ }
++ ChipInfo;
++
++ /* gcvHAL_ATTACH */
++ struct _gcsHAL_ATTACH
++ {
++ /* Context buffer object gckCONTEXT. Just a name. */
++ OUT gctUINT32 context;
++
++ /* Number of states in the buffer. */
++ OUT gctUINT64 stateCount;
++ }
++ Attach;
++
++ /* gcvHAL_DETACH */
++ struct _gcsHAL_DETACH
++ {
++ /* Context buffer object gckCONTEXT. Just a name. */
++ IN gctUINT32 context;
++ }
++ Detach;
++
++ /* gcvHAL_COMPOSE. */
++ gcsHAL_COMPOSE Compose;
++
++ /* gcvHAL_GET_FRAME_INFO. */
++ struct _gcsHAL_GET_FRAME_INFO
++ {
++ /* gcsHAL_FRAME_INFO* */
++ OUT gctUINT64 frameInfo;
++ }
++ GetFrameInfo;
++
++        /* gcvHAL_SET_TIMEOUT. */
++ struct _gcsHAL_SET_TIMEOUT
++ {
++ gctUINT32 timeOut;
++ }
++ SetTimeOut;
++
++#if gcdENABLE_VG
++ /* gcvHAL_COMMIT */
++ struct _gcsHAL_VGCOMMIT
++ {
++ /* Context buffer in gcsVGCONTEXT. */
++ IN gctUINT64 context;
++
++ /* Command queue in gcsVGCMDQUEUE. */
++ IN gctUINT64 queue;
++
++ /* Number of entries in the queue. */
++ IN gctUINT entryCount;
++
++ /* Task table in gcsTASK_MASTER_TABLE. */
++ IN gctUINT64 taskTable;
++ }
++ VGCommit;
++
++ /* gcvHAL_QUERY_COMMAND_BUFFER */
++ struct _gcsHAL_QUERY_COMMAND_BUFFER
++ {
++ /* Command buffer attributes. */
++ OUT gcsCOMMAND_BUFFER_INFO information;
++ }
++ QueryCommandBuffer;
++
++#endif
++
++ struct _gcsHAL_GET_SHARED_INFO
++ {
++ /* Process id. */
++ IN gctUINT32 pid;
++
++ /* Data id. */
++ IN gctUINT32 dataId;
++
++ /* Data size. */
++ IN gctSIZE_T bytes;
++
++ /* Pointer to save the shared data. */
++ OUT gctPOINTER data;
++ }
++ GetSharedInfo;
++
++ struct _gcsHAL_SET_SHARED_INFO
++ {
++ /* Data id. */
++ IN gctUINT32 dataId;
++
++ /* Data to be shared. */
++ IN gctPOINTER data;
++
++ /* Data size. */
++ IN gctSIZE_T bytes;
++ }
++ SetSharedInfo;
++
++ struct _gcsHAL_SET_FSCALE_VALUE
++ {
++ IN gctUINT value;
++ }
++ SetFscaleValue;
++
++ struct _gcsHAL_GET_FSCALE_VALUE
++ {
++ OUT gctUINT value;
++ OUT gctUINT minValue;
++ OUT gctUINT maxValue;
++ }
++ GetFscaleValue;
++
++ struct _gcsHAL_QUERY_RESET_TIME_STAMP
++ {
++ OUT gctUINT64 timeStamp;
++ }
++ QueryResetTimeStamp;
++
++ struct _gcsHAL_SYNC_POINT
++ {
++ /* Command. */
++ gceSYNC_POINT_COMMAND_CODES command;
++
++ /* Sync point. */
++ IN OUT gctUINT64 syncPoint;
++
++ /* From where. */
++ IN gceKERNEL_WHERE fromWhere;
++
++ /* Signaled state. */
++ OUT gctBOOL state;
++ }
++ SyncPoint;
++
++ struct _gcsHAL_CREATE_NATIVE_FENCE
++ {
++ /* Signal id to dup. */
++ IN gctUINT64 syncPoint;
++
++ /* Native fence file descriptor. */
++ OUT gctINT fenceFD;
++
++ }
++ CreateNativeFence;
++ }
++ u;
++}
++gcsHAL_INTERFACE;
++
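++/* A minimal usage sketch (illustrative only). How this structure reaches
++** the kernel driver (the wrapper passed to ioctl()) is defined by the
++** OS-specific layer and is not part of this header; gcvHARDWARE_3D is
++** assumed to come from gc_hal_enum.h:
++**
++**     gcsHAL_INTERFACE iface;
++**
++**     iface.command = gcvHAL_QUERY_CHIP_IDENTITY;
++**     iface.hardwareType = gcvHARDWARE_3D;
++**
++** The filled structure is submitted through IOCTL_GCHAL_INTERFACE on the
++** gcvHAL_CLASS ("galcore") device node; on return, check iface.status
++** and read iface.u.QueryChipIdentity.
++*/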
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_driver_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_driver_vg.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_driver_vg.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_driver_vg.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_driver_vg.h 2015-07-27 23:13:06.210822785 +0200
+@@ -0,0 +1,270 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_driver_vg_h_
++#define __gc_hal_driver_vg_h_
++
++
++
++#include "gc_hal_types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++******************************* I/O Control Codes ******************************
++\******************************************************************************/
++
++#define gcvHAL_CLASS "galcore"
++#define IOCTL_GCHAL_INTERFACE 30000
++
++/******************************************************************************\
++********************************* Command Codes ********************************
++\******************************************************************************/
++
++/******************************************************************************\
++********************* Command buffer information structure. ********************
++\******************************************************************************/
++
++typedef struct _gcsCOMMAND_BUFFER_INFO * gcsCOMMAND_BUFFER_INFO_PTR;
++typedef struct _gcsCOMMAND_BUFFER_INFO
++{
++ /* FE command buffer interrupt ID. */
++ gctINT32 feBufferInt;
++
++ /* TS overflow interrupt ID. */
++ gctINT32 tsOverflowInt;
++
++ /* Alignment and mask for the buffer address. */
++ gctUINT addressMask;
++ gctSIZE_T addressAlignment;
++
++ /* Alignment for each command. */
++ gctSIZE_T commandAlignment;
++
++ /* Number of bytes required by the STATE command. */
++ gctSIZE_T stateCommandSize;
++
++ /* Number of bytes required by the RESTART command. */
++ gctSIZE_T restartCommandSize;
++
++ /* Number of bytes required by the FETCH command. */
++ gctSIZE_T fetchCommandSize;
++
++ /* Number of bytes required by the CALL command. */
++ gctSIZE_T callCommandSize;
++
++ /* Number of bytes required by the RETURN command. */
++ gctSIZE_T returnCommandSize;
++
++ /* Number of bytes required by the EVENT command. */
++ gctSIZE_T eventCommandSize;
++
++ /* Number of bytes required by the END command. */
++ gctSIZE_T endCommandSize;
++
++ /* Number of bytes reserved at the tail of a static command buffer. */
++ gctSIZE_T staticTailSize;
++
++ /* Number of bytes reserved at the tail of a dynamic command buffer. */
++ gctSIZE_T dynamicTailSize;
++}
++gcsCOMMAND_BUFFER_INFO;
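
The sizes and alignments reported in gcsCOMMAND_BUFFER_INFO are meant to be consumed when laying out command buffers. A small sketch of reserving room for a trailing END command, assuming power-of-two alignment values (an assumption of the sketch, not stated by the header):

    /* Sketch: round a write offset up to the reported command alignment
     * before appending an END command. 'info' would be filled in by a
     * gcvHAL_QUERY_COMMAND_BUFFER request. */
    static gctSIZE_T align_for_end(const gcsCOMMAND_BUFFER_INFO *info,
                                   gctSIZE_T offset)
    {
        gctSIZE_T a = info->commandAlignment;

        offset = (offset + a - 1) & ~(a - 1);

        /* The caller must still leave endCommandSize (plus the static
         * or dynamic tail reservation) free beyond this point. */
        return offset + info->endCommandSize;
    }
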
++
++/******************************************************************************\
++******************************** Task Structures *******************************
++\******************************************************************************/
++
++typedef enum _gceTASK
++{
++ gcvTASK_LINK,
++ gcvTASK_CLUSTER,
++ gcvTASK_INCREMENT,
++ gcvTASK_DECREMENT,
++ gcvTASK_SIGNAL,
++ gcvTASK_LOCKDOWN,
++ gcvTASK_UNLOCK_VIDEO_MEMORY,
++ gcvTASK_FREE_VIDEO_MEMORY,
++ gcvTASK_FREE_CONTIGUOUS_MEMORY,
++ gcvTASK_UNMAP_USER_MEMORY
++}
++gceTASK;
++
++typedef struct _gcsTASK_HEADER * gcsTASK_HEADER_PTR;
++typedef struct _gcsTASK_HEADER
++{
++ /* Task ID. */
++ IN gceTASK id;
++}
++gcsTASK_HEADER;
++
++typedef struct _gcsTASK_LINK * gcsTASK_LINK_PTR;
++typedef struct _gcsTASK_LINK
++{
++ /* Task ID (gcvTASK_LINK). */
++ IN gceTASK id;
++
++ /* Pointer to the next task container. */
++ IN gctPOINTER cotainer;
++
++ /* Pointer to the next task from the next task container. */
++ IN gcsTASK_HEADER_PTR task;
++}
++gcsTASK_LINK;
++
++typedef struct _gcsTASK_CLUSTER * gcsTASK_CLUSTER_PTR;
++typedef struct _gcsTASK_CLUSTER
++{
++ /* Task ID (gcvTASK_CLUSTER). */
++ IN gceTASK id;
++
++ /* Number of tasks in the cluster. */
++ IN gctUINT taskCount;
++}
++gcsTASK_CLUSTER;
++
++typedef struct _gcsTASK_INCREMENT * gcsTASK_INCREMENT_PTR;
++typedef struct _gcsTASK_INCREMENT
++{
++ /* Task ID (gcvTASK_INCREMENT). */
++ IN gceTASK id;
++
++ /* Address of the variable to increment. */
++ IN gctUINT32 address;
++}
++gcsTASK_INCREMENT;
++
++typedef struct _gcsTASK_DECREMENT * gcsTASK_DECREMENT_PTR;
++typedef struct _gcsTASK_DECREMENT
++{
++ /* Task ID (gcvTASK_DECREMENT). */
++ IN gceTASK id;
++
++ /* Address of the variable to decrement. */
++ IN gctUINT32 address;
++}
++gcsTASK_DECREMENT;
++
++typedef struct _gcsTASK_SIGNAL * gcsTASK_SIGNAL_PTR;
++typedef struct _gcsTASK_SIGNAL
++{
++ /* Task ID (gcvTASK_SIGNAL). */
++ IN gceTASK id;
++
++ /* Process owning the signal. */
++ IN gctHANDLE process;
++
++ /* Signal handle to signal. */
++ IN gctSIGNAL signal;
++
++#if defined(__QNXNTO__)
++ IN gctINT32 coid;
++ IN gctINT32 rcvid;
++#endif
++}
++gcsTASK_SIGNAL;
++
++typedef struct _gcsTASK_LOCKDOWN * gcsTASK_LOCKDOWN_PTR;
++typedef struct _gcsTASK_LOCKDOWN
++{
++ /* Task ID (gcvTASK_LOCKDOWN). */
++ IN gceTASK id;
++
++ /* Address of the user space counter. */
++ IN gctUINT32 userCounter;
++
++ /* Address of the kernel space counter. */
++ IN gctUINT32 kernelCounter;
++
++ /* Process owning the signal. */
++ IN gctHANDLE process;
++
++ /* Signal handle to signal. */
++ IN gctSIGNAL signal;
++}
++gcsTASK_LOCKDOWN;
++
++typedef struct _gcsTASK_UNLOCK_VIDEO_MEMORY * gcsTASK_UNLOCK_VIDEO_MEMORY_PTR;
++typedef struct _gcsTASK_UNLOCK_VIDEO_MEMORY
++{
++ /* Task ID (gcvTASK_UNLOCK_VIDEO_MEMORY). */
++ IN gceTASK id;
++
++ /* Allocated video memory. */
++ IN gctUINT64 node;
++}
++gcsTASK_UNLOCK_VIDEO_MEMORY;
++
++typedef struct _gcsTASK_FREE_VIDEO_MEMORY * gcsTASK_FREE_VIDEO_MEMORY_PTR;
++typedef struct _gcsTASK_FREE_VIDEO_MEMORY
++{
++ /* Task ID (gcvTASK_FREE_VIDEO_MEMORY). */
++ IN gceTASK id;
++
++ /* Allocated video memory. */
++ IN gctUINT64 node;
++}
++gcsTASK_FREE_VIDEO_MEMORY;
++
++typedef struct _gcsTASK_FREE_CONTIGUOUS_MEMORY * gcsTASK_FREE_CONTIGUOUS_MEMORY_PTR;
++typedef struct _gcsTASK_FREE_CONTIGUOUS_MEMORY
++{
++ /* Task ID (gcvTASK_FREE_CONTIGUOUS_MEMORY). */
++ IN gceTASK id;
++
++ /* Number of bytes allocated. */
++ IN gctSIZE_T bytes;
++
++ /* Physical address of allocation. */
++ IN gctPHYS_ADDR physical;
++
++ /* Logical address of allocation. */
++ IN gctPOINTER logical;
++}
++gcsTASK_FREE_CONTIGUOUS_MEMORY;
++
++typedef struct _gcsTASK_UNMAP_USER_MEMORY * gcsTASK_UNMAP_USER_MEMORY_PTR;
++typedef struct _gcsTASK_UNMAP_USER_MEMORY
++{
++ /* Task ID (gcvTASK_UNMAP_USER_MEMORY). */
++ IN gceTASK id;
++
++ /* Base address of user memory to unmap. */
++ IN gctPOINTER memory;
++
++ /* Size of user memory in bytes to unmap. */
++ IN gctSIZE_T size;
++
++ /* Info record returned by gcvHAL_MAP_USER_MEMORY. */
++ IN gctPOINTER info;
++
++ /* Physical address of mapped memory as returned by
++ gcvHAL_MAP_USER_MEMORY. */
++ IN gctUINT32 address;
++}
++gcsTASK_UNMAP_USER_MEMORY;
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_driver_vg_h_ */
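
The task records above are evidently packed back to back behind a gcsTASK_CLUSTER header, with taskCount records following it. A sketch of emitting a one-task cluster into a caller-provided buffer; the back-to-back layout is an inference from the structures, and the process/signal arguments are placeholder handles:

    /* Sketch: lay out a one-task cluster (header plus a single SIGNAL
     * task) in a caller-provided buffer and return the bytes used. */
    #include <string.h>

    static gctSIZE_T emit_signal_cluster(void *buffer,
                                         gctHANDLE process,
                                         gctSIGNAL signal)
    {
        gctUINT8 *p = buffer;
        gcsTASK_CLUSTER cluster;
        gcsTASK_SIGNAL task;

        cluster.id        = gcvTASK_CLUSTER;
        cluster.taskCount = 1;
        memcpy(p, &cluster, sizeof(cluster));
        p += sizeof(cluster);

        memset(&task, 0, sizeof(task));   /* also covers the QNX-only fields */
        task.id      = gcvTASK_SIGNAL;
        task.process = process;
        task.signal  = signal;
        memcpy(p, &task, sizeof(task));
        p += sizeof(task);

        return (gctSIZE_T)(p - (gctUINT8 *)buffer);
    }
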
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_dump.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_dump.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_dump.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_dump.h 2015-07-27 23:13:06.210822785 +0200
+@@ -0,0 +1,88 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_dump_h_
++#define __gc_hal_dump_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*
++** FILE LAYOUT:
++**
++** gcsDUMP_FILE structure
++**
++** gcsDUMP_DATA frame
++** gcsDUMP_DATA or gcDUMP_DATA_SIZE records rendering the frame
++** gctUINT8 data[length]
++*/
++
++#define gcvDUMP_FILE_SIGNATURE gcmCC('g','c','D','B')
++
++typedef struct _gcsDUMP_FILE
++{
++ gctUINT32 signature; /* File signature */
++ gctSIZE_T length; /* Length of file */
++ gctUINT32 frames; /* Number of frames in file */
++}
++gcsDUMP_FILE;
++
++typedef enum _gceDUMP_TAG
++{
++ gcvTAG_SURFACE = gcmCC('s','u','r','f'),
++ gcvTAG_FRAME = gcmCC('f','r','m',' '),
++ gcvTAG_COMMAND = gcmCC('c','m','d',' '),
++ gcvTAG_INDEX = gcmCC('i','n','d','x'),
++ gcvTAG_STREAM = gcmCC('s','t','r','m'),
++ gcvTAG_TEXTURE = gcmCC('t','e','x','t'),
++ gcvTAG_RENDER_TARGET = gcmCC('r','n','d','r'),
++ gcvTAG_DEPTH = gcmCC('z','b','u','f'),
++ gcvTAG_RESOLVE = gcmCC('r','s','l','v'),
++ gcvTAG_DELETE = gcmCC('d','e','l',' '),
++}
++gceDUMP_TAG;
++
++typedef struct _gcsDUMP_SURFACE
++{
++ gceDUMP_TAG type; /* Type of record. */
++ gctUINT32 address; /* Address of the surface. */
++ gctINT16 width; /* Width of surface. */
++ gctINT16 height; /* Height of surface. */
++ gceSURF_FORMAT format; /* Surface pixel format. */
++ gctSIZE_T length; /* Number of bytes inside the surface. */
++}
++gcsDUMP_SURFACE;
++
++typedef struct _gcsDUMP_DATA
++{
++ gceDUMP_TAG type; /* Type of record. */
++ gctSIZE_T length; /* Number of bytes of data. */
++ gctUINT32 address; /* Address for the data. */
++}
++gcsDUMP_DATA;
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_dump_h_ */
++
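
Given the file layout described at the top of this header, a dump can be walked by reading the gcsDUMP_FILE header and then skipping each record's payload. A minimal reader sketch (validation and per-tag handling omitted; assumes the base types header has been pulled in):

    /* Sketch: iterate the records of a dump file, printing each record
     * header and skipping its 'length' bytes of payload. */
    #include <stdio.h>
    #include "gc_hal_dump.h"

    static int walk_dump(FILE *f)
    {
        gcsDUMP_FILE hdr;
        gcsDUMP_DATA rec;

        if (fread(&hdr, sizeof(hdr), 1, f) != 1 ||
            hdr.signature != gcvDUMP_FILE_SIGNATURE)
            return -1;

        while (fread(&rec, sizeof(rec), 1, f) == 1)
        {
            printf("record type 0x%08x, %zu bytes at 0x%08x\n",
                   (unsigned)rec.type, (size_t)rec.length,
                   (unsigned)rec.address);
            fseek(f, (long)rec.length, SEEK_CUR);   /* skip payload */
        }
        return 0;
    }
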
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_eglplatform.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_eglplatform.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_eglplatform.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_eglplatform.h 2015-07-27 23:13:06.210822785 +0200
+@@ -0,0 +1,627 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++#ifndef __gc_hal_eglplatform_h_
++#define __gc_hal_eglplatform_h_
++
++/* Include VDK types. */
++#include "gc_hal_types.h"
++#include "gc_hal_base.h"
++#include "gc_hal_eglplatform_type.h"
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++
++#if defined(_WIN32) || defined(__VC32__) && !defined(__CYGWIN__) && !defined(__SCITECH_SNAP__)
++/* Win32 and Windows CE platforms. */
++#include <windows.h>
++typedef HDC HALNativeDisplayType;
++typedef HWND HALNativeWindowType;
++typedef HBITMAP HALNativePixmapType;
++
++typedef struct __BITFIELDINFO{
++ BITMAPINFO bmi;
++ RGBQUAD bmiColors[2];
++} BITFIELDINFO;
++
++#elif defined(LINUX) && defined(EGL_API_DFB) && !defined(__APPLE__)
++#include <directfb.h>
++typedef struct _DFBDisplay * HALNativeDisplayType;
++typedef struct _DFBWindow * HALNativeWindowType;
++typedef struct _DFBPixmap * HALNativePixmapType;
++
++#elif defined(LINUX) && defined(EGL_API_FB) && !defined(__APPLE__)
++
++#if defined(EGL_API_WL)
++/* Wayland platform. */
++#include "wayland-server.h"
++#include <wayland-egl.h>
++
++#define WL_EGL_NUM_BACKBUFFERS 3
++
++typedef struct _gcsWL_VIV_BUFFER
++{
++ struct wl_resource *wl_buffer;
++ gcoSURF surface;
++ gctINT32 width, height;
++} gcsWL_VIV_BUFFER;
++
++typedef struct _gcsWL_EGL_DISPLAY
++{
++ struct wl_display* wl_display;
++ struct wl_viv* wl_viv;
++ struct wl_registry *registry;
++ struct wl_event_queue *wl_queue;
++} gcsWL_EGL_DISPLAY;
++
++typedef struct _gcsWL_EGL_BUFFER_INFO
++{
++ gctINT32 width;
++ gctINT32 height;
++ gctINT32 stride;
++ gceSURF_FORMAT format;
++ gcuVIDMEM_NODE_PTR node;
++ gcePOOL pool;
++ gctUINT bytes;
++ gcoSURF surface;
++ gcoSURF attached_surface;
++ gctINT32 invalidate;
++ gctBOOL locked;
++} gcsWL_EGL_BUFFER_INFO;
++
++typedef struct _gcsWL_EGL_BUFFER
++{
++ struct wl_buffer* wl_buffer;
++ gcsWL_EGL_BUFFER_INFO info;
++} gcsWL_EGL_BUFFER;
++
++typedef struct _gcsWL_EGL_WINDOW_INFO
++{
++ gctINT32 dx;
++ gctINT32 dy;
++ gctUINT width;
++ gctUINT height;
++ gctINT32 attached_width;
++ gctINT32 attached_height;
++ gceSURF_FORMAT format;
++ gctUINT bpp;
++} gcsWL_EGL_WINDOW_INFO;
++
++struct wl_egl_window
++{
++ gcsWL_EGL_DISPLAY* display;
++ gcsWL_EGL_BUFFER backbuffers[WL_EGL_NUM_BACKBUFFERS];
++ gcsWL_EGL_WINDOW_INFO info;
++ gctUINT current;
++ struct wl_surface* surface;
++ struct wl_callback* frame_callback;
++};
++
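
The backbuffers array and current index above form a small swap ring of WL_EGL_NUM_BACKBUFFERS entries. A sketch of the modulo advance such a ring implies:

    /* Sketch: advance the window's back buffer ring after a swap and
     * return the buffer that becomes current. */
    static gcsWL_EGL_BUFFER *next_backbuffer(struct wl_egl_window *window)
    {
        window->current = (window->current + 1) % WL_EGL_NUM_BACKBUFFERS;
        return &window->backbuffers[window->current];
    }
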
++typedef void* HALNativeDisplayType;
++typedef void* HALNativeWindowType;
++typedef void* HALNativePixmapType;
++#else
++/* Linux platform for FBDEV. */
++typedef struct _FBDisplay * HALNativeDisplayType;
++typedef struct _FBWindow * HALNativeWindowType;
++typedef struct _FBPixmap * HALNativePixmapType;
++#endif
++#elif defined(__ANDROID__) || defined(ANDROID)
++
++struct egl_native_pixmap_t;
++
++#if ANDROID_SDK_VERSION >= 9
++ #include <android/native_window.h>
++
++ typedef struct ANativeWindow* HALNativeWindowType;
++ typedef struct egl_native_pixmap_t* HALNativePixmapType;
++ typedef void* HALNativeDisplayType;
++#else
++ struct android_native_window_t;
++ typedef struct android_native_window_t* HALNativeWindowType;
++ typedef struct egl_native_pixmap_t * HALNativePixmapType;
++ typedef void* HALNativeDisplayType;
++#endif
++
++#elif defined(LINUX) || defined(__APPLE__)
++/* X11 platform. */
++#include <X11/Xlib.h>
++#include <X11/Xutil.h>
++
++typedef Display * HALNativeDisplayType;
++typedef Window HALNativeWindowType;
++
++#ifdef CUSTOM_PIXMAP
++typedef void * HALNativePixmapType;
++#else
++typedef Pixmap HALNativePixmapType;
++#endif /* CUSTOM_PIXMAP */
++
++/* Rename some badly named X defines. */
++#ifdef Status
++# define XStatus int
++# undef Status
++#endif
++#ifdef Always
++# define XAlways 2
++# undef Always
++#endif
++#ifdef CurrentTime
++# undef CurrentTime
++# define XCurrentTime 0
++#endif
++
++#elif defined(__QNXNTO__)
++#include <screen/screen.h>
++
++/* VOID */
++typedef int HALNativeDisplayType;
++typedef screen_window_t HALNativeWindowType;
++typedef screen_pixmap_t HALNativePixmapType;
++
++#else
++
++#error "Platform not recognized"
++
++/* VOID */
++typedef void * HALNativeDisplayType;
++typedef void * HALNativeWindowType;
++typedef void * HALNativePixmapType;
++
++#endif
++
++/* Define DUMMY according to the system. */
++#if defined(EGL_API_WL)
++# define WL_DUMMY (31415926)
++# define EGL_DUMMY WL_DUMMY
++#elif defined(__ANDROID__) || defined(ANDROID)
++# define ANDROID_DUMMY (31415926)
++# define EGL_DUMMY ANDROID_DUMMY
++#else
++# define EGL_DUMMY (31415926)
++#endif
++
++/*******************************************************************************
++** Display. ********************************************************************
++*/
++
++gceSTATUS
++gcoOS_GetDisplay(
++ OUT HALNativeDisplayType * Display,
++ IN gctPOINTER Context
++ );
++
++gceSTATUS
++gcoOS_GetDisplayByIndex(
++ IN gctINT DisplayIndex,
++ OUT HALNativeDisplayType * Display,
++ IN gctPOINTER Context
++ );
++
++gceSTATUS
++gcoOS_GetDisplayInfo(
++ IN HALNativeDisplayType Display,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctSIZE_T * Physical,
++ OUT gctINT * Stride,
++ OUT gctINT * BitsPerPixel
++ );
++
++
++
++gceSTATUS
++gcoOS_GetDisplayInfoEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctUINT DisplayInfoSize,
++ OUT halDISPLAY_INFO * DisplayInfo
++ );
++
++gceSTATUS
++gcoOS_GetNextDisplayInfoExByIndex(
++ IN gctINT Index,
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctUINT DisplayInfoSize,
++ OUT halDISPLAY_INFO * DisplayInfo
++ );
++
++gceSTATUS
++gcoOS_GetDisplayVirtual(
++ IN HALNativeDisplayType Display,
++ OUT gctINT * Width,
++ OUT gctINT * Height
++ );
++
++gceSTATUS
++gcoOS_GetDisplayBackbuffer(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ OUT gctPOINTER * context,
++ OUT gcoSURF * surface,
++ OUT gctUINT * Offset,
++ OUT gctINT * X,
++ OUT gctINT * Y
++ );
++
++gceSTATUS
++gcoOS_SetDisplayVirtual(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctUINT Offset,
++ IN gctINT X,
++ IN gctINT Y
++ );
++
++gceSTATUS
++gcoOS_SetDisplayVirtualEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctPOINTER Context,
++ IN gcoSURF Surface,
++ IN gctUINT Offset,
++ IN gctINT X,
++ IN gctINT Y
++ );
++
++gceSTATUS
++gcoOS_SetSwapInterval(
++ IN HALNativeDisplayType Display,
++ IN gctINT Interval
++ );
++
++gceSTATUS
++gcoOS_GetSwapInterval(
++ IN HALNativeDisplayType Display,
++ IN gctINT_PTR Min,
++ IN gctINT_PTR Max
++ );
++
++gceSTATUS
++gcoOS_DisplayBufferRegions(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctINT NumRects,
++ IN gctINT_PTR Rects
++ );
++
++gceSTATUS
++gcoOS_DestroyDisplay(
++ IN HALNativeDisplayType Display
++ );
++
++gceSTATUS
++gcoOS_InitLocalDisplayInfo(
++ IN HALNativeDisplayType Display,
++ IN OUT gctPOINTER * localDisplay
++ );
++
++gceSTATUS
++gcoOS_DeinitLocalDisplayInfo(
++ IN HALNativeDisplayType Display,
++ IN OUT gctPOINTER * localDisplay
++ );
++
++gceSTATUS
++gcoOS_GetDisplayInfoEx2(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctPOINTER localDisplay,
++ IN gctUINT DisplayInfoSize,
++ OUT halDISPLAY_INFO * DisplayInfo
++ );
++
++gceSTATUS
++gcoOS_GetDisplayBackbufferEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctPOINTER localDisplay,
++ OUT gctPOINTER * context,
++ OUT gcoSURF * surface,
++ OUT gctUINT * Offset,
++ OUT gctINT * X,
++ OUT gctINT * Y
++ );
++
++gceSTATUS
++gcoOS_IsValidDisplay(
++ IN HALNativeDisplayType Display
++ );
++
++gceSTATUS
++gcoOS_GetNativeVisualId(
++ IN HALNativeDisplayType Display,
++ OUT gctINT* nativeVisualId
++ );
++
++gctBOOL
++gcoOS_SynchronousFlip(
++ IN HALNativeDisplayType Display
++ );
++
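
Taken together, the display entry points support a simple open/query/close round trip. A sketch using only the signatures declared above; gcvNULL and gcvSTATUS_OK follow the usual conventions from gc_hal_types.h (an assumption of this sketch):

    /* Sketch: open the default display, query its geometry, close it.
     * Error handling is collapsed to plain status checks. */
    #include <stdio.h>

    static gceSTATUS print_display(void)
    {
        HALNativeDisplayType display;
        gctINT width, height, stride, bpp;
        gctSIZE_T physical;
        gceSTATUS status;

        status = gcoOS_GetDisplay(&display, gcvNULL);
        if (status != gcvSTATUS_OK)
            return status;

        status = gcoOS_GetDisplayInfo(display, &width, &height,
                                      &physical, &stride, &bpp);
        if (status == gcvSTATUS_OK)
            printf("%dx%d, stride %d, %d bpp\n", width, height, stride, bpp);

        gcoOS_DestroyDisplay(display);
        return status;
    }
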
++/*******************************************************************************
++** Windows. ********************************************************************
++*/
++
++gceSTATUS
++gcoOS_CreateWindow(
++ IN HALNativeDisplayType Display,
++ IN gctINT X,
++ IN gctINT Y,
++ IN gctINT Width,
++ IN gctINT Height,
++ OUT HALNativeWindowType * Window
++ );
++
++gceSTATUS
++gcoOS_GetWindowInfo(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ OUT gctINT * X,
++ OUT gctINT * Y,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * BitsPerPixel,
++ OUT gctUINT * Offset
++ );
++
++gceSTATUS
++gcoOS_DestroyWindow(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window
++ );
++
++gceSTATUS
++gcoOS_DrawImage(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT BitsPerPixel,
++ IN gctPOINTER Bits
++ );
++
++gceSTATUS
++gcoOS_GetImage(
++ IN HALNativeWindowType Window,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ OUT gctINT * BitsPerPixel,
++ OUT gctPOINTER * Bits
++ );
++
++gceSTATUS
++gcoOS_GetWindowInfoEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ OUT gctINT * X,
++ OUT gctINT * Y,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * BitsPerPixel,
++ OUT gctUINT * Offset,
++ OUT gceSURF_FORMAT * Format
++ );
++
++gceSTATUS
++gcoOS_DrawImageEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT BitsPerPixel,
++ IN gctPOINTER Bits,
++ IN gceSURF_FORMAT Format
++ );
++
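
The window entry points follow the same pattern. A sketch that creates a window on an already-opened display, reads back its placement, and tears it down (size arguments are illustrative):

    /* Sketch: window create/query/destroy round trip. */
    static gceSTATUS window_roundtrip(HALNativeDisplayType display)
    {
        HALNativeWindowType window;
        gctINT x, y, width, height, bpp;
        gctUINT offset;
        gceSTATUS status;

        status = gcoOS_CreateWindow(display, 0, 0, 640, 480, &window);
        if (status != gcvSTATUS_OK)
            return status;

        status = gcoOS_GetWindowInfo(display, window, &x, &y,
                                     &width, &height, &bpp, &offset);

        gcoOS_DestroyWindow(display, window);
        return status;
    }
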
++/*******************************************************************************
++** Pixmaps. ********************************************************************
++*/
++
++gceSTATUS
++gcoOS_CreatePixmap(
++ IN HALNativeDisplayType Display,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT BitsPerPixel,
++ OUT HALNativePixmapType * Pixmap
++ );
++
++gceSTATUS
++gcoOS_GetPixmapInfo(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * BitsPerPixel,
++ OUT gctINT * Stride,
++ OUT gctPOINTER * Bits
++ );
++
++gceSTATUS
++gcoOS_DrawPixmap(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT BitsPerPixel,
++ IN gctPOINTER Bits
++ );
++
++gceSTATUS
++gcoOS_DestroyPixmap(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap
++ );
++
++gceSTATUS
++gcoOS_GetPixmapInfoEx(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * BitsPerPixel,
++ OUT gctINT * Stride,
++ OUT gctPOINTER * Bits,
++ OUT gceSURF_FORMAT * Format
++ );
++
++gceSTATUS
++gcoOS_CopyPixmapBits(
++ IN HALNativeDisplayType Display,
++ IN HALNativePixmapType Pixmap,
++ IN gctUINT DstWidth,
++ IN gctUINT DstHeight,
++ IN gctINT DstStride,
++ IN gceSURF_FORMAT DstFormat,
++ OUT gctPOINTER DstBits
++ );
++
++/*******************************************************************************
++** OS relative. ****************************************************************
++*/
++gceSTATUS
++gcoOS_LoadEGLLibrary(
++ OUT gctHANDLE * Handle
++ );
++
++gceSTATUS
++gcoOS_FreeEGLLibrary(
++ IN gctHANDLE Handle
++ );
++
++gceSTATUS
++gcoOS_ShowWindow(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window
++ );
++
++gceSTATUS
++gcoOS_HideWindow(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window
++ );
++
++gceSTATUS
++gcoOS_SetWindowTitle(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ IN gctCONST_STRING Title
++ );
++
++gceSTATUS
++gcoOS_CapturePointer(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window
++ );
++
++gceSTATUS
++gcoOS_GetEvent(
++ IN HALNativeDisplayType Display,
++ IN HALNativeWindowType Window,
++ OUT halEvent * Event
++ );
++
++gceSTATUS
++gcoOS_CreateClientBuffer(
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctINT Format,
++ IN gctINT Type,
++ OUT gctPOINTER * ClientBuffer
++ );
++
++gceSTATUS
++gcoOS_GetClientBufferInfo(
++ IN gctPOINTER ClientBuffer,
++ OUT gctINT * Width,
++ OUT gctINT * Height,
++ OUT gctINT * Stride,
++ OUT gctPOINTER * Bits
++ );
++
++gceSTATUS
++gcoOS_DestroyClientBuffer(
++ IN gctPOINTER ClientBuffer
++ );
++
++gceSTATUS
++gcoOS_DestroyContext(
++ IN gctPOINTER Display,
++ IN gctPOINTER Context
++ );
++
++gceSTATUS
++gcoOS_CreateContext(
++ IN gctPOINTER LocalDisplay,
++ IN gctPOINTER Context
++ );
++
++gceSTATUS
++gcoOS_MakeCurrent(
++ IN gctPOINTER LocalDisplay,
++ IN HALNativeWindowType DrawDrawable,
++ IN HALNativeWindowType ReadDrawable,
++ IN gctPOINTER Context,
++ IN gcoSURF ResolveTarget
++ );
++
++gceSTATUS
++gcoOS_CreateDrawable(
++ IN gctPOINTER LocalDisplay,
++ IN HALNativeWindowType Drawable
++ );
++
++gceSTATUS
++gcoOS_DestroyDrawable(
++ IN gctPOINTER LocalDisplay,
++ IN HALNativeWindowType Drawable
++ );
++
++gceSTATUS
++gcoOS_SwapBuffers(
++ IN gctPOINTER LocalDisplay,
++ IN HALNativeWindowType Drawable,
++ IN gcoSURF RenderTarget,
++ IN gcoSURF ResolveTarget,
++ IN gctPOINTER ResolveBits,
++ OUT gctUINT *Width,
++ OUT gctUINT *Height
++ );
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_eglplatform_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_eglplatform_type.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_eglplatform_type.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_eglplatform_type.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_eglplatform_type.h 2015-07-27 23:13:06.210822785 +0200
+@@ -0,0 +1,286 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_eglplatform_type_h_
++#define __gc_hal_eglplatform_type_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*******************************************************************************
++** Events. *********************************************************************
++*/
++
++typedef enum _halEventType
++{
++ /* Keyboard event. */
++ HAL_KEYBOARD,
++
++ /* Mouse move event. */
++ HAL_POINTER,
++
++ /* Mouse button event. */
++ HAL_BUTTON,
++
++ /* Application close event. */
++ HAL_CLOSE,
++
++ /* Application window has been updated. */
++ HAL_WINDOW_UPDATE
++}
++halEventType;
++
++/* Scancodes for keyboard. */
++typedef enum _halKeys
++{
++ HAL_UNKNOWN = -1,
++
++ HAL_BACKSPACE = 0x08,
++ HAL_TAB,
++ HAL_ENTER = 0x0D,
++ HAL_ESCAPE = 0x1B,
++
++ HAL_SPACE = 0x20,
++ HAL_SINGLEQUOTE = 0x27,
++ HAL_PAD_ASTERISK = 0x2A,
++ HAL_COMMA = 0x2C,
++ HAL_HYPHEN,
++ HAL_PERIOD,
++ HAL_SLASH,
++ HAL_0,
++ HAL_1,
++ HAL_2,
++ HAL_3,
++ HAL_4,
++ HAL_5,
++ HAL_6,
++ HAL_7,
++ HAL_8,
++ HAL_9,
++ HAL_SEMICOLON = 0x3B,
++ HAL_EQUAL = 0x3D,
++ HAL_A = 0x41,
++ HAL_B,
++ HAL_C,
++ HAL_D,
++ HAL_E,
++ HAL_F,
++ HAL_G,
++ HAL_H,
++ HAL_I,
++ HAL_J,
++ HAL_K,
++ HAL_L,
++ HAL_M,
++ HAL_N,
++ HAL_O,
++ HAL_P,
++ HAL_Q,
++ HAL_R,
++ HAL_S,
++ HAL_T,
++ HAL_U,
++ HAL_V,
++ HAL_W,
++ HAL_X,
++ HAL_Y,
++ HAL_Z,
++ HAL_LBRACKET,
++ HAL_BACKSLASH,
++ HAL_RBRACKET,
++ HAL_BACKQUOTE = 0x60,
++
++ HAL_F1 = 0x80,
++ HAL_F2,
++ HAL_F3,
++ HAL_F4,
++ HAL_F5,
++ HAL_F6,
++ HAL_F7,
++ HAL_F8,
++ HAL_F9,
++ HAL_F10,
++ HAL_F11,
++ HAL_F12,
++
++ HAL_LCTRL,
++ HAL_RCTRL,
++ HAL_LSHIFT,
++ HAL_RSHIFT,
++ HAL_LALT,
++ HAL_RALT,
++ HAL_CAPSLOCK,
++ HAL_NUMLOCK,
++ HAL_SCROLLLOCK,
++ HAL_PAD_0,
++ HAL_PAD_1,
++ HAL_PAD_2,
++ HAL_PAD_3,
++ HAL_PAD_4,
++ HAL_PAD_5,
++ HAL_PAD_6,
++ HAL_PAD_7,
++ HAL_PAD_8,
++ HAL_PAD_9,
++ HAL_PAD_HYPHEN,
++ HAL_PAD_PLUS,
++ HAL_PAD_SLASH,
++ HAL_PAD_PERIOD,
++ HAL_PAD_ENTER,
++ HAL_SYSRQ,
++ HAL_PRNTSCRN,
++ HAL_BREAK,
++ HAL_UP,
++ HAL_LEFT,
++ HAL_RIGHT,
++ HAL_DOWN,
++ HAL_HOME,
++ HAL_END,
++ HAL_PGUP,
++ HAL_PGDN,
++ HAL_INSERT,
++ HAL_DELETE,
++ HAL_LWINDOW,
++ HAL_RWINDOW,
++ HAL_MENU,
++ HAL_POWER,
++ HAL_SLEEP,
++ HAL_WAKE
++}
++halKeys;
++
++/* Structure that defines the keyboard mapping. */
++typedef struct _halKeyMap
++{
++ /* Normal key. */
++ halKeys normal;
++
++ /* Extended key. */
++ halKeys extended;
++}
++halKeyMap;
++
++/* Event structure. */
++typedef struct _halEvent
++{
++ /* Event type. */
++ halEventType type;
++
++ /* Event data union. */
++ union _halEventData
++ {
++ /* Event data for keyboard. */
++ struct _halKeyboard
++ {
++ /* Scancode. */
++ halKeys scancode;
++
++ /* ASCII character of the key pressed. */
++ char key;
++
++ /* Flag whether the key was pressed (1) or released (0). */
++ char pressed;
++ }
++ keyboard;
++
++ /* Event data for pointer. */
++ struct _halPointer
++ {
++ /* Current pointer coordinate. */
++ int x;
++ int y;
++ }
++ pointer;
++
++ /* Event data for mouse buttons. */
++ struct _halButton
++ {
++ /* Left button state. */
++ int left;
++
++ /* Middle button state. */
++ int middle;
++
++ /* Right button state. */
++ int right;
++
++ /* Current pointer coordinate. */
++ int x;
++ int y;
++ }
++ button;
++ }
++ data;
++}
++halEvent;
++
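
These event types pair with gcoOS_GetEvent from gc_hal_eglplatform.h. A minimal event pump sketch dispatching on the halEvent union above; loop-exit and redraw policy are application-defined, and gcvSTATUS_OK follows the usual HAL convention (an assumption here):

    /* Sketch: poll events for a window and react to key and close
     * events; pointer/button/update events are ignored. */
    static void pump_events(HALNativeDisplayType display,
                            HALNativeWindowType window)
    {
        halEvent event;

        while (gcoOS_GetEvent(display, window, &event) == gcvSTATUS_OK)
        {
            switch (event.type)
            {
            case HAL_KEYBOARD:
                if (event.data.keyboard.pressed &&
                    event.data.keyboard.scancode == HAL_ESCAPE)
                    return;
                break;

            case HAL_CLOSE:
                return;

            default:
                break;
            }
        }
    }
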
++/* halDISPLAY_INFO structure defining the information returned by
++ vdkGetDisplayInfoEx. */
++typedef struct _halDISPLAY_INFO
++{
++ /* The size of the display in pixels. */
++ int width;
++ int height;
++
++ /* The stride of the display. -1 is returned if the stride is not known
++ ** for the specified display. */
++ int stride;
++
++ /* The color depth of the display in bits per pixel. */
++ int bitsPerPixel;
++
++ /* The logical pointer to the display memory buffer. NULL is returned
++ ** if the pointer is not known for the specified display. */
++ void * logical;
++
++ /* The physical address of the display memory buffer. ~0 is returned
++ ** if the address is not known for the specified display. */
++ unsigned long physical;
++
++ int wrapFB; /* true if compositor, false otherwise. */
++
++#ifndef __QNXNTO__
++ /* 355_FB_MULTI_BUFFER */
++ int multiBuffer;
++ int backBufferY;
++#endif
++
++ /* The color info of the display. */
++ unsigned int alphaLength;
++ unsigned int alphaOffset;
++ unsigned int redLength;
++ unsigned int redOffset;
++ unsigned int greenLength;
++ unsigned int greenOffset;
++ unsigned int blueLength;
++ unsigned int blueOffset;
++
++ /* Display flip support. */
++ int flip;
++}
++halDISPLAY_INFO;
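
The channel length/offset fields describe the framebuffer's native pixel layout. A sketch that packs an 8-bit-per-channel RGBA color accordingly, assuming channel lengths of at most 8 bits:

    /* Sketch: truncate each 8-bit component to the channel width, then
     * shift it into place using the reported offsets. A zero-length
     * channel contributes nothing (the shifted component becomes 0). */
    static unsigned int pack_pixel(const halDISPLAY_INFO *info,
                                   unsigned r, unsigned g,
                                   unsigned b, unsigned a)
    {
        return ((r >> (8 - info->redLength))   << info->redOffset)
             | ((g >> (8 - info->greenLength)) << info->greenOffset)
             | ((b >> (8 - info->blueLength))  << info->blueOffset)
             | ((a >> (8 - info->alphaLength)) << info->alphaOffset);
    }
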
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_eglplatform_type_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_engine.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_engine.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_engine.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_engine.h 2015-07-27 23:13:06.214808565 +0200
+@@ -0,0 +1,2053 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_engine_h_
++#define __gc_hal_engine_h_
++
++#ifndef VIVANTE_NO_3D
++#include "gc_hal_types.h"
++#include "gc_hal_enum.h"
++
++#if gcdENABLE_VG
++#include "gc_hal_engine_vg.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++****************************** Object Declarations *****************************
++\******************************************************************************/
++
++typedef struct _gcoSTREAM * gcoSTREAM;
++typedef struct _gcoVERTEX * gcoVERTEX;
++typedef struct _gcoTEXTURE * gcoTEXTURE;
++typedef struct _gcoINDEX * gcoINDEX;
++typedef struct _gcsVERTEX_ATTRIBUTES * gcsVERTEX_ATTRIBUTES_PTR;
++typedef struct _gcoVERTEXARRAY * gcoVERTEXARRAY;
++
++#define gcdATTRIBUTE_COUNT 16
++
++/******************************************************************************\
++********************************* Enumerations *********************************
++\******************************************************************************/
++
++/* Shading format. */
++typedef enum _gceSHADING
++{
++ gcvSHADING_SMOOTH,
++ gcvSHADING_FLAT_D3D,
++ gcvSHADING_FLAT_OPENGL,
++}
++gceSHADING;
++
++/* Culling modes. */
++typedef enum _gceCULL
++{
++ gcvCULL_NONE,
++ gcvCULL_CCW,
++ gcvCULL_CW,
++}
++gceCULL;
++
++/* Fill modes. */
++typedef enum _gceFILL
++{
++ gcvFILL_POINT,
++ gcvFILL_WIRE_FRAME,
++ gcvFILL_SOLID,
++}
++gceFILL;
++
++/* Compare modes. */
++typedef enum _gceCOMPARE
++{
++ gcvCOMPARE_NEVER,
++ gcvCOMPARE_NOT_EQUAL,
++ gcvCOMPARE_LESS,
++ gcvCOMPARE_LESS_OR_EQUAL,
++ gcvCOMPARE_EQUAL,
++ gcvCOMPARE_GREATER,
++ gcvCOMPARE_GREATER_OR_EQUAL,
++ gcvCOMPARE_ALWAYS,
++ gcvCOMPARE_INVALID = -1
++}
++gceCOMPARE;
++
++/* Stencil modes. */
++typedef enum _gceSTENCIL_MODE
++{
++ gcvSTENCIL_NONE,
++ gcvSTENCIL_SINGLE_SIDED,
++ gcvSTENCIL_DOUBLE_SIDED,
++}
++gceSTENCIL_MODE;
++
++/* Stencil operations. */
++typedef enum _gceSTENCIL_OPERATION
++{
++ gcvSTENCIL_KEEP,
++ gcvSTENCIL_REPLACE,
++ gcvSTENCIL_ZERO,
++ gcvSTENCIL_INVERT,
++ gcvSTENCIL_INCREMENT,
++ gcvSTENCIL_DECREMENT,
++ gcvSTENCIL_INCREMENT_SATURATE,
++ gcvSTENCIL_DECREMENT_SATURATE,
++ gcvSTENCIL_OPERATION_INVALID = -1
++}
++gceSTENCIL_OPERATION;
++
++/* Stencil selection. */
++typedef enum _gceSTENCIL_WHERE
++{
++ gcvSTENCIL_FRONT,
++ gcvSTENCIL_BACK,
++}
++gceSTENCIL_WHERE;
++
++/* Texture addressing selection. */
++typedef enum _gceTEXTURE_WHICH
++{
++ gcvTEXTURE_S,
++ gcvTEXTURE_T,
++ gcvTEXTURE_R,
++}
++gceTEXTURE_WHICH;
++
++/* Texture addressing modes. */
++typedef enum _gceTEXTURE_ADDRESSING
++{
++ gcvTEXTURE_WRAP,
++ gcvTEXTURE_CLAMP,
++ gcvTEXTURE_BORDER,
++ gcvTEXTURE_MIRROR,
++ gcvTEXTURE_MIRROR_ONCE,
++}
++gceTEXTURE_ADDRESSING;
++
++/* Texture filters. */
++typedef enum _gceTEXTURE_FILTER
++{
++ gcvTEXTURE_NONE,
++ gcvTEXTURE_POINT,
++ gcvTEXTURE_LINEAR,
++ gcvTEXTURE_ANISOTROPIC,
++}
++gceTEXTURE_FILTER;
++
++/* Primitive types. */
++typedef enum _gcePRIMITIVE
++{
++ gcvPRIMITIVE_POINT_LIST,
++ gcvPRIMITIVE_LINE_LIST,
++ gcvPRIMITIVE_LINE_STRIP,
++ gcvPRIMITIVE_LINE_LOOP,
++ gcvPRIMITIVE_TRIANGLE_LIST,
++ gcvPRIMITIVE_TRIANGLE_STRIP,
++ gcvPRIMITIVE_TRIANGLE_FAN,
++ gcvPRIMITIVE_RECTANGLE,
++}
++gcePRIMITIVE;
++
++/* Index types. */
++typedef enum _gceINDEX_TYPE
++{
++ gcvINDEX_8,
++ gcvINDEX_16,
++ gcvINDEX_32,
++}
++gceINDEX_TYPE;
++
++/******************************************************************************\
++********************************* gcoHAL Object *********************************
++\******************************************************************************/
++
++/* Query the target capabilities. */
++gceSTATUS
++gcoHAL_QueryTargetCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT * MaxWidth,
++ OUT gctUINT * MaxHeight,
++ OUT gctUINT * MultiTargetCount,
++ OUT gctUINT * MaxSamples
++ );
++
++gceSTATUS
++gcoHAL_SetDepthOnly(
++ IN gcoHAL Hal,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gcoHAL_QueryShaderCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT * VertexUniforms,
++ OUT gctUINT * FragmentUniforms,
++ OUT gctUINT * Varyings
++ );
++
++gceSTATUS
++gcoHAL_QueryTextureCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT * MaxWidth,
++ OUT gctUINT * MaxHeight,
++ OUT gctUINT * MaxDepth,
++ OUT gctBOOL * Cubic,
++ OUT gctBOOL * NonPowerOfTwo,
++ OUT gctUINT * VertexSamplers,
++ OUT gctUINT * PixelSamplers
++ );
++
++gceSTATUS
++gcoHAL_QueryTextureMaxAniso(
++ IN gcoHAL Hal,
++ OUT gctUINT * MaxAnisoValue
++ );
++
++gceSTATUS
++gcoHAL_QueryStreamCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT32 * MaxAttributes,
++ OUT gctUINT32 * MaxStreamSize,
++ OUT gctUINT32 * NumberOfStreams,
++ OUT gctUINT32 * Alignment
++ );
++
++/******************************************************************************\
++********************************* gcoSURF Object ********************************
++\******************************************************************************/
++
++/*----------------------------------------------------------------------------*/
++/*--------------------------------- gcoSURF 3D --------------------------------*/
++
++/* Copy surface. */
++gceSTATUS
++gcoSURF_Copy(
++ IN gcoSURF Surface,
++ IN gcoSURF Source
++ );
++
++/* Clear surface. */
++gceSTATUS
++gcoSURF_Clear(
++ IN gcoSURF Surface,
++ IN gctUINT Flags
++ );
++
++/* Set number of samples for a gcoSURF object. */
++gceSTATUS
++gcoSURF_SetSamples(
++ IN gcoSURF Surface,
++ IN gctUINT Samples
++ );
++
++/* Get the number of samples per pixel. */
++gceSTATUS
++gcoSURF_GetSamples(
++ IN gcoSURF Surface,
++ OUT gctUINT_PTR Samples
++ );
++
++/* Clear rectangular surface. */
++gceSTATUS
++gcoSURF_ClearRect(
++ IN gcoSURF Surface,
++ IN gctINT Left,
++ IN gctINT Top,
++ IN gctINT Right,
++ IN gctINT Bottom,
++ IN gctUINT Flags
++ );
++
++/* TO BE REMOVED */
++ gceSTATUS
++ depr_gcoSURF_Resolve(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gctUINT32 DestAddress,
++ IN gctPOINTER DestBits,
++ IN gctINT DestStride,
++ IN gceSURF_TYPE DestType,
++ IN gceSURF_FORMAT DestFormat,
++ IN gctUINT DestWidth,
++ IN gctUINT DestHeight
++ );
++
++ gceSTATUS
++ depr_gcoSURF_ResolveRect(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gctUINT32 DestAddress,
++ IN gctPOINTER DestBits,
++ IN gctINT DestStride,
++ IN gceSURF_TYPE DestType,
++ IN gceSURF_FORMAT DestFormat,
++ IN gctUINT DestWidth,
++ IN gctUINT DestHeight,
++ IN gcsPOINT_PTR SrcOrigin,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsPOINT_PTR RectSize
++ );
++
++/* Resample surface. */
++gceSTATUS
++gcoSURF_Resample(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface
++ );
++
++/* Resolve surface. */
++gceSTATUS
++gcoSURF_Resolve(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface
++ );
++
++gceSTATUS
++gcoSURF_IsHWResolveable(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsPOINT_PTR SrcOrigin,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsPOINT_PTR RectSize
++ );
++
++/* Resolve rectangular area of a surface. */
++gceSTATUS
++gcoSURF_ResolveRect(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsPOINT_PTR SrcOrigin,
++ IN gcsPOINT_PTR DestOrigin,
++ IN gcsPOINT_PTR RectSize
++ );
++
++/* Set surface resolvability. */
++gceSTATUS
++gcoSURF_SetResolvability(
++ IN gcoSURF Surface,
++ IN gctBOOL Resolvable
++ );
++
++gceSTATUS
++gcoSURF_IsRenderable(
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoSURF_IsFormatRenderableAsRT(
++ IN gcoSURF Surface
++ );
++
++#if gcdSYNC
++gceSTATUS
++gcoSURF_GetFence(
++ IN gcoSURF Surface
++ );
++gceSTATUS
++gcoSURF_WaitFence(
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoSTREAM_GetFence(
++ IN gcoSTREAM stream
++ );
++
++gceSTATUS
++gcoSTREAM_WaitFence(
++ IN gcoSTREAM stream
++ );
++
++gceSTATUS
++gcoINDEX_GetFence(
++ IN gcoINDEX index
++ );
++
++gceSTATUS
++gcoINDEX_WaitFence(
++ IN gcoINDEX index
++ );
++#endif
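
When gcdSYNC is enabled, the fence helpers above can bracket surface operations such as gcoSURF_Resolve. A sketch of a fence-guarded resolve; plain status checks stand in for the driver's usual error-handling macros, and gcvSTATUS_OK is assumed per the HAL conventions:

    /* Sketch: wait out pending GPU work on the destination, resolve,
     * then tag the destination with a fresh fence. */
    static gceSTATUS resolve_with_fence(gcoSURF src, gcoSURF dst)
    {
        gceSTATUS status;

    #if gcdSYNC
        status = gcoSURF_WaitFence(dst);
        if (status != gcvSTATUS_OK)
            return status;
    #endif

        status = gcoSURF_Resolve(src, dst);

    #if gcdSYNC
        if (status == gcvSTATUS_OK)
            status = gcoSURF_GetFence(dst);
    #endif

        return status;
    }
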
++
++/******************************************************************************\
++******************************** gcoINDEX Object *******************************
++\******************************************************************************/
++
++/* Construct a new gcoINDEX object. */
++gceSTATUS
++gcoINDEX_Construct(
++ IN gcoHAL Hal,
++ OUT gcoINDEX * Index
++ );
++
++/* Destroy a gcoINDEX object. */
++gceSTATUS
++gcoINDEX_Destroy(
++ IN gcoINDEX Index
++ );
++
++/* Lock index in memory. */
++gceSTATUS
++gcoINDEX_Lock(
++ IN gcoINDEX Index,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Memory
++ );
++
++/* Unlock index that was previously locked with gcoINDEX_Lock. */
++gceSTATUS
++gcoINDEX_Unlock(
++ IN gcoINDEX Index
++ );
++
++/* Upload index data into the memory. */
++gceSTATUS
++gcoINDEX_Load(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE IndexType,
++ IN gctUINT32 IndexCount,
++ IN gctPOINTER IndexBuffer
++ );
++
++/* Bind an index object to the hardware. */
++gceSTATUS
++gcoINDEX_Bind(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE Type
++ );
++
++/* Bind an index object to the hardware. */
++gceSTATUS
++gcoINDEX_BindOffset(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE Type,
++ IN gctUINT32 Offset
++ );
++
++/* Free existing index buffer. */
++gceSTATUS
++gcoINDEX_Free(
++ IN gcoINDEX Index
++ );
++
++/* Upload data into an index buffer. */
++gceSTATUS
++gcoINDEX_Upload(
++ IN gcoINDEX Index,
++ IN gctCONST_POINTER Buffer,
++ IN gctSIZE_T Bytes
++ );
++
++/* Upload data into an index buffer starting at an offset. */
++gceSTATUS
++gcoINDEX_UploadOffset(
++ IN gcoINDEX Index,
++ IN gctUINT32 Offset,
++ IN gctCONST_POINTER Buffer,
++ IN gctSIZE_T Bytes
++ );
++
++/* Merge Index2 into Index1 from offset 0; Index2 must be a subset of Index1. */
++gceSTATUS
++gcoINDEX_Merge(
++ IN gcoINDEX Index1,
++ IN gcoINDEX Index2
++ );
++
++/* Check whether the index buffer is large enough for this draw. */
++gctBOOL
++gcoINDEX_CheckRange(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE Type,
++ IN gctINT Count,
++ IN gctUINT32 Indices
++ );
++
++/* Query the index capabilities. */
++gceSTATUS
++gcoINDEX_QueryCaps(
++ OUT gctBOOL * Index8,
++ OUT gctBOOL * Index16,
++ OUT gctBOOL * Index32,
++ OUT gctUINT * MaxIndex
++ );
++
++/* Determine the index range in the current index buffer. */
++gceSTATUS
++gcoINDEX_GetIndexRange(
++ IN gcoINDEX Index,
++ IN gceINDEX_TYPE Type,
++ IN gctUINT32 Offset,
++ IN gctUINT32 Count,
++ OUT gctUINT32 * MinimumIndex,
++ OUT gctUINT32 * MaximumIndex
++ );
++
++/* Dynamic buffer management. */
++gceSTATUS
++gcoINDEX_SetDynamic(
++ IN gcoINDEX Index,
++ IN gctSIZE_T Bytes,
++ IN gctUINT Buffers
++ );
++
++gceSTATUS
++gcoINDEX_UploadDynamic(
++ IN gcoINDEX Index,
++ IN gctCONST_POINTER Data,
++ IN gctSIZE_T Bytes
++ );
++
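
The dynamic-buffer entry points above suggest a construct-once, upload-per-draw workflow. A sketch with illustrative sizes; the out-parameter and 16-bit index data are choices of the sketch, not requirements of the API:

    /* Sketch: build a double-buffered dynamic index object, upload one
     * batch of 16-bit indices, and bind it for drawing. */
    static gceSTATUS upload_indices(gcoHAL hal,
                                    const gctUINT16 *indices,
                                    gctSIZE_T count,
                                    gcoINDEX *out)
    {
        gcoINDEX index = gcvNULL;
        gceSTATUS status;

        status = gcoINDEX_Construct(hal, &index);
        if (status != gcvSTATUS_OK)
            return status;

        /* One 64KB ring, double-buffered (sizes are illustrative). */
        status = gcoINDEX_SetDynamic(index, 64 << 10, 2);

        if (status == gcvSTATUS_OK)
            status = gcoINDEX_UploadDynamic(index, indices,
                                            count * sizeof(*indices));

        if (status == gcvSTATUS_OK)
            status = gcoINDEX_Bind(index, gcvINDEX_16);

        if (status == gcvSTATUS_OK)
            *out = index;
        else
            gcoINDEX_Destroy(index);

        return status;
    }
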
++/******************************************************************************\
++********************************** gco3D Object *********************************
++\******************************************************************************/
++
++/* Clear flags. */
++typedef enum _gceCLEAR
++{
++ gcvCLEAR_COLOR = 0x1,
++ gcvCLEAR_DEPTH = 0x2,
++ gcvCLEAR_STENCIL = 0x4,
++ gcvCLEAR_HZ = 0x8,
++ gcvCLEAR_HAS_VAA = 0x10,
++}
++gceCLEAR;
++
++/* Blending targets. */
++typedef enum _gceBLEND_UNIT
++{
++ gcvBLEND_SOURCE,
++ gcvBLEND_TARGET,
++}
++gceBLEND_UNIT;
++
++/* Construct a new gco3D object. */
++gceSTATUS
++gco3D_Construct(
++ IN gcoHAL Hal,
++ OUT gco3D * Engine
++ );
++
++/* Destroy an gco3D object. */
++gceSTATUS
++gco3D_Destroy(
++ IN gco3D Engine
++ );
++
++/* Set 3D API type. */
++gceSTATUS
++gco3D_SetAPI(
++ IN gco3D Engine,
++ IN gceAPI ApiType
++ );
++
++/* Set render target. */
++gceSTATUS
++gco3D_SetTarget(
++ IN gco3D Engine,
++ IN gcoSURF Surface
++ );
++
++/* Unset render target. */
++gceSTATUS
++gco3D_UnsetTarget(
++ IN gco3D Engine,
++ IN gcoSURF Surface
++ );
++
++/* Set depth buffer. */
++gceSTATUS
++gco3D_SetDepth(
++ IN gco3D Engine,
++ IN gcoSURF Surface
++ );
++
++/* Unset depth buffer. */
++gceSTATUS
++gco3D_UnsetDepth(
++ IN gco3D Engine,
++ IN gcoSURF Surface
++ );
++
++/* Set viewport. */
++gceSTATUS
++gco3D_SetViewport(
++ IN gco3D Engine,
++ IN gctINT32 Left,
++ IN gctINT32 Top,
++ IN gctINT32 Right,
++ IN gctINT32 Bottom
++ );
++
++/* Set scissors. */
++gceSTATUS
++gco3D_SetScissors(
++ IN gco3D Engine,
++ IN gctINT32 Left,
++ IN gctINT32 Top,
++ IN gctINT32 Right,
++ IN gctINT32 Bottom
++ );
++
++/* Set clear color. */
++gceSTATUS
++gco3D_SetClearColor(
++ IN gco3D Engine,
++ IN gctUINT8 Red,
++ IN gctUINT8 Green,
++ IN gctUINT8 Blue,
++ IN gctUINT8 Alpha
++ );
++
++/* Set fixed point clear color. */
++gceSTATUS
++gco3D_SetClearColorX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++/* Set floating point clear color. */
++gceSTATUS
++gco3D_SetClearColorF(
++ IN gco3D Engine,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Set fixed point clear depth. */
++gceSTATUS
++gco3D_SetClearDepthX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Depth
++ );
++
++/* Set floating point clear depth. */
++gceSTATUS
++gco3D_SetClearDepthF(
++ IN gco3D Engine,
++ IN gctFLOAT Depth
++ );
++
++/* Set clear stencil. */
++gceSTATUS
++gco3D_SetClearStencil(
++ IN gco3D Engine,
++ IN gctUINT32 Stencil
++ );
++
++/* Clear a Rect sub-surface. */
++gceSTATUS
++gco3D_ClearRect(
++ IN gco3D Engine,
++ IN gctUINT32 Address,
++ IN gctPOINTER Memory,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gctINT32 Left,
++ IN gctINT32 Top,
++ IN gctINT32 Right,
++ IN gctINT32 Bottom,
++ IN gctUINT32 Width,
++ IN gctUINT32 Height,
++ IN gctUINT32 Flags
++ );
++
++/* Clear surface. */
++gceSTATUS
++gco3D_Clear(
++ IN gco3D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT32 Width,
++ IN gctUINT32 Height,
++ IN gctUINT32 Flags
++ );
++
++
++/* Clear tile status. */
++gceSTATUS
++gco3D_ClearTileStatus(
++ IN gco3D Engine,
++ IN gcsSURF_INFO_PTR Surface,
++ IN gctUINT32 TileStatusAddress,
++ IN gctUINT32 Flags
++ );
++
++/* Set shading mode. */
++gceSTATUS
++gco3D_SetShading(
++ IN gco3D Engine,
++ IN gceSHADING Shading
++ );
++
++/* Set blending mode. */
++gceSTATUS
++gco3D_EnableBlending(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set blending function. */
++gceSTATUS
++gco3D_SetBlendFunction(
++ IN gco3D Engine,
++ IN gceBLEND_UNIT Unit,
++ IN gceBLEND_FUNCTION FunctionRGB,
++ IN gceBLEND_FUNCTION FunctionAlpha
++ );
++
++/* Set blending mode. */
++gceSTATUS
++gco3D_SetBlendMode(
++ IN gco3D Engine,
++ IN gceBLEND_MODE ModeRGB,
++ IN gceBLEND_MODE ModeAlpha
++ );
++
++/* Set blending color. */
++gceSTATUS
++gco3D_SetBlendColor(
++ IN gco3D Engine,
++ IN gctUINT Red,
++ IN gctUINT Green,
++ IN gctUINT Blue,
++ IN gctUINT Alpha
++ );
++
++/* Set fixed point blending color. */
++gceSTATUS
++gco3D_SetBlendColorX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++/* Set floating point blending color. */
++gceSTATUS
++gco3D_SetBlendColorF(
++ IN gco3D Engine,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Set culling mode. */
++gceSTATUS
++gco3D_SetCulling(
++ IN gco3D Engine,
++ IN gceCULL Mode
++ );
++
++/* Enable point size */
++gceSTATUS
++gco3D_SetPointSizeEnable(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set point sprite */
++gceSTATUS
++gco3D_SetPointSprite(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set fill mode. */
++gceSTATUS
++gco3D_SetFill(
++ IN gco3D Engine,
++ IN gceFILL Mode
++ );
++
++/* Set depth compare mode. */
++gceSTATUS
++gco3D_SetDepthCompare(
++ IN gco3D Engine,
++ IN gceCOMPARE Compare
++ );
++
++/* Enable depth writing. */
++gceSTATUS
++gco3D_EnableDepthWrite(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set depth mode. */
++gceSTATUS
++gco3D_SetDepthMode(
++ IN gco3D Engine,
++ IN gceDEPTH_MODE Mode
++ );
++
++/* Set depth range. */
++gceSTATUS
++gco3D_SetDepthRangeX(
++ IN gco3D Engine,
++ IN gceDEPTH_MODE Mode,
++ IN gctFIXED_POINT Near,
++ IN gctFIXED_POINT Far
++ );
++
++/* Set depth range. */
++gceSTATUS
++gco3D_SetDepthRangeF(
++ IN gco3D Engine,
++ IN gceDEPTH_MODE Mode,
++ IN gctFLOAT Near,
++ IN gctFLOAT Far
++ );
++
++/* Set last pixel enable */
++gceSTATUS
++gco3D_SetLastPixelEnable(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set depth Bias and Scale */
++gceSTATUS
++gco3D_SetDepthScaleBiasX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT DepthScale,
++ IN gctFIXED_POINT DepthBias
++ );
++
++gceSTATUS
++gco3D_SetDepthScaleBiasF(
++ IN gco3D Engine,
++ IN gctFLOAT DepthScale,
++ IN gctFLOAT DepthBias
++ );
++
++/* Set depth near and far clipping plane. */
++gceSTATUS
++gco3D_SetDepthPlaneF(
++ IN gco3D Engine,
++ IN gctFLOAT Near,
++ IN gctFLOAT Far
++ );
++
++/* Enable or disable dithering. */
++gceSTATUS
++gco3D_EnableDither(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set color write enable bits. */
++gceSTATUS
++gco3D_SetColorWrite(
++ IN gco3D Engine,
++ IN gctUINT8 Enable
++ );
++
++/* Enable or disable early depth. */
++gceSTATUS
++gco3D_SetEarlyDepth(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Enable or disable all early depth operations. */
++gceSTATUS
++gco3D_SetAllEarlyDepthModes(
++ IN gco3D Engine,
++ IN gctBOOL Disable
++ );
++
++/* Switch dynamic early mode */
++gceSTATUS
++gco3D_SwitchDynamicEarlyDepthMode(
++ IN gco3D Engine
++ );
++
++/* Set dynamic early mode */
++gceSTATUS
++gco3D_DisableDynamicEarlyDepthMode(
++ IN gco3D Engine,
++ IN gctBOOL Disable
++ );
++
++/* Enable or disable depth-only mode. */
++gceSTATUS
++gco3D_SetDepthOnly(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++typedef struct _gcsSTENCIL_INFO * gcsSTENCIL_INFO_PTR;
++typedef struct _gcsSTENCIL_INFO
++{
++ gceSTENCIL_MODE mode;
++
++ gctUINT8 maskFront;
++ gctUINT8 maskBack;
++ gctUINT8 writeMaskFront;
++ gctUINT8 writeMaskBack;
++
++ gctUINT8 referenceFront;
++
++ gceCOMPARE compareFront;
++ gceSTENCIL_OPERATION passFront;
++ gceSTENCIL_OPERATION failFront;
++ gceSTENCIL_OPERATION depthFailFront;
++
++ gctUINT8 referenceBack;
++ gceCOMPARE compareBack;
++ gceSTENCIL_OPERATION passBack;
++ gceSTENCIL_OPERATION failBack;
++ gceSTENCIL_OPERATION depthFailBack;
++}
++gcsSTENCIL_INFO;
++
++/* Set stencil mode. */
++gceSTATUS
++gco3D_SetStencilMode(
++ IN gco3D Engine,
++ IN gceSTENCIL_MODE Mode
++ );
++
++/* Set stencil mask. */
++gceSTATUS
++gco3D_SetStencilMask(
++ IN gco3D Engine,
++ IN gctUINT8 Mask
++ );
++
++/* Set stencil back mask. */
++gceSTATUS
++gco3D_SetStencilMaskBack(
++ IN gco3D Engine,
++ IN gctUINT8 Mask
++ );
++
++/* Set stencil write mask. */
++gceSTATUS
++gco3D_SetStencilWriteMask(
++ IN gco3D Engine,
++ IN gctUINT8 Mask
++ );
++
++/* Set stencil back write mask. */
++gceSTATUS
++gco3D_SetStencilWriteMaskBack(
++ IN gco3D Engine,
++ IN gctUINT8 Mask
++ );
++
++/* Set stencil reference. */
++gceSTATUS
++gco3D_SetStencilReference(
++ IN gco3D Engine,
++ IN gctUINT8 Reference,
++ IN gctBOOL Front
++ );
++
++/* Set stencil compare. */
++gceSTATUS
++gco3D_SetStencilCompare(
++ IN gco3D Engine,
++ IN gceSTENCIL_WHERE Where,
++ IN gceCOMPARE Compare
++ );
++
++/* Set stencil operation on pass. */
++gceSTATUS
++gco3D_SetStencilPass(
++ IN gco3D Engine,
++ IN gceSTENCIL_WHERE Where,
++ IN gceSTENCIL_OPERATION Operation
++ );
++
++/* Set stencil operation on fail. */
++gceSTATUS
++gco3D_SetStencilFail(
++ IN gco3D Engine,
++ IN gceSTENCIL_WHERE Where,
++ IN gceSTENCIL_OPERATION Operation
++ );
++
++/* Set stencil operation on depth fail. */
++gceSTATUS
++gco3D_SetStencilDepthFail(
++ IN gco3D Engine,
++ IN gceSTENCIL_WHERE Where,
++ IN gceSTENCIL_OPERATION Operation
++ );
++
++/* Set all stencil states in one call. */
++gceSTATUS
++gco3D_SetStencilAll(
++ IN gco3D Engine,
++ IN gcsSTENCIL_INFO_PTR Info
++ );
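
gco3D_SetStencilAll applies a complete gcsSTENCIL_INFO in one call. A sketch of a conventional single-sided replace-on-pass setup, mirroring the front-face state into the back-face fields:

    /* Sketch: always-pass stencil test that writes 'ref' into the
     * stencil buffer wherever a fragment passes. */
    static gceSTATUS set_replace_stencil(gco3D engine, gctUINT8 ref)
    {
        gcsSTENCIL_INFO info;

        info.mode           = gcvSTENCIL_SINGLE_SIDED;
        info.maskFront      = info.maskBack      = 0xFF;
        info.writeMaskFront = info.writeMaskBack = 0xFF;

        info.referenceFront = ref;
        info.compareFront   = gcvCOMPARE_ALWAYS;
        info.passFront      = gcvSTENCIL_REPLACE;
        info.failFront      = gcvSTENCIL_KEEP;
        info.depthFailFront = gcvSTENCIL_KEEP;

        info.referenceBack  = ref;
        info.compareBack    = gcvCOMPARE_ALWAYS;
        info.passBack       = gcvSTENCIL_REPLACE;
        info.failBack       = gcvSTENCIL_KEEP;
        info.depthFailBack  = gcvSTENCIL_KEEP;

        return gco3D_SetStencilAll(engine, &info);
    }
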
++
++typedef struct _gcsALPHA_INFO * gcsALPHA_INFO_PTR;
++typedef struct _gcsALPHA_INFO
++{
++ /* Alpha test states. */
++ gctBOOL test;
++ gceCOMPARE compare;
++ gctUINT8 reference;
++ gctFLOAT floatReference;
++
++ /* Alpha blending states. */
++ gctBOOL blend;
++
++ gceBLEND_FUNCTION srcFuncColor;
++ gceBLEND_FUNCTION srcFuncAlpha;
++ gceBLEND_FUNCTION trgFuncColor;
++ gceBLEND_FUNCTION trgFuncAlpha;
++
++ gceBLEND_MODE modeColor;
++ gceBLEND_MODE modeAlpha;
++
++ gctUINT32 color;
++}
++gcsALPHA_INFO;
++
++/* Enable or disable alpha test. */
++gceSTATUS
++gco3D_SetAlphaTest(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set alpha test compare. */
++gceSTATUS
++gco3D_SetAlphaCompare(
++ IN gco3D Engine,
++ IN gceCOMPARE Compare
++ );
++
++/* Set alpha test reference in unsigned integer. */
++gceSTATUS
++gco3D_SetAlphaReference(
++ IN gco3D Engine,
++ IN gctUINT8 Reference,
++ IN gctFLOAT FloatReference
++ );
++
++/* Set alpha test reference in fixed point. */
++gceSTATUS
++gco3D_SetAlphaReferenceX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Reference
++ );
++
++/* Set alpha test reference in floating point. */
++gceSTATUS
++gco3D_SetAlphaReferenceF(
++ IN gco3D Engine,
++ IN gctFLOAT Reference
++ );
++
++/* Enable/Disable anti-alias line. */
++gceSTATUS
++gco3D_SetAntiAliasLine(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Set texture slot for anti-alias line. */
++gceSTATUS
++gco3D_SetAALineTexSlot(
++ IN gco3D Engine,
++ IN gctUINT TexSlot
++ );
++
++/* Set anti-alias line width scale. */
++gceSTATUS
++gco3D_SetAALineWidth(
++ IN gco3D Engine,
++ IN gctFLOAT Width
++ );
++
++/* Draw a number of primitives. */
++gceSTATUS
++gco3D_DrawPrimitives(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT StartVertex,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++gceSTATUS
++gco3D_DrawPrimitivesCount(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT* StartVertex,
++ IN gctSIZE_T* VertexCount,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++
++/* Draw a number of primitives using offsets. */
++gceSTATUS
++gco3D_DrawPrimitivesOffset(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT32 StartOffset,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++/* Draw a number of indexed primitives. */
++gceSTATUS
++gco3D_DrawIndexedPrimitives(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT BaseVertex,
++ IN gctINT StartIndex,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++/* Draw a number of indexed primitives using offsets. */
++gceSTATUS
++gco3D_DrawIndexedPrimitivesOffset(
++ IN gco3D Engine,
++ IN gcePRIMITIVE Type,
++ IN gctINT32 BaseOffset,
++ IN gctINT32 StartOffset,
++ IN gctSIZE_T PrimitiveCount
++ );
++
++/* Enable or disable anti-aliasing. */
++gceSTATUS
++gco3D_SetAntiAlias(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++/* Write data into the command buffer. */
++gceSTATUS
++gco3D_WriteBuffer(
++ IN gco3D Engine,
++ IN gctCONST_POINTER Data,
++ IN gctSIZE_T Bytes,
++ IN gctBOOL Aligned
++ );
++
++/* Send semaphore and stall until the semaphore is signalled. */
++gceSTATUS
++gco3D_Semaphore(
++ IN gco3D Engine,
++ IN gceWHERE From,
++ IN gceWHERE To,
++ IN gceHOW How
++ );
++
++/* Set the subpixels center. */
++gceSTATUS
++gco3D_SetCentroids(
++ IN gco3D Engine,
++ IN gctUINT32 Index,
++ IN gctPOINTER Centroids
++ );
++
++gceSTATUS
++gco3D_SetLogicOp(
++ IN gco3D Engine,
++ IN gctUINT8 Rop
++ );
++
++/* OCL thread walker information. */
++typedef struct _gcsTHREAD_WALKER_INFO * gcsTHREAD_WALKER_INFO_PTR;
++typedef struct _gcsTHREAD_WALKER_INFO
++{
++ gctUINT32 dimensions;
++ gctUINT32 traverseOrder;
++ gctUINT32 enableSwathX;
++ gctUINT32 enableSwathY;
++ gctUINT32 enableSwathZ;
++ gctUINT32 swathSizeX;
++ gctUINT32 swathSizeY;
++ gctUINT32 swathSizeZ;
++ gctUINT32 valueOrder;
++
++ gctUINT32 globalSizeX;
++ gctUINT32 globalOffsetX;
++ gctUINT32 globalSizeY;
++ gctUINT32 globalOffsetY;
++ gctUINT32 globalSizeZ;
++ gctUINT32 globalOffsetZ;
++
++ gctUINT32 workGroupSizeX;
++ gctUINT32 workGroupCountX;
++ gctUINT32 workGroupSizeY;
++ gctUINT32 workGroupCountY;
++ gctUINT32 workGroupSizeZ;
++ gctUINT32 workGroupCountZ;
++
++ gctUINT32 threadAllocation;
++}
++gcsTHREAD_WALKER_INFO;
++
++/* Start OCL thread walker. */
++gceSTATUS
++gco3D_InvokeThreadWalker(
++ IN gco3D Engine,
++ IN gcsTHREAD_WALKER_INFO_PTR Info
++ );
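
A 1D dispatch only needs the X-axis fields of gcsTHREAD_WALKER_INFO populated. A sketch that zeroes the remaining fields, leaving traverse order, swath controls, and value order at their zero defaults (an assumption of this sketch):

    /* Sketch: launch 'global' work items in groups of 'local' along X. */
    #include <string.h>

    static gceSTATUS launch_1d(gco3D engine, gctUINT32 global, gctUINT32 local)
    {
        gcsTHREAD_WALKER_INFO info;

        memset(&info, 0, sizeof(info));
        info.dimensions      = 1;
        info.globalSizeX     = global;
        info.globalOffsetX   = 0;
        info.workGroupSizeX  = local;
        info.workGroupCountX = (global + local - 1) / local;

        return gco3D_InvokeThreadWalker(engine, &info);
    }
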
++
++/* Set w clip and w plane limit value. */
++gceSTATUS
++gco3D_SetWClipEnable(
++ IN gco3D Engine,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gco3D_GetWClipEnable(
++ IN gco3D Engine,
++ OUT gctBOOL * Enable
++ );
++
++gceSTATUS
++gco3D_SetWPlaneLimitF(
++ IN gco3D Engine,
++ IN gctFLOAT Value
++ );
++
++gceSTATUS
++gco3D_SetWPlaneLimitX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Value
++ );
++
++
++gceSTATUS
++gco3D_SetWPlaneLimit(
++ IN gco3D Engine,
++ IN gctFLOAT Value
++ );
++
++/*----------------------------------------------------------------------------*/
++/*-------------------------- gco3D Fragment Processor ------------------------*/
++
++/* Set the fragment processor configuration. */
++gceSTATUS
++gco3D_SetFragmentConfiguration(
++ IN gco3D Engine,
++ IN gctBOOL ColorFromStream,
++ IN gctBOOL EnableFog,
++ IN gctBOOL EnableSmoothPoint,
++ IN gctUINT32 ClipPlanes
++ );
++
++/* Enable/disable texture stage operation. */
++gceSTATUS
++gco3D_EnableTextureStage(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctBOOL Enable
++ );
++
++/* Program the channel enable masks for the color texture function. */
++gceSTATUS
++gco3D_SetTextureColorMask(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctBOOL ColorEnabled,
++ IN gctBOOL AlphaEnabled
++ );
++
++/* Program the channel enable masks for the alpha texture function. */
++gceSTATUS
++gco3D_SetTextureAlphaMask(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctBOOL ColorEnabled,
++ IN gctBOOL AlphaEnabled
++ );
++
++/* Program the constant fragment color. */
++gceSTATUS
++gco3D_SetFragmentColorX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++gceSTATUS
++gco3D_SetFragmentColorF(
++ IN gco3D Engine,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Program the constant fog color. */
++gceSTATUS
++gco3D_SetFogColorX(
++ IN gco3D Engine,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++gceSTATUS
++gco3D_SetFogColorF(
++ IN gco3D Engine,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Program the constant texture color. */
++gceSTATUS
++gco3D_SetTetxureColorX(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctFIXED_POINT Red,
++ IN gctFIXED_POINT Green,
++ IN gctFIXED_POINT Blue,
++ IN gctFIXED_POINT Alpha
++ );
++
++gceSTATUS
++gco3D_SetTetxureColorF(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++/* Configure color texture function. */
++gceSTATUS
++gco3D_SetColorTextureFunction(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gceTEXTURE_FUNCTION Function,
++ IN gceTEXTURE_SOURCE Source0,
++ IN gceTEXTURE_CHANNEL Channel0,
++ IN gceTEXTURE_SOURCE Source1,
++ IN gceTEXTURE_CHANNEL Channel1,
++ IN gceTEXTURE_SOURCE Source2,
++ IN gceTEXTURE_CHANNEL Channel2,
++ IN gctINT Scale
++ );
++
++/* Configure alpha texture function. */
++gceSTATUS
++gco3D_SetAlphaTextureFunction(
++ IN gco3D Engine,
++ IN gctINT Stage,
++ IN gceTEXTURE_FUNCTION Function,
++ IN gceTEXTURE_SOURCE Source0,
++ IN gceTEXTURE_CHANNEL Channel0,
++ IN gceTEXTURE_SOURCE Source1,
++ IN gceTEXTURE_CHANNEL Channel1,
++ IN gceTEXTURE_SOURCE Source2,
++ IN gceTEXTURE_CHANNEL Channel2,
++ IN gctINT Scale
++ );
++
++/* Invoke OCL thread walker. */
++gceSTATUS
++gcoHARDWARE_InvokeThreadWalker(
++ IN gcsTHREAD_WALKER_INFO_PTR Info
++ );
++
++/******************************************************************************\
++******************************* gcoTEXTURE Object *******************************
++\******************************************************************************/
++
++/* Cube faces. */
++typedef enum _gceTEXTURE_FACE
++{
++ gcvFACE_NONE,
++ gcvFACE_POSITIVE_X,
++ gcvFACE_NEGATIVE_X,
++ gcvFACE_POSITIVE_Y,
++ gcvFACE_NEGATIVE_Y,
++ gcvFACE_POSITIVE_Z,
++ gcvFACE_NEGATIVE_Z,
++}
++gceTEXTURE_FACE;
++
++#if gcdFORCE_MIPMAP
++typedef enum
++{
++ gcvForceMipDisabled = 0,
++ gcvForceMipEnable = 1,
++ gcvForceMipGenerated = 2,
++ gcvForceMipNever = 3,
++}gceFORCE_MIPMAP;
++#endif
++
++typedef struct _gcsTEXTURE
++{
++ /* Addressing modes. */
++ gceTEXTURE_ADDRESSING s;
++ gceTEXTURE_ADDRESSING t;
++ gceTEXTURE_ADDRESSING r;
++
++ /* Border color. */
++ gctUINT8 border[4];
++
++ /* Filters. */
++ gceTEXTURE_FILTER minFilter;
++ gceTEXTURE_FILTER magFilter;
++ gceTEXTURE_FILTER mipFilter;
++ gctUINT anisoFilter;
++ gctBOOL forceTopLevel;
++ gctBOOL autoMipmap;
++#if gcdFORCE_MIPMAP
++ gceFORCE_MIPMAP forceMipmap;
++#endif
++ /* Level of detail. */
++ gctFIXED_POINT lodBias;
++ gctFIXED_POINT lodMin;
++ gctFIXED_POINT lodMax;
++}
++gcsTEXTURE, * gcsTEXTURE_PTR;
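++
++/* Illustrative sketch (editorial addition): initializing a gcsTEXTURE
++** sampler descriptor for repeat-wrapped bilinear sampling. The enumerant
++** names gcvTEXTURE_WRAP, gcvTEXTURE_LINEAR and gcvTEXTURE_NONE are
++** assumed members of the addressing/filter enums declared elsewhere.
++**
++**     gcsTEXTURE sampler;
++**     memset(&sampler, 0, sizeof sampler);
++**     sampler.s         = gcvTEXTURE_WRAP;
++**     sampler.t         = gcvTEXTURE_WRAP;
++**     sampler.minFilter = gcvTEXTURE_LINEAR;
++**     sampler.magFilter = gcvTEXTURE_LINEAR;
++**     sampler.mipFilter = gcvTEXTURE_NONE;
++*/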
++
++/* Construct a new gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_Construct(
++ IN gcoHAL Hal,
++ OUT gcoTEXTURE * Texture
++ );
++
++/* Construct a new sized gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_ConstructSized(
++ IN gcoHAL Hal,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Depth,
++ IN gctUINT Faces,
++ IN gctUINT MipMapCount,
++ IN gcePOOL Pool,
++ OUT gcoTEXTURE * Texture
++ );
++
++/* Destroy a gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_Destroy(
++ IN gcoTEXTURE Texture
++ );
++#if gcdFORCE_MIPMAP
++gceSTATUS
++gcoTEXTURE_DestroyForceMipmap(
++ IN gcoTEXTURE Texture
++ );
++
++gceSTATUS
++gcoTEXTURE_GetMipLevels(
++ IN gcoTEXTURE Texture,
++ OUT gctINT * levels
++ );
++#endif
++/* Replace a mipmap in a gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_ReplaceMipMap(
++ IN gcoTEXTURE Texture,
++ IN gctUINT Level,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctINT imageFormat,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Depth,
++ IN gctUINT Faces,
++ IN gcePOOL Pool
++ );
++
++/* Upload data to a gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_Upload(
++ IN gcoTEXTURE Texture,
++ IN gceTEXTURE_FACE Face,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Slice,
++ IN gctCONST_POINTER Memory,
++ IN gctINT Stride,
++ IN gceSURF_FORMAT Format
++ );
++
++/* Upload sub-rectangle data to a gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_UploadSub(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ IN gceTEXTURE_FACE Face,
++ IN gctUINT X,
++ IN gctUINT Y,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Slice,
++ IN gctCONST_POINTER Memory,
++ IN gctINT Stride,
++ IN gceSURF_FORMAT Format
++ );
++
++/* Upload YUV data to a gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_UploadYUV(
++ IN gcoTEXTURE Texture,
++ IN gceTEXTURE_FACE Face,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Slice,
++ IN gctPOINTER Memory[3],
++ IN gctINT Stride[3],
++ IN gceSURF_FORMAT Format
++ );
++
++/* Upload compressed data to a gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_UploadCompressed(
++ IN gcoTEXTURE Texture,
++ IN gceTEXTURE_FACE Face,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Slice,
++ IN gctCONST_POINTER Memory,
++ IN gctSIZE_T Bytes
++ );
++
++/* Upload compressed sub-rectangle data to a gcoTEXTURE object. */
++gceSTATUS
++gcoTEXTURE_UploadCompressedSub(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ IN gceTEXTURE_FACE Face,
++ IN gctUINT XOffset,
++ IN gctUINT YOffset,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Slice,
++ IN gctCONST_POINTER Memory,
++ IN gctSIZE_T Size
++ );
++
++/* Get the image format of a texture mipmap level. */
++gceSTATUS
++gcoTEXTURE_GetImageFormat(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ OUT gctINT * ImageFormat
++ );
++
++/* Get gcoSURF object for a mipmap level. */
++gceSTATUS
++gcoTEXTURE_GetMipMap(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ OUT gcoSURF * Surface
++ );
++
++/* Get gcoSURF object for a mipmap level and face offset. */
++gceSTATUS
++gcoTEXTURE_GetMipMapFace(
++ IN gcoTEXTURE Texture,
++ IN gctUINT MipMap,
++ IN gceTEXTURE_FACE Face,
++ OUT gcoSURF * Surface,
++ OUT gctUINT32_PTR Offset
++ );
++
++gceSTATUS
++gcoTEXTURE_AddMipMap(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level,
++ IN gctINT imageFormat,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Depth,
++ IN gctUINT Faces,
++ IN gcePOOL Pool,
++ OUT gcoSURF * Surface
++ );
++
++gceSTATUS
++gcoTEXTURE_AddMipMapFromClient(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoTEXTURE_AddMipMapFromSurface(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoTEXTURE_SetMaxLevel(
++ IN gcoTEXTURE Texture,
++ IN gctUINT Levels
++ );
++
++gceSTATUS
++gcoTEXTURE_SetEndianHint(
++ IN gcoTEXTURE Texture,
++ IN gceENDIAN_HINT EndianHint
++ );
++
++gceSTATUS
++gcoTEXTURE_Disable(
++ IN gcoHAL Hal,
++ IN gctINT Sampler
++ );
++
++gceSTATUS
++gcoTEXTURE_Flush(
++ IN gcoTEXTURE Texture
++ );
++
++gceSTATUS
++gcoTEXTURE_QueryCaps(
++ IN gcoHAL Hal,
++ OUT gctUINT * MaxWidth,
++ OUT gctUINT * MaxHeight,
++ OUT gctUINT * MaxDepth,
++ OUT gctBOOL * Cubic,
++ OUT gctBOOL * NonPowerOfTwo,
++ OUT gctUINT * VertexSamplers,
++ OUT gctUINT * PixelSamplers
++ );
++
++gceSTATUS
++gcoTEXTURE_GetTiling(
++ IN gcoTEXTURE Texture,
++ IN gctINT preferLevel,
++ OUT gceTILING * Tiling
++ );
++
++gceSTATUS
++gcoTEXTURE_GetClosestFormat(
++ IN gcoHAL Hal,
++ IN gceSURF_FORMAT InFormat,
++ OUT gceSURF_FORMAT* OutFormat
++ );
++
++gceSTATUS
++gcoTEXTURE_RenderIntoMipMap(
++ IN gcoTEXTURE Texture,
++ IN gctINT Level
++ );
++
++gceSTATUS
++gcoTEXTURE_IsRenderable(
++ IN gcoTEXTURE Texture,
++ IN gctUINT Level
++ );
++
++gceSTATUS
++gcoTEXTURE_IsRenderableEx(
++ IN gcoTEXTURE Texture,
++ IN gctUINT Level
++ );
++
++gceSTATUS
++gcoTEXTURE_IsComplete(
++ IN gcoTEXTURE Texture,
++ IN gctINT MaxLevel
++ );
++
++gceSTATUS
++gcoTEXTURE_BindTexture(
++ IN gcoTEXTURE Texture,
++ IN gctINT Target,
++ IN gctINT Sampler,
++ IN gcsTEXTURE_PTR Info
++ );
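++
++/* Illustrative workflow (editorial addition, hedged): a plausible
++** construct/upload/bind/destroy sequence using only entry points declared
++** above. "hal", "data" and "samplerInfo" are caller-provided placeholders;
++** gcvNULL is the HAL null macro; error checking is omitted.
++**
++**     gcoTEXTURE tex = gcvNULL;
++**     gcoTEXTURE_Construct(hal, &tex);
++**     gcoTEXTURE_Upload(tex, gcvFACE_NONE,
++**                       256, 256, 0,          /* width, height, slice */
++**                       data, 256 * 4,        /* pixels, stride */
++**                       gcvSURF_A8R8G8B8);
++**     gcoTEXTURE_BindTexture(tex, 0, 0, &samplerInfo);
++**     ...
++**     gcoTEXTURE_Destroy(tex);
++*/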
++
++/******************************************************************************\
++******************************* gcoSTREAM Object ******************************
++\******************************************************************************/
++
++typedef enum _gceVERTEX_FORMAT
++{
++ gcvVERTEX_BYTE,
++ gcvVERTEX_UNSIGNED_BYTE,
++ gcvVERTEX_SHORT,
++ gcvVERTEX_UNSIGNED_SHORT,
++ gcvVERTEX_INT,
++ gcvVERTEX_UNSIGNED_INT,
++ gcvVERTEX_FIXED,
++ gcvVERTEX_HALF,
++ gcvVERTEX_FLOAT,
++ gcvVERTEX_UNSIGNED_INT_10_10_10_2,
++ gcvVERTEX_INT_10_10_10_2,
++}
++gceVERTEX_FORMAT;
++
++gceSTATUS
++gcoSTREAM_Construct(
++ IN gcoHAL Hal,
++ OUT gcoSTREAM * Stream
++ );
++
++gceSTATUS
++gcoSTREAM_Destroy(
++ IN gcoSTREAM Stream
++ );
++
++gceSTATUS
++gcoSTREAM_Upload(
++ IN gcoSTREAM Stream,
++ IN gctCONST_POINTER Buffer,
++ IN gctUINT32 Offset,
++ IN gctSIZE_T Bytes,
++ IN gctBOOL Dynamic
++ );
++
++gceSTATUS
++gcoSTREAM_SetStride(
++ IN gcoSTREAM Stream,
++ IN gctUINT32 Stride
++ );
++
++gceSTATUS
++gcoSTREAM_Lock(
++ IN gcoSTREAM Stream,
++ OUT gctPOINTER * Logical,
++ OUT gctUINT32 * Physical
++ );
++
++gceSTATUS
++gcoSTREAM_Unlock(
++ IN gcoSTREAM Stream
++ );
++
++gceSTATUS
++gcoSTREAM_Reserve(
++ IN gcoSTREAM Stream,
++ IN gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gcoSTREAM_Flush(
++ IN gcoSTREAM Stream
++ );
++
++/* Dynamic buffer API. */
++gceSTATUS
++gcoSTREAM_SetDynamic(
++ IN gcoSTREAM Stream,
++ IN gctSIZE_T Bytes,
++ IN gctUINT Buffers
++ );
++
++typedef struct _gcsSTREAM_INFO
++{
++ gctUINT index;
++ gceVERTEX_FORMAT format;
++ gctBOOL normalized;
++ gctUINT components;
++ gctSIZE_T size;
++ gctCONST_POINTER data;
++ gctUINT stride;
++}
++gcsSTREAM_INFO, * gcsSTREAM_INFO_PTR;
++
++gceSTATUS
++gcoSTREAM_UploadDynamic(
++ IN gcoSTREAM Stream,
++ IN gctUINT VertexCount,
++ IN gctUINT InfoCount,
++ IN gcsSTREAM_INFO_PTR Info,
++ IN gcoVERTEX Vertex
++ );
++
++gceSTATUS
++gcoSTREAM_CPUCacheOperation(
++ IN gcoSTREAM Stream,
++ IN gceCACHEOPERATION Operation
++ );
++
++/******************************************************************************\
++******************************** gcoVERTEX Object ******************************
++\******************************************************************************/
++
++typedef struct _gcsVERTEX_ATTRIBUTES
++{
++ gceVERTEX_FORMAT format;
++ gctBOOL normalized;
++ gctUINT32 components;
++ gctSIZE_T size;
++ gctUINT32 stream;
++ gctUINT32 offset;
++ gctUINT32 stride;
++}
++gcsVERTEX_ATTRIBUTES;
++
++gceSTATUS
++gcoVERTEX_Construct(
++ IN gcoHAL Hal,
++ OUT gcoVERTEX * Vertex
++ );
++
++gceSTATUS
++gcoVERTEX_Destroy(
++ IN gcoVERTEX Vertex
++ );
++
++gceSTATUS
++gcoVERTEX_Reset(
++ IN gcoVERTEX Vertex
++ );
++
++gceSTATUS
++gcoVERTEX_EnableAttribute(
++ IN gcoVERTEX Vertex,
++ IN gctUINT32 Index,
++ IN gceVERTEX_FORMAT Format,
++ IN gctBOOL Normalized,
++ IN gctUINT32 Components,
++ IN gcoSTREAM Stream,
++ IN gctUINT32 Offset,
++ IN gctUINT32 Stride
++ );
++
++gceSTATUS
++gcoVERTEX_DisableAttribute(
++ IN gcoVERTEX Vertex,
++ IN gctUINT32 Index
++ );
++
++gceSTATUS
++gcoVERTEX_Bind(
++ IN gcoVERTEX Vertex
++ );
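++
++/* Illustrative sketch (editorial addition): feeding a packed position
++** stream (three floats per vertex) through gcoSTREAM and gcoVERTEX with
++** the declarations above. "hal", "positions" and "vertexCount" are
++** placeholders; gcvFALSE is the HAL boolean macro.
++**
++**     gcoSTREAM stream;
++**     gcoVERTEX vertex;
++**     gcoSTREAM_Construct(hal, &stream);
++**     gcoSTREAM_Upload(stream, positions, 0,
++**                      vertexCount * 3 * sizeof(gctFLOAT), gcvFALSE);
++**     gcoSTREAM_SetStride(stream, 3 * sizeof(gctFLOAT));
++**     gcoVERTEX_Construct(hal, &vertex);
++**     gcoVERTEX_EnableAttribute(vertex, 0, gcvVERTEX_FLOAT, gcvFALSE,
++**                               3, stream, 0, 3 * sizeof(gctFLOAT));
++**     gcoVERTEX_Bind(vertex);
++*/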
++
++/*******************************************************************************
++***** gcoVERTEXARRAY Object ***************************************************/
++
++typedef struct _gcsVERTEXARRAY
++{
++ /* Enabled. */
++ gctBOOL enable;
++
++ /* Number of components. */
++ gctINT size;
++
++ /* Attribute format. */
++ gceVERTEX_FORMAT format;
++
++ /* Flag whether the attribute is normalized or not. */
++ gctBOOL normalized;
++
++ /* Stride of the component. */
++ gctUINT stride;
++
++ /* Pointer to the attribute data. */
++ gctCONST_POINTER pointer;
++
++ /* Stream object owning the attribute data. */
++ gcoSTREAM stream;
++
++ /* Generic values for attribute. */
++ gctFLOAT genericValue[4];
++
++ /* Generic size for attribute. */
++ gctINT genericSize;
++
++ /* Vertex shader linkage. */
++ gctUINT linkage;
++
++#if gcdUSE_WCLIP_PATCH
++ gctBOOL isPosition;
++#endif
++}
++gcsVERTEXARRAY,
++* gcsVERTEXARRAY_PTR;
++
++gceSTATUS
++gcoVERTEXARRAY_Construct(
++ IN gcoHAL Hal,
++ OUT gcoVERTEXARRAY * Vertex
++ );
++
++gceSTATUS
++gcoVERTEXARRAY_Destroy(
++ IN gcoVERTEXARRAY Vertex
++ );
++
++gceSTATUS
++gcoVERTEXARRAY_Bind(
++ IN gcoVERTEXARRAY Vertex,
++ IN gctUINT32 EnableBits,
++ IN gcsVERTEXARRAY_PTR VertexArray,
++ IN gctUINT First,
++ IN gctSIZE_T Count,
++ IN gceINDEX_TYPE IndexType,
++ IN gcoINDEX IndexObject,
++ IN gctPOINTER IndexMemory,
++ IN OUT gcePRIMITIVE * PrimitiveType,
++#if gcdUSE_WCLIP_PATCH
++ IN OUT gctUINT * PrimitiveCount,
++ IN OUT gctFLOAT * wLimitRms,
++ IN OUT gctBOOL * wLimitDirty
++#else
++ IN OUT gctUINT * PrimitiveCount
++#endif
++ );
++
++gctUINT
++gcoVERTEXARRAY_GetMaxStream(
++ IN gcoVERTEXARRAY Vertex
++);
++
++gceSTATUS
++gcoVERTEXARRAY_SetMaxStream(
++ IN gcoVERTEXARRAY Vertex,
++ gctUINT maxStreams
++);
++/*******************************************************************************
++***** Composition *************************************************************/
++
++typedef enum _gceCOMPOSITION
++{
++ gcvCOMPOSE_CLEAR = 1,
++ gcvCOMPOSE_BLUR,
++ gcvCOMPOSE_DIM,
++ gcvCOMPOSE_LAYER
++}
++gceCOMPOSITION;
++
++typedef struct _gcsCOMPOSITION * gcsCOMPOSITION_PTR;
++typedef struct _gcsCOMPOSITION
++{
++ /* Structure size. */
++ gctUINT structSize;
++
++ /* Composition operation. */
++ gceCOMPOSITION operation;
++
++ /* Layer to be composed. */
++ gcoSURF layer;
++
++ /* Source and target coordinates. */
++ gcsRECT srcRect;
++ gcsRECT trgRect;
++
++ /* Target rectangle */
++ gcsPOINT v0;
++ gcsPOINT v1;
++ gcsPOINT v2;
++
++ /* Blending parameters. */
++ gctBOOL enableBlending;
++ gctBOOL premultiplied;
++ gctUINT8 alphaValue;
++
++ /* Clear color. */
++ gctFLOAT r;
++ gctFLOAT g;
++ gctFLOAT b;
++ gctFLOAT a;
++}
++gcsCOMPOSITION;
++
++gceSTATUS
++gco3D_ProbeComposition(
++ gctBOOL ResetIfEmpty
++ );
++
++gceSTATUS
++gco3D_CompositionBegin(
++ void
++ );
++
++gceSTATUS
++gco3D_ComposeLayer(
++ IN gcsCOMPOSITION_PTR Layer
++ );
++
++gceSTATUS
++gco3D_CompositionSignals(
++ IN gctHANDLE Process,
++ IN gctSIGNAL Signal1,
++ IN gctSIGNAL Signal2
++ );
++
++gceSTATUS
++gco3D_CompositionEnd(
++ IN gcoSURF Target,
++ IN gctBOOL Synchronous
++ );
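++
++/* Illustrative flow (editorial addition, hedged): the declarations above
++** suggest a begin/compose/end bracket. A half-strength dim layer might be
++** submitted as follows; "target" is a caller-provided gcoSURF and gcvTRUE
++** is the HAL boolean macro.
++**
++**     gcsCOMPOSITION layer;
++**     memset(&layer, 0, sizeof layer);
++**     layer.structSize = sizeof layer;
++**     layer.operation  = gcvCOMPOSE_DIM;
++**     layer.alphaValue = 128;
++**     gco3D_CompositionBegin();
++**     gco3D_ComposeLayer(&layer);
++**     gco3D_CompositionEnd(target, gcvTRUE);   /* synchronous */
++*/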
++
++/* Frame Database */
++gceSTATUS
++gcoHAL_AddFrameDB(
++ void
++ );
++
++gceSTATUS
++gcoHAL_DumpFrameDB(
++ gctCONST_STRING Filename OPTIONAL
++ );
++
++gceSTATUS
++gcoHAL_GetSharedInfo(
++ IN gctUINT32 Pid,
++ IN gctUINT32 DataId,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER Data
++ );
++
++gceSTATUS
++gcoHAL_SetSharedInfo(
++ IN gctUINT32 DataId,
++ IN gctPOINTER Data,
++ IN gctSIZE_T Bytes
++ );
++
++#if VIVANTE_PROFILER_CONTEXT
++gceSTATUS
++gcoHARDWARE_GetContext(
++ IN gcoHARDWARE Hardware,
++ OUT gctUINT32 * Context
++ );
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* VIVANTE_NO_3D */
++#endif /* __gc_hal_engine_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_engine_vg.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_engine_vg.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_engine_vg.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_engine_vg.h 2015-07-27 23:13:06.214808565 +0200
+@@ -0,0 +1,904 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_engine_vg_h_
++#define __gc_hal_engine_vg_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#include "gc_hal_types.h"
++
++/******************************************************************************\
++******************************** VG Enumerations *******************************
++\******************************************************************************/
++
++/**
++** @ingroup gcoVG
++**
++** @brief Tiling modes for painting and imaging.
++**
++** This enumeration defines the tiling modes supported by the HAL. This is
++** in fact a one-to-one mapping of the OpenVG 1.1 tile modes.
++*/
++typedef enum _gceTILE_MODE
++{
++ gcvTILE_FILL,
++ gcvTILE_PAD,
++ gcvTILE_REPEAT,
++ gcvTILE_REFLECT
++}
++gceTILE_MODE;
++
++/******************************************************************************/
++/** @ingroup gcoVG
++**
++** @brief The different paint modes.
++**
++** This enumeration lists the available paint modes.
++*/
++typedef enum _gcePAINT_TYPE
++{
++ /** Solid color. */
++ gcvPAINT_MODE_SOLID,
++
++ /** Linear gradient. */
++ gcvPAINT_MODE_LINEAR,
++
++ /** Radial gradient. */
++ gcvPAINT_MODE_RADIAL,
++
++ /** Pattern. */
++ gcvPAINT_MODE_PATTERN,
++
++ /** Mode count. */
++ gcvPAINT_MODE_COUNT
++}
++gcePAINT_TYPE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Types of path data supported by HAL.
++**
++** This enumeration defines the types of path data supported by the HAL.
++** This is in fact a one-to-one mapping of the OpenVG 1.1 path types.
++*/
++typedef enum _gcePATHTYPE
++{
++ gcePATHTYPE_UNKNOWN = -1,
++ gcePATHTYPE_INT8,
++ gcePATHTYPE_INT16,
++ gcePATHTYPE_INT32,
++ gcePATHTYPE_FLOAT
++}
++gcePATHTYPE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Supported path segment commands.
++**
++** This enumeration defines the path segment commands supported by the HAL.
++*/
++typedef enum _gceVGCMD
++{
++ gcvVGCMD_END, /* 0: GCCMD_TS_OPCODE_END */
++ gcvVGCMD_CLOSE, /* 1: GCCMD_TS_OPCODE_CLOSE */
++ gcvVGCMD_MOVE, /* 2: GCCMD_TS_OPCODE_MOVE */
++ gcvVGCMD_MOVE_REL, /* 3: GCCMD_TS_OPCODE_MOVE_REL */
++ gcvVGCMD_LINE, /* 4: GCCMD_TS_OPCODE_LINE */
++ gcvVGCMD_LINE_REL, /* 5: GCCMD_TS_OPCODE_LINE_REL */
++ gcvVGCMD_QUAD, /* 6: GCCMD_TS_OPCODE_QUADRATIC */
++ gcvVGCMD_QUAD_REL, /* 7: GCCMD_TS_OPCODE_QUADRATIC_REL */
++ gcvVGCMD_CUBIC, /* 8: GCCMD_TS_OPCODE_CUBIC */
++ gcvVGCMD_CUBIC_REL, /* 9: GCCMD_TS_OPCODE_CUBIC_REL */
++ gcvVGCMD_BREAK, /* 10: GCCMD_TS_OPCODE_BREAK */
++ gcvVGCMD_HLINE, /* 11: ******* R E S E R V E D *******/
++ gcvVGCMD_HLINE_REL, /* 12: ******* R E S E R V E D *******/
++ gcvVGCMD_VLINE, /* 13: ******* R E S E R V E D *******/
++ gcvVGCMD_VLINE_REL, /* 14: ******* R E S E R V E D *******/
++ gcvVGCMD_SQUAD, /* 15: ******* R E S E R V E D *******/
++ gcvVGCMD_SQUAD_REL, /* 16: ******* R E S E R V E D *******/
++ gcvVGCMD_SCUBIC, /* 17: ******* R E S E R V E D *******/
++ gcvVGCMD_SCUBIC_REL, /* 18: ******* R E S E R V E D *******/
++ gcvVGCMD_SCCWARC, /* 19: ******* R E S E R V E D *******/
++ gcvVGCMD_SCCWARC_REL, /* 20: ******* R E S E R V E D *******/
++ gcvVGCMD_SCWARC, /* 21: ******* R E S E R V E D *******/
++ gcvVGCMD_SCWARC_REL, /* 22: ******* R E S E R V E D *******/
++ gcvVGCMD_LCCWARC, /* 23: ******* R E S E R V E D *******/
++ gcvVGCMD_LCCWARC_REL, /* 24: ******* R E S E R V E D *******/
++ gcvVGCMD_LCWARC, /* 25: ******* R E S E R V E D *******/
++ gcvVGCMD_LCWARC_REL, /* 26: ******* R E S E R V E D *******/
++
++    /* The width of the command recognized by the hardware, in bits. */
++ gcvVGCMD_WIDTH = 5,
++
++ /* Hardware command mask. */
++ gcvVGCMD_MASK = (1 << gcvVGCMD_WIDTH) - 1,
++
++ /* Command modifiers. */
++ gcvVGCMD_H_MOD = 1 << gcvVGCMD_WIDTH, /* = 32 */
++ gcvVGCMD_V_MOD = 2 << gcvVGCMD_WIDTH, /* = 64 */
++ gcvVGCMD_S_MOD = 3 << gcvVGCMD_WIDTH, /* = 96 */
++ gcvVGCMD_ARC_MOD = 4 << gcvVGCMD_WIDTH, /* = 128 */
++
++ /* Emulated LINE commands. */
++ gcvVGCMD_HLINE_EMUL = gcvVGCMD_H_MOD | gcvVGCMD_LINE, /* = 36 */
++ gcvVGCMD_HLINE_EMUL_REL = gcvVGCMD_H_MOD | gcvVGCMD_LINE_REL, /* = 37 */
++ gcvVGCMD_VLINE_EMUL = gcvVGCMD_V_MOD | gcvVGCMD_LINE, /* = 68 */
++ gcvVGCMD_VLINE_EMUL_REL = gcvVGCMD_V_MOD | gcvVGCMD_LINE_REL, /* = 69 */
++
++ /* Emulated SMOOTH commands. */
++ gcvVGCMD_SQUAD_EMUL = gcvVGCMD_S_MOD | gcvVGCMD_QUAD, /* = 102 */
++ gcvVGCMD_SQUAD_EMUL_REL = gcvVGCMD_S_MOD | gcvVGCMD_QUAD_REL, /* = 103 */
++ gcvVGCMD_SCUBIC_EMUL = gcvVGCMD_S_MOD | gcvVGCMD_CUBIC, /* = 104 */
++ gcvVGCMD_SCUBIC_EMUL_REL = gcvVGCMD_S_MOD | gcvVGCMD_CUBIC_REL, /* = 105 */
++
++ /* Emulation ARC commands. */
++ gcvVGCMD_ARC_LINE = gcvVGCMD_ARC_MOD | gcvVGCMD_LINE, /* = 132 */
++ gcvVGCMD_ARC_LINE_REL = gcvVGCMD_ARC_MOD | gcvVGCMD_LINE_REL, /* = 133 */
++ gcvVGCMD_ARC_QUAD = gcvVGCMD_ARC_MOD | gcvVGCMD_QUAD, /* = 134 */
++ gcvVGCMD_ARC_QUAD_REL = gcvVGCMD_ARC_MOD | gcvVGCMD_QUAD_REL /* = 135 */
++}
++gceVGCMD;
++typedef enum _gceVGCMD * gceVGCMD_PTR;
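++
++/* Worked example (editorial addition): with gcvVGCMD_WIDTH = 5 the low
++** five bits carry the hardware opcode and the upper bits the modifier,
++** so an emulated command decodes back to its base opcode via the mask:
++**
++**     gceVGCMD cmd  = gcvVGCMD_HLINE_EMUL;              /* 32 | 4 = 36 */
++**     gceVGCMD base = (gceVGCMD)(cmd & gcvVGCMD_MASK);  /* gcvVGCMD_LINE (4) */
++*/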
++
++/**
++** @ingroup gcoVG
++**
++** @brief Blending modes supported by the HAL.
++**
++** This enumeration defines the blending modes supported by the HAL. This is
++** in fact a one-to-one mapping of the OpenVG 1.1 blending modes.
++*/
++typedef enum _gceVG_BLEND
++{
++ gcvVG_BLEND_SRC,
++ gcvVG_BLEND_SRC_OVER,
++ gcvVG_BLEND_DST_OVER,
++ gcvVG_BLEND_SRC_IN,
++ gcvVG_BLEND_DST_IN,
++ gcvVG_BLEND_MULTIPLY,
++ gcvVG_BLEND_SCREEN,
++ gcvVG_BLEND_DARKEN,
++ gcvVG_BLEND_LIGHTEN,
++ gcvVG_BLEND_ADDITIVE,
++ gcvVG_BLEND_SUBTRACT,
++ gcvVG_BLEND_FILTER
++}
++gceVG_BLEND;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Image modes supported by the HAL.
++**
++** This enumeration defines the image modes supported by the HAL. This is
++** in fact a one-to-one mapping of the OpenVG 1.1 image modes with the addition
++** of NO IMAGE.
++*/
++typedef enum _gceVG_IMAGE
++{
++ gcvVG_IMAGE_NONE,
++ gcvVG_IMAGE_NORMAL,
++ gcvVG_IMAGE_MULTIPLY,
++ gcvVG_IMAGE_STENCIL,
++ gcvVG_IMAGE_FILTER
++}
++gceVG_IMAGE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Filter modes for patterns and imaging.
++**
++** This enumeration defines the filter modes supported by the HAL.
++*/
++typedef enum _gceIMAGE_FILTER
++{
++ gcvFILTER_POINT,
++ gcvFILTER_LINEAR,
++ gcvFILTER_BI_LINEAR
++}
++gceIMAGE_FILTER;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Primitive modes supported by the HAL.
++**
++** This enumeration defines the primitive modes supported by the HAL.
++*/
++typedef enum _gceVG_PRIMITIVE
++{
++ gcvVG_SCANLINE,
++ gcvVG_RECTANGLE,
++ gcvVG_TESSELLATED,
++ gcvVG_TESSELLATED_TILED
++}
++gceVG_PRIMITIVE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Rendering quality modes supported by the HAL.
++**
++** This enumeration defines the rendering quality modes supported by the HAL.
++*/
++typedef enum _gceRENDER_QUALITY
++{
++ gcvVG_NONANTIALIASED,
++ gcvVG_2X2_MSAA,
++ gcvVG_2X4_MSAA,
++ gcvVG_4X4_MSAA
++}
++gceRENDER_QUALITY;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Fill rules supported by the HAL.
++**
++** This enumeration defines the fill rules supported by the HAL.
++*/
++typedef enum _gceFILL_RULE
++{
++ gcvVG_EVEN_ODD,
++ gcvVG_NON_ZERO
++}
++gceFILL_RULE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Cap styles supported by the HAL.
++**
++** This enumeration defines the cap styles supported by the HAL.
++*/
++typedef enum _gceCAP_STYLE
++{
++ gcvCAP_BUTT,
++ gcvCAP_ROUND,
++ gcvCAP_SQUARE
++}
++gceCAP_STYLE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Join styles supported by the HAL.
++**
++** This enumeration defines the join styles supported by the HAL.
++*/
++typedef enum _gceJOIN_STYLE
++{
++ gcvJOIN_MITER,
++ gcvJOIN_ROUND,
++ gcvJOIN_BEVEL
++}
++gceJOIN_STYLE;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Channel mask values.
++**
++** This enumeration defines the values for channel mask used in image
++** filtering.
++*/
++
++/* Base values for channel mask definitions. */
++#define gcvCHANNEL_X (0)
++#define gcvCHANNEL_R (1 << 0)
++#define gcvCHANNEL_G (1 << 1)
++#define gcvCHANNEL_B (1 << 2)
++#define gcvCHANNEL_A (1 << 3)
++
++typedef enum _gceCHANNEL
++{
++ gcvCHANNEL_XXXX = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X),
++ gcvCHANNEL_XXXA = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_A),
++ gcvCHANNEL_XXBX = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_X),
++ gcvCHANNEL_XXBA = (gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_A),
++
++ gcvCHANNEL_XGXX = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_X),
++ gcvCHANNEL_XGXA = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_A),
++ gcvCHANNEL_XGBX = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_X),
++ gcvCHANNEL_XGBA = (gcvCHANNEL_X | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_A),
++
++ gcvCHANNEL_RXXX = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_X),
++ gcvCHANNEL_RXXA = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_X | gcvCHANNEL_A),
++ gcvCHANNEL_RXBX = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_X),
++ gcvCHANNEL_RXBA = (gcvCHANNEL_R | gcvCHANNEL_X | gcvCHANNEL_B | gcvCHANNEL_A),
++
++ gcvCHANNEL_RGXX = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_X),
++ gcvCHANNEL_RGXA = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_X | gcvCHANNEL_A),
++ gcvCHANNEL_RGBX = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_X),
++ gcvCHANNEL_RGBA = (gcvCHANNEL_R | gcvCHANNEL_G | gcvCHANNEL_B | gcvCHANNEL_A),
++}
++gceCHANNEL;
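++
++/* Worked example (editorial addition): the channel bits compose by OR,
++** so gcvCHANNEL_RGBA == (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) == 0xF,
++** while gcvCHANNEL_XXXA keeps only the alpha bit (0x8).
++*/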
++
++/******************************************************************************\
++******************************** VG Structures *******************************
++\******************************************************************************/
++
++/**
++** @ingroup gcoVG
++**
++** @brief Definition of the color ramp used by the gradient paints.
++**
++** The gcsCOLOR_RAMP structure defines the layout of one single color inside
++** a color ramp which is used by gradient paints.
++*/
++typedef struct _gcsCOLOR_RAMP
++{
++ /** Value for the color stop. */
++ gctFLOAT stop;
++
++ /** Red color channel value for the color stop. */
++ gctFLOAT red;
++
++ /** Green color channel value for the color stop. */
++ gctFLOAT green;
++
++ /** Blue color channel value for the color stop. */
++ gctFLOAT blue;
++
++ /** Alpha color channel value for the color stop. */
++ gctFLOAT alpha;
++}
++gcsCOLOR_RAMP, * gcsCOLOR_RAMP_PTR;
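++
++/* Illustrative sketch (editorial addition): a minimal two-stop ramp from
++** opaque black to opaque white, expressed with the structure above. Field
++** order follows the declaration: stop, red, green, blue, alpha.
++**
++**     gcsCOLOR_RAMP ramp[2] =
++**     {
++**         { 0.0f, 0.0f, 0.0f, 0.0f, 1.0f },   /* stop 0: black */
++**         { 1.0f, 1.0f, 1.0f, 1.0f, 1.0f },   /* stop 1: white */
++**     };
++*/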
++
++/**
++** @ingroup gcoVG
++**
++** @brief Definition of the color ramp used by the gradient paints in fixed form.
++**
++** The gcsCOLOR_RAMP structure defines the layout of one single color inside
++** a color ramp which is used by gradient paints.
++*/
++typedef struct _gcsFIXED_COLOR_RAMP
++{
++ /** Value for the color stop. */
++ gctFIXED_POINT stop;
++
++ /** Red color channel value for the color stop. */
++ gctFIXED_POINT red;
++
++ /** Green color channel value for the color stop. */
++ gctFIXED_POINT green;
++
++ /** Blue color channel value for the color stop. */
++ gctFIXED_POINT blue;
++
++ /** Alpha color channel value for the color stop. */
++ gctFIXED_POINT alpha;
++}
++gcsFIXED_COLOR_RAMP, * gcsFIXED_COLOR_RAMP_PTR;
++
++
++/**
++** @ingroup gcoVG
++**
++** @brief Rectangle structure used by the gcoVG object.
++**
++** This structure defines the layout of a rectangle. Make sure width and
++** height are larger than 0.
++*/
++typedef struct _gcsVG_RECT * gcsVG_RECT_PTR;
++typedef struct _gcsVG_RECT
++{
++ /** Left location of the rectangle. */
++ gctINT x;
++
++ /** Top location of the rectangle. */
++ gctINT y;
++
++ /** Width of the rectangle. */
++ gctINT width;
++
++ /** Height of the rectangle. */
++ gctINT height;
++}
++gcsVG_RECT;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Path command buffer attribute structure.
++**
++** The gcsPATH_BUFFER_INFO structure contains the specifics about
++** the layout of the path data command buffer.
++*/
++typedef struct _gcsPATH_BUFFER_INFO * gcsPATH_BUFFER_INFO_PTR;
++typedef struct _gcsPATH_BUFFER_INFO
++{
++ gctUINT reservedForHead;
++ gctUINT reservedForTail;
++}
++gcsPATH_BUFFER_INFO;
++
++/**
++** @ingroup gcoVG
++**
++** @brief Definition of the path data container structure.
++**
++** The gcsPATH structure defines the layout of the path data container.
++*/
++typedef struct _gcsPATH_DATA * gcsPATH_DATA_PTR;
++typedef struct _gcsPATH_DATA
++{
++ /* Data container in command buffer format. */
++ gcsCMDBUFFER data;
++
++ /* Path data type. */
++ gcePATHTYPE dataType;
++}
++gcsPATH_DATA;
++
++
++/******************************************************************************\
++********************************* gcoHAL Object ********************************
++\******************************************************************************/
++
++/* Query path data storage attributes. */
++gceSTATUS
++gcoHAL_QueryPathStorage(
++ IN gcoHAL Hal,
++ OUT gcsPATH_BUFFER_INFO_PTR Information
++ );
++
++/* Associate a completion signal with the command buffer. */
++gceSTATUS
++gcoHAL_AssociateCompletion(
++ IN gcoHAL Hal,
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++/* Release the current command buffer completion signal. */
++gceSTATUS
++gcoHAL_DeassociateCompletion(
++ IN gcoHAL Hal,
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++/* Verify whether the command buffer is still in use. */
++gceSTATUS
++gcoHAL_CheckCompletion(
++ IN gcoHAL Hal,
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++/* Wait until the command buffer is no longer in use. */
++gceSTATUS
++gcoHAL_WaitCompletion(
++ IN gcoHAL Hal,
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++/* Flush the pixel cache. */
++gceSTATUS
++gcoHAL_Flush(
++ IN gcoHAL Hal
++ );
++
++/* Split a hardware address into pool and offset. */
++gceSTATUS
++gcoHAL_SplitAddress(
++ IN gcoHAL Hal,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ );
++
++/* Combine pool and offset into a hardware address. */
++gceSTATUS
++gcoHAL_CombineAddress(
++ IN gcoHAL Hal,
++ IN gcePOOL Pool,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ );
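++
++/* Illustrative round trip (editorial addition, hedged): splitting a
++** hardware address and recombining its parts should be symmetric,
++** assuming both calls succeed. "hal" and "address" are placeholders.
++**
++**     gcePOOL   pool;
++**     gctUINT32 offset, address2;
++**     gcoHAL_SplitAddress(hal, address, &pool, &offset);
++**     gcoHAL_CombineAddress(hal, pool, offset, &address2);
++**     /* address2 == address */
++*/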
++
++/* Schedule to free linear video memory allocated. */
++gceSTATUS
++gcoHAL_ScheduleVideoMemory(
++ IN gcoHAL Hal,
++ IN gctUINT64 Node
++ );
++
++/* Free linear video memory allocated with gcoHAL_AllocateLinearVideoMemory. */
++gceSTATUS
++gcoHAL_FreeVideoMemory(
++ IN gcoHAL Hal,
++ IN gctUINT64 Node
++ );
++
++/* Query command buffer attributes. */
++gceSTATUS
++gcoHAL_QueryCommandBuffer(
++ IN gcoHAL Hal,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ );
++/* Allocate and lock linear video memory. */
++gceSTATUS
++gcoHAL_AllocateLinearVideoMemory(
++ IN gcoHAL Hal,
++ IN gctUINT Size,
++ IN gctUINT Alignment,
++ IN gcePOOL Pool,
++ OUT gctUINT64 * Node,
++ OUT gctUINT32 * Address,
++ OUT gctPOINTER * Memory
++ );
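++
++/* Illustrative sketch (editorial addition): pairing the linear video
++** memory allocator with its release path. "hal" is a placeholder and the
++** pool value is one plausible choice; gcvPOOL_DEFAULT is assumed to be a
++** member of gcePOOL, which is declared elsewhere.
++**
++**     gctUINT64  node;
++**     gctUINT32  address;
++**     gctPOINTER memory;
++**     gcoHAL_AllocateLinearVideoMemory(hal, 4096, 64, gcvPOOL_DEFAULT,
++**                                      &node, &address, &memory);
++**     ...
++**     gcoHAL_FreeVideoMemory(hal, node);
++*/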
++
++/* Align the specified size accordingly to the hardware requirements. */
++gceSTATUS
++gcoHAL_GetAlignedSurfaceSize(
++ IN gcoHAL Hal,
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32_PTR Width,
++ IN OUT gctUINT32_PTR Height
++ );
++
++gceSTATUS
++gcoHAL_ReserveTask(
++ IN gcoHAL Hal,
++ IN gceBLOCK Block,
++ IN gctUINT TaskCount,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++/******************************************************************************\
++********************************** gcoVG Object ********************************
++\******************************************************************************/
++
++/** @defgroup gcoVG gcoVG
++**
++** The gcoVG object abstracts the VG hardware pipe.
++*/
++
++gctBOOL
++gcoVG_IsMaskSupported(
++ IN gceSURF_FORMAT Format
++ );
++
++gctBOOL
++gcoVG_IsTargetSupported(
++ IN gceSURF_FORMAT Format
++ );
++
++gctBOOL
++gcoVG_IsImageSupported(
++ IN gceSURF_FORMAT Format
++ );
++
++gctUINT8 gcoVG_PackColorComponent(
++ gctFLOAT Value
++ );
++
++gceSTATUS
++gcoVG_Construct(
++ IN gcoHAL Hal,
++ OUT gcoVG * Vg
++ );
++
++gceSTATUS
++gcoVG_Destroy(
++ IN gcoVG Vg
++ );
++
++gceSTATUS
++gcoVG_SetTarget(
++ IN gcoVG Vg,
++ IN gcoSURF Target
++ );
++
++gceSTATUS
++gcoVG_UnsetTarget(
++ IN gcoVG Vg,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoVG_SetUserToSurface(
++ IN gcoVG Vg,
++ IN gctFLOAT UserToSurface[9]
++ );
++
++gceSTATUS
++gcoVG_SetSurfaceToImage(
++ IN gcoVG Vg,
++ IN gctFLOAT SurfaceToImage[9]
++ );
++
++gceSTATUS
++gcoVG_EnableMask(
++ IN gcoVG Vg,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gcoVG_SetMask(
++ IN gcoVG Vg,
++ IN gcoSURF Mask
++ );
++
++gceSTATUS
++gcoVG_UnsetMask(
++ IN gcoVG Vg,
++ IN gcoSURF Surface
++ );
++
++gceSTATUS
++gcoVG_FlushMask(
++ IN gcoVG Vg
++ );
++
++gceSTATUS
++gcoVG_EnableScissor(
++ IN gcoVG Vg,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gcoVG_SetScissor(
++ IN gcoVG Vg,
++ IN gctSIZE_T RectangleCount,
++ IN gcsVG_RECT_PTR Rectangles
++ );
++
++gceSTATUS
++gcoVG_EnableColorTransform(
++ IN gcoVG Vg,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gcoVG_SetColorTransform(
++ IN gcoVG Vg,
++ IN gctFLOAT ColorTransform[8]
++ );
++
++gceSTATUS
++gcoVG_SetTileFillColor(
++ IN gcoVG Vg,
++ IN gctFLOAT Red,
++ IN gctFLOAT Green,
++ IN gctFLOAT Blue,
++ IN gctFLOAT Alpha
++ );
++
++gceSTATUS
++gcoVG_SetSolidPaint(
++ IN gcoVG Vg,
++ IN gctUINT8 Red,
++ IN gctUINT8 Green,
++ IN gctUINT8 Blue,
++ IN gctUINT8 Alpha
++ );
++
++gceSTATUS
++gcoVG_SetLinearPaint(
++ IN gcoVG Vg,
++ IN gctFLOAT Constant,
++ IN gctFLOAT StepX,
++ IN gctFLOAT StepY
++ );
++
++gceSTATUS
++gcoVG_SetRadialPaint(
++ IN gcoVG Vg,
++ IN gctFLOAT LinConstant,
++ IN gctFLOAT LinStepX,
++ IN gctFLOAT LinStepY,
++ IN gctFLOAT RadConstant,
++ IN gctFLOAT RadStepX,
++ IN gctFLOAT RadStepY,
++ IN gctFLOAT RadStepXX,
++ IN gctFLOAT RadStepYY,
++ IN gctFLOAT RadStepXY
++ );
++
++gceSTATUS
++gcoVG_SetPatternPaint(
++ IN gcoVG Vg,
++ IN gctFLOAT UConstant,
++ IN gctFLOAT UStepX,
++ IN gctFLOAT UStepY,
++ IN gctFLOAT VConstant,
++ IN gctFLOAT VStepX,
++ IN gctFLOAT VStepY,
++ IN gctBOOL Linear
++ );
++
++gceSTATUS
++gcoVG_SetColorRamp(
++ IN gcoVG Vg,
++ IN gcoSURF ColorRamp,
++ IN gceTILE_MODE ColorRampSpreadMode
++ );
++
++gceSTATUS
++gcoVG_SetPattern(
++ IN gcoVG Vg,
++ IN gcoSURF Pattern,
++ IN gceTILE_MODE TileMode,
++ IN gceIMAGE_FILTER Filter
++ );
++
++gceSTATUS
++gcoVG_SetImageMode(
++ IN gcoVG Vg,
++ IN gceVG_IMAGE Mode
++ );
++
++gceSTATUS
++gcoVG_SetBlendMode(
++ IN gcoVG Vg,
++ IN gceVG_BLEND Mode
++ );
++
++gceSTATUS
++gcoVG_SetRenderingQuality(
++ IN gcoVG Vg,
++ IN gceRENDER_QUALITY Quality
++ );
++
++gceSTATUS
++gcoVG_SetFillRule(
++ IN gcoVG Vg,
++ IN gceFILL_RULE FillRule
++ );
++
++gceSTATUS
++gcoVG_FinalizePath(
++ IN gcoVG Vg,
++ IN gcsPATH_DATA_PTR PathData
++ );
++
++gceSTATUS
++gcoVG_Clear(
++ IN gcoVG Vg,
++ IN gctINT X,
++ IN gctINT Y,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++gceSTATUS
++gcoVG_DrawPath(
++ IN gcoVG Vg,
++ IN gcsPATH_DATA_PTR PathData,
++ IN gctFLOAT Scale,
++ IN gctFLOAT Bias,
++ IN gctBOOL SoftwareTesselation
++ );
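++
++/* Illustrative flow (editorial addition, hedged): a minimal solid fill of
++** a finalized path with the gcoVG entry points above. Construction of the
++** path data itself is out of scope here; "hal", "target" and "pathData"
++** are placeholders and error handling is elided.
++**
++**     gcoVG vg;
++**     gcoVG_Construct(hal, &vg);
++**     gcoVG_SetTarget(vg, target);
++**     gcoVG_SetSolidPaint(vg, 255, 0, 0, 255);   /* opaque red */
++**     gcoVG_SetFillRule(vg, gcvVG_NON_ZERO);
++**     gcoVG_DrawPath(vg, pathData, 1.0f, 0.0f, gcvFALSE);
++**     gcoVG_Destroy(vg);
++*/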
++
++gceSTATUS
++gcoVG_DrawImage(
++ IN gcoVG Vg,
++ IN gcoSURF Source,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsPOINT_PTR TargetOrigin,
++ IN gcsSIZE_PTR SourceSize,
++ IN gctINT SourceX,
++ IN gctINT SourceY,
++ IN gctINT TargetX,
++ IN gctINT TargetY,
++ IN gctINT Width,
++ IN gctINT Height,
++ IN gctBOOL Mask
++ );
++
++gceSTATUS
++gcoVG_TesselateImage(
++ IN gcoVG Vg,
++ IN gcoSURF Image,
++ IN gcsVG_RECT_PTR Rectangle,
++ IN gceIMAGE_FILTER Filter,
++ IN gctBOOL Mask,
++ IN gctBOOL SoftwareTesselation
++ );
++
++gceSTATUS
++gcoVG_Blit(
++ IN gcoVG Vg,
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN gcsVG_RECT_PTR SrcRect,
++ IN gcsVG_RECT_PTR TrgRect,
++ IN gceIMAGE_FILTER Filter,
++ IN gceVG_BLEND Mode
++ );
++
++gceSTATUS
++gcoVG_ColorMatrix(
++ IN gcoVG Vg,
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN const gctFLOAT * Matrix,
++ IN gceCHANNEL ColorChannels,
++ IN gctBOOL FilterLinear,
++ IN gctBOOL FilterPremultiplied,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsPOINT_PTR TargetOrigin,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++gceSTATUS
++gcoVG_SeparableConvolve(
++ IN gcoVG Vg,
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN gctINT KernelWidth,
++ IN gctINT KernelHeight,
++ IN gctINT ShiftX,
++ IN gctINT ShiftY,
++ IN const gctINT16 * KernelX,
++ IN const gctINT16 * KernelY,
++ IN gctFLOAT Scale,
++ IN gctFLOAT Bias,
++ IN gceTILE_MODE TilingMode,
++ IN gctFLOAT_PTR FillColor,
++ IN gceCHANNEL ColorChannels,
++ IN gctBOOL FilterLinear,
++ IN gctBOOL FilterPremultiplied,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsPOINT_PTR TargetOrigin,
++ IN gcsSIZE_PTR SourceSize,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++gceSTATUS
++gcoVG_GaussianBlur(
++ IN gcoVG Vg,
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN gctFLOAT StdDeviationX,
++ IN gctFLOAT StdDeviationY,
++ IN gceTILE_MODE TilingMode,
++ IN gctFLOAT_PTR FillColor,
++ IN gceCHANNEL ColorChannels,
++ IN gctBOOL FilterLinear,
++ IN gctBOOL FilterPremultiplied,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsPOINT_PTR TargetOrigin,
++ IN gcsSIZE_PTR SourceSize,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++gceSTATUS
++gcoVG_EnableDither(
++ IN gcoVG Vg,
++ IN gctBOOL Enable
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_vg_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_enum.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_enum.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_enum.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_enum.h 2015-07-27 23:13:06.214808565 +0200
+@@ -0,0 +1,965 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_enum_h_
++#define __gc_hal_enum_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/* Chip models. */
++typedef enum _gceCHIPMODEL
++{
++ gcv300 = 0x0300,
++ gcv320 = 0x0320,
++ gcv350 = 0x0350,
++ gcv355 = 0x0355,
++ gcv400 = 0x0400,
++ gcv410 = 0x0410,
++ gcv420 = 0x0420,
++ gcv450 = 0x0450,
++ gcv500 = 0x0500,
++ gcv530 = 0x0530,
++ gcv600 = 0x0600,
++ gcv700 = 0x0700,
++ gcv800 = 0x0800,
++ gcv860 = 0x0860,
++ gcv880 = 0x0880,
++ gcv1000 = 0x1000,
++ gcv2000 = 0x2000,
++ gcv2100 = 0x2100,
++ gcv4000 = 0x4000,
++}
++gceCHIPMODEL;
++
++/* Chip features. */
++typedef enum _gceFEATURE
++{
++ gcvFEATURE_PIPE_2D = 0,
++ gcvFEATURE_PIPE_3D,
++ gcvFEATURE_PIPE_VG,
++ gcvFEATURE_DC,
++ gcvFEATURE_HIGH_DYNAMIC_RANGE,
++ gcvFEATURE_MODULE_CG,
++ gcvFEATURE_MIN_AREA,
++ gcvFEATURE_BUFFER_INTERLEAVING,
++ gcvFEATURE_BYTE_WRITE_2D,
++ gcvFEATURE_ENDIANNESS_CONFIG,
++ gcvFEATURE_DUAL_RETURN_BUS,
++ gcvFEATURE_DEBUG_MODE,
++ gcvFEATURE_YUY2_RENDER_TARGET,
++ gcvFEATURE_FRAGMENT_PROCESSOR,
++ gcvFEATURE_2DPE20,
++ gcvFEATURE_FAST_CLEAR,
++ gcvFEATURE_YUV420_TILER,
++ gcvFEATURE_YUY2_AVERAGING,
++ gcvFEATURE_FLIP_Y,
++ gcvFEATURE_EARLY_Z,
++ gcvFEATURE_Z_COMPRESSION,
++ gcvFEATURE_MSAA,
++ gcvFEATURE_SPECIAL_ANTI_ALIASING,
++ gcvFEATURE_SPECIAL_MSAA_LOD,
++ gcvFEATURE_422_TEXTURE_COMPRESSION,
++ gcvFEATURE_DXT_TEXTURE_COMPRESSION,
++ gcvFEATURE_ETC1_TEXTURE_COMPRESSION,
++ gcvFEATURE_CORRECT_TEXTURE_CONVERTER,
++ gcvFEATURE_TEXTURE_8K,
++ gcvFEATURE_SCALER,
++ gcvFEATURE_YUV420_SCALER,
++ gcvFEATURE_SHADER_HAS_W,
++ gcvFEATURE_SHADER_HAS_SIGN,
++ gcvFEATURE_SHADER_HAS_FLOOR,
++ gcvFEATURE_SHADER_HAS_CEIL,
++ gcvFEATURE_SHADER_HAS_SQRT,
++ gcvFEATURE_SHADER_HAS_TRIG,
++ gcvFEATURE_VAA,
++ gcvFEATURE_HZ,
++ gcvFEATURE_CORRECT_STENCIL,
++ gcvFEATURE_VG20,
++ gcvFEATURE_VG_FILTER,
++ gcvFEATURE_VG21,
++ gcvFEATURE_VG_DOUBLE_BUFFER,
++ gcvFEATURE_MC20,
++ gcvFEATURE_SUPER_TILED,
++ gcvFEATURE_2D_FILTERBLIT_PLUS_ALPHABLEND,
++ gcvFEATURE_2D_DITHER,
++ gcvFEATURE_2D_A8_TARGET,
++ gcvFEATURE_2D_FILTERBLIT_FULLROTATION,
++ gcvFEATURE_2D_BITBLIT_FULLROTATION,
++ gcvFEATURE_WIDE_LINE,
++ gcvFEATURE_FC_FLUSH_STALL,
++ gcvFEATURE_FULL_DIRECTFB,
++ gcvFEATURE_HALF_FLOAT_PIPE,
++ gcvFEATURE_LINE_LOOP,
++ gcvFEATURE_2D_YUV_BLIT,
++ gcvFEATURE_2D_TILING,
++ gcvFEATURE_NON_POWER_OF_TWO,
++ gcvFEATURE_3D_TEXTURE,
++ gcvFEATURE_TEXTURE_ARRAY,
++ gcvFEATURE_TILE_FILLER,
++ gcvFEATURE_LOGIC_OP,
++ gcvFEATURE_COMPOSITION,
++ gcvFEATURE_MIXED_STREAMS,
++ gcvFEATURE_2D_MULTI_SOURCE_BLT,
++ gcvFEATURE_END_EVENT,
++ gcvFEATURE_VERTEX_10_10_10_2,
++ gcvFEATURE_TEXTURE_10_10_10_2,
++ gcvFEATURE_TEXTURE_ANISOTROPIC_FILTERING,
++ gcvFEATURE_TEXTURE_FLOAT_HALF_FLOAT,
++ gcvFEATURE_2D_ROTATION_STALL_FIX,
++ gcvFEATURE_2D_MULTI_SOURCE_BLT_EX,
++ gcvFEATURE_BUG_FIXES10,
++ gcvFEATURE_2D_MINOR_TILING,
++ /* Supertiled compressed textures are supported. */
++ gcvFEATURE_TEX_COMPRRESSION_SUPERTILED,
++ gcvFEATURE_FAST_MSAA,
++ gcvFEATURE_BUG_FIXED_INDEXED_TRIANGLE_STRIP,
++ gcvFEATURE_TEXTURE_TILED_READ,
++ gcvFEATURE_DEPTH_BIAS_FIX,
++ gcvFEATURE_RECT_PRIMITIVE,
++ gcvFEATURE_BUG_FIXES11,
++ gcvFEATURE_SUPERTILED_TEXTURE,
++ gcvFEATURE_2D_NO_COLORBRUSH_INDEX8,
++ gcvFEATURE_RS_YUV_TARGET,
++ gcvFEATURE_2D_FC_SOURCE,
++ gcvFEATURE_PE_DITHER_FIX,
++ gcvFEATURE_2D_YUV_SEPARATE_STRIDE,
++ gcvFEATURE_FRUSTUM_CLIP_FIX,
++ gcvFEATURE_TEXTURE_LINEAR,
++ gcvFEATURE_TEXTURE_YUV_ASSEMBLER,
++ gcvFEATURE_SHADER_HAS_INSTRUCTION_CACHE,
++ gcvFEATURE_DYNAMIC_FREQUENCY_SCALING,
++ gcvFEATURE_BUGFIX15,
++ gcvFEATURE_2D_GAMMA,
++ gcvFEATURE_2D_COLOR_SPACE_CONVERSION,
++ gcvFEATURE_2D_SUPER_TILE_VERSION,
++ gcvFEATURE_2D_MIRROR_EXTENSION,
++ gcvFEATURE_2D_SUPER_TILE_V1,
++ gcvFEATURE_2D_SUPER_TILE_V2,
++ gcvFEATURE_2D_SUPER_TILE_V3,
++ gcvFEATURE_2D_MULTI_SOURCE_BLT_EX2,
++ gcvFEATURE_ELEMENT_INDEX_UINT,
++ gcvFEATURE_2D_COMPRESSION,
++ gcvFEATURE_2D_OPF_YUV_OUTPUT,
++ gcvFEATURE_2D_MULTI_SRC_BLT_TO_UNIFIED_DST_RECT,
++ gcvFEATURE_2D_YUV_MODE,
++ gcvFEATURE_DECOMPRESS_Z16,
++ gcvFEATURE_LINEAR_RENDER_TARGET,
++ gcvFEATURE_BUG_FIXES8,
++ gcvFEATURE_HALTI2,
++ gcvFEATURE_MMU,
++}
++gceFEATURE;
++
++/* Chip Power Status. */
++typedef enum _gceCHIPPOWERSTATE
++{
++ gcvPOWER_ON = 0,
++ gcvPOWER_OFF,
++ gcvPOWER_IDLE,
++ gcvPOWER_SUSPEND,
++ gcvPOWER_SUSPEND_ATPOWERON,
++ gcvPOWER_OFF_ATPOWERON,
++ gcvPOWER_IDLE_BROADCAST,
++ gcvPOWER_SUSPEND_BROADCAST,
++ gcvPOWER_OFF_BROADCAST,
++ gcvPOWER_OFF_RECOVERY,
++ gcvPOWER_OFF_TIMEOUT,
++ gcvPOWER_ON_AUTO
++}
++gceCHIPPOWERSTATE;
++
++/* CPU cache operations */
++typedef enum _gceCACHEOPERATION
++{
++ gcvCACHE_CLEAN = 0x01,
++ gcvCACHE_INVALIDATE = 0x02,
++ gcvCACHE_FLUSH = gcvCACHE_CLEAN | gcvCACHE_INVALIDATE,
++ gcvCACHE_MEMORY_BARRIER = 0x04
++}
++gceCACHEOPERATION;
++
++/* Surface types. */
++typedef enum _gceSURF_TYPE
++{
++ gcvSURF_TYPE_UNKNOWN = 0,
++ gcvSURF_INDEX,
++ gcvSURF_VERTEX,
++ gcvSURF_TEXTURE,
++ gcvSURF_RENDER_TARGET,
++ gcvSURF_DEPTH,
++ gcvSURF_BITMAP,
++ gcvSURF_TILE_STATUS,
++ gcvSURF_IMAGE,
++ gcvSURF_MASK,
++ gcvSURF_SCISSOR,
++ gcvSURF_HIERARCHICAL_DEPTH,
++ gcvSURF_NUM_TYPES, /* Make sure this is the last one! */
++
++ /* Combinations. */
++ gcvSURF_NO_TILE_STATUS = 0x100,
++ gcvSURF_NO_VIDMEM = 0x200, /* Used to allocate surfaces with no underlying vidmem node.
++ In Android, vidmem node is allocated by another process. */
++ gcvSURF_CACHEABLE = 0x400, /* Used to allocate a cacheable surface */
++    gcvSURF_FLIP                = 0x800, /* The resolve target will be flip-resolved from the RT. */
++ gcvSURF_TILE_STATUS_DIRTY = 0x1000, /* Init tile status to all dirty */
++
++ gcvSURF_LINEAR = 0x2000,
++ gcvSURF_VG = 0x4000,
++
++ gcvSURF_TEXTURE_LINEAR = gcvSURF_TEXTURE
++ | gcvSURF_LINEAR,
++
++ gcvSURF_RENDER_TARGET_NO_TILE_STATUS = gcvSURF_RENDER_TARGET
++ | gcvSURF_NO_TILE_STATUS,
++
++ gcvSURF_RENDER_TARGET_TS_DIRTY = gcvSURF_RENDER_TARGET
++ | gcvSURF_TILE_STATUS_DIRTY,
++
++ gcvSURF_DEPTH_NO_TILE_STATUS = gcvSURF_DEPTH
++ | gcvSURF_NO_TILE_STATUS,
++
++ gcvSURF_DEPTH_TS_DIRTY = gcvSURF_DEPTH
++ | gcvSURF_TILE_STATUS_DIRTY,
++
++ /* Supported surface types with no vidmem node. */
++ gcvSURF_BITMAP_NO_VIDMEM = gcvSURF_BITMAP
++ | gcvSURF_NO_VIDMEM,
++
++ gcvSURF_TEXTURE_NO_VIDMEM = gcvSURF_TEXTURE
++ | gcvSURF_NO_VIDMEM,
++
++ /* Cacheable surface types with no vidmem node. */
++ gcvSURF_CACHEABLE_BITMAP_NO_VIDMEM = gcvSURF_BITMAP_NO_VIDMEM
++ | gcvSURF_CACHEABLE,
++
++ gcvSURF_CACHEABLE_BITMAP = gcvSURF_BITMAP
++ | gcvSURF_CACHEABLE,
++
++ gcvSURF_FLIP_BITMAP = gcvSURF_BITMAP
++ | gcvSURF_FLIP,
++}
++gceSURF_TYPE;
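++
++/* Worked example (editorial addition): the values from 0x100 upward are
++** modifier bits layered over the base types in the low byte, so the
++** combinations above are simple ORs; for instance
++** gcvSURF_CACHEABLE_BITMAP == gcvSURF_BITMAP | gcvSURF_CACHEABLE
++**                          == 6 | 0x400 == 0x406.
++*/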
++
++typedef enum _gceSURF_USAGE
++{
++ gcvSURF_USAGE_UNKNOWN,
++ gcvSURF_USAGE_RESOLVE_AFTER_CPU,
++ gcvSURF_USAGE_RESOLVE_AFTER_3D
++}
++gceSURF_USAGE;
++
++typedef enum _gceSURF_COLOR_TYPE
++{
++ gcvSURF_COLOR_UNKNOWN = 0,
++ gcvSURF_COLOR_LINEAR = 0x01,
++ gcvSURF_COLOR_ALPHA_PRE = 0x02,
++}
++gceSURF_COLOR_TYPE;
++
++/* Rotation. */
++typedef enum _gceSURF_ROTATION
++{
++ gcvSURF_0_DEGREE = 0,
++ gcvSURF_90_DEGREE,
++ gcvSURF_180_DEGREE,
++ gcvSURF_270_DEGREE,
++ gcvSURF_FLIP_X,
++ gcvSURF_FLIP_Y,
++
++ gcvSURF_POST_FLIP_X = 0x40000000,
++ gcvSURF_POST_FLIP_Y = 0x80000000,
++}
++gceSURF_ROTATION;
++
++typedef enum _gceMIPMAP_IMAGE_FORMAT
++{
++ gcvUNKNOWN_MIPMAP_IMAGE_FORMAT = -2
++}
++gceMIPMAP_IMAGE_FORMAT;
++
++
++/* Surface formats. */
++typedef enum _gceSURF_FORMAT
++{
++ /* Unknown format. */
++ gcvSURF_UNKNOWN = 0,
++
++ /* Palettized formats. */
++ gcvSURF_INDEX1 = 100,
++ gcvSURF_INDEX4,
++ gcvSURF_INDEX8,
++
++ /* RGB formats. */
++ gcvSURF_A2R2G2B2 = 200,
++ gcvSURF_R3G3B2,
++ gcvSURF_A8R3G3B2,
++ gcvSURF_X4R4G4B4,
++ gcvSURF_A4R4G4B4,
++ gcvSURF_R4G4B4A4,
++ gcvSURF_X1R5G5B5,
++ gcvSURF_A1R5G5B5,
++ gcvSURF_R5G5B5A1,
++ gcvSURF_R5G6B5,
++ gcvSURF_R8G8B8,
++ gcvSURF_X8R8G8B8,
++ gcvSURF_A8R8G8B8,
++ gcvSURF_R8G8B8A8,
++ gcvSURF_G8R8G8B8,
++ gcvSURF_R8G8B8G8,
++ gcvSURF_X2R10G10B10,
++ gcvSURF_A2R10G10B10,
++ gcvSURF_X12R12G12B12,
++ gcvSURF_A12R12G12B12,
++ gcvSURF_X16R16G16B16,
++ gcvSURF_A16R16G16B16,
++ gcvSURF_A32R32G32B32,
++ gcvSURF_R8G8B8X8,
++ gcvSURF_R5G5B5X1,
++ gcvSURF_R4G4B4X4,
++
++ /* BGR formats. */
++ gcvSURF_A4B4G4R4 = 300,
++ gcvSURF_A1B5G5R5,
++ gcvSURF_B5G6R5,
++ gcvSURF_B8G8R8,
++ gcvSURF_B16G16R16,
++ gcvSURF_X8B8G8R8,
++ gcvSURF_A8B8G8R8,
++ gcvSURF_A2B10G10R10,
++ gcvSURF_X16B16G16R16,
++ gcvSURF_A16B16G16R16,
++ gcvSURF_B32G32R32,
++ gcvSURF_X32B32G32R32,
++ gcvSURF_A32B32G32R32,
++ gcvSURF_B4G4R4A4,
++ gcvSURF_B5G5R5A1,
++ gcvSURF_B8G8R8X8,
++ gcvSURF_B8G8R8A8,
++ gcvSURF_X4B4G4R4,
++ gcvSURF_X1B5G5R5,
++ gcvSURF_B4G4R4X4,
++ gcvSURF_B5G5R5X1,
++ gcvSURF_X2B10G10R10,
++
++ /* Compressed formats. */
++ gcvSURF_DXT1 = 400,
++ gcvSURF_DXT2,
++ gcvSURF_DXT3,
++ gcvSURF_DXT4,
++ gcvSURF_DXT5,
++ gcvSURF_CXV8U8,
++ gcvSURF_ETC1,
++ gcvSURF_R11_EAC,
++ gcvSURF_SIGNED_R11_EAC,
++ gcvSURF_RG11_EAC,
++ gcvSURF_SIGNED_RG11_EAC,
++ gcvSURF_RGB8_ETC2,
++ gcvSURF_SRGB8_ETC2,
++ gcvSURF_RGB8_PUNCHTHROUGH_ALPHA1_ETC2,
++ gcvSURF_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2,
++ gcvSURF_RGBA8_ETC2_EAC,
++ gcvSURF_SRGB8_ALPHA8_ETC2_EAC,
++
++ /* YUV formats. */
++ gcvSURF_YUY2 = 500,
++ gcvSURF_UYVY,
++ gcvSURF_YV12,
++ gcvSURF_I420,
++ gcvSURF_NV12,
++ gcvSURF_NV21,
++ gcvSURF_NV16,
++ gcvSURF_NV61,
++ gcvSURF_YVYU,
++ gcvSURF_VYUY,
++
++ /* Depth formats. */
++ gcvSURF_D16 = 600,
++ gcvSURF_D24S8,
++ gcvSURF_D32,
++ gcvSURF_D24X8,
++
++ /* Alpha formats. */
++ gcvSURF_A4 = 700,
++ gcvSURF_A8,
++ gcvSURF_A12,
++ gcvSURF_A16,
++ gcvSURF_A32,
++ gcvSURF_A1,
++
++ /* Luminance formats. */
++ gcvSURF_L4 = 800,
++ gcvSURF_L8,
++ gcvSURF_L12,
++ gcvSURF_L16,
++ gcvSURF_L32,
++ gcvSURF_L1,
++
++ /* Alpha/Luminance formats. */
++ gcvSURF_A4L4 = 900,
++ gcvSURF_A2L6,
++ gcvSURF_A8L8,
++ gcvSURF_A4L12,
++ gcvSURF_A12L12,
++ gcvSURF_A16L16,
++
++ /* Bump formats. */
++ gcvSURF_L6V5U5 = 1000,
++ gcvSURF_V8U8,
++ gcvSURF_X8L8V8U8,
++ gcvSURF_Q8W8V8U8,
++ gcvSURF_A2W10V10U10,
++ gcvSURF_V16U16,
++ gcvSURF_Q16W16V16U16,
++
++ /* R/RG/RA formats. */
++ gcvSURF_R8 = 1100,
++ gcvSURF_X8R8,
++ gcvSURF_G8R8,
++ gcvSURF_X8G8R8,
++ gcvSURF_A8R8,
++ gcvSURF_R16,
++ gcvSURF_X16R16,
++ gcvSURF_G16R16,
++ gcvSURF_X16G16R16,
++ gcvSURF_A16R16,
++ gcvSURF_R32,
++ gcvSURF_X32R32,
++ gcvSURF_G32R32,
++ gcvSURF_X32G32R32,
++ gcvSURF_A32R32,
++ gcvSURF_RG16,
++
++ /* Floating point formats. */
++ gcvSURF_R16F = 1200,
++ gcvSURF_X16R16F,
++ gcvSURF_G16R16F,
++ gcvSURF_X16G16R16F,
++ gcvSURF_B16G16R16F,
++ gcvSURF_X16B16G16R16F,
++ gcvSURF_A16B16G16R16F,
++ gcvSURF_R32F,
++ gcvSURF_X32R32F,
++ gcvSURF_G32R32F,
++ gcvSURF_X32G32R32F,
++ gcvSURF_B32G32R32F,
++ gcvSURF_X32B32G32R32F,
++ gcvSURF_A32B32G32R32F,
++ gcvSURF_A16F,
++ gcvSURF_L16F,
++ gcvSURF_A16L16F,
++ gcvSURF_A16R16F,
++ gcvSURF_A32F,
++ gcvSURF_L32F,
++ gcvSURF_A32L32F,
++ gcvSURF_A32R32F,
++
++}
++gceSURF_FORMAT;
++
++/* Pixel swizzle modes. */
++typedef enum _gceSURF_SWIZZLE
++{
++ gcvSURF_NOSWIZZLE = 0,
++ gcvSURF_ARGB,
++ gcvSURF_ABGR,
++ gcvSURF_RGBA,
++ gcvSURF_BGRA
++}
++gceSURF_SWIZZLE;
++
++/* Transparency modes. */
++typedef enum _gceSURF_TRANSPARENCY
++{
++ /* Valid only for PE 1.0 */
++ gcvSURF_OPAQUE = 0,
++ gcvSURF_SOURCE_MATCH,
++ gcvSURF_SOURCE_MASK,
++ gcvSURF_PATTERN_MASK,
++}
++gceSURF_TRANSPARENCY;
++
++/* Surface Alignment. */
++typedef enum _gceSURF_ALIGNMENT
++{
++ gcvSURF_FOUR = 0,
++ gcvSURF_SIXTEEN,
++ gcvSURF_SUPER_TILED,
++ gcvSURF_SPLIT_TILED,
++ gcvSURF_SPLIT_SUPER_TILED,
++}
++gceSURF_ALIGNMENT;
++
++
++/* Surface Addressing. */
++typedef enum _gceSURF_ADDRESSING
++{
++ gcvSURF_NO_STRIDE_TILED = 0,
++ gcvSURF_NO_STRIDE_LINEAR,
++ gcvSURF_STRIDE_TILED,
++ gcvSURF_STRIDE_LINEAR
++}
++gceSURF_ADDRESSING;
++
++/* Transparency modes. */
++typedef enum _gce2D_TRANSPARENCY
++{
++ /* Valid only for PE 2.0 */
++ gcv2D_OPAQUE = 0,
++ gcv2D_KEYED,
++ gcv2D_MASKED
++}
++gce2D_TRANSPARENCY;
++
++/* Mono packing modes. */
++typedef enum _gceSURF_MONOPACK
++{
++ gcvSURF_PACKED8 = 0,
++ gcvSURF_PACKED16,
++ gcvSURF_PACKED32,
++ gcvSURF_UNPACKED,
++}
++gceSURF_MONOPACK;
++
++/* Blending modes. */
++typedef enum _gceSURF_BLEND_MODE
++{
++ /* Porter-Duff blending modes. */
++ /* Fsrc Fdst */
++ gcvBLEND_CLEAR = 0, /* 0 0 */
++ gcvBLEND_SRC, /* 1 0 */
++ gcvBLEND_DST, /* 0 1 */
++ gcvBLEND_SRC_OVER_DST, /* 1 1 - Asrc */
++ gcvBLEND_DST_OVER_SRC, /* 1 - Adst 1 */
++ gcvBLEND_SRC_IN_DST, /* Adst 0 */
++ gcvBLEND_DST_IN_SRC, /* 0 Asrc */
++ gcvBLEND_SRC_OUT_DST, /* 1 - Adst 0 */
++ gcvBLEND_DST_OUT_SRC, /* 0 1 - Asrc */
++ gcvBLEND_SRC_ATOP_DST, /* Adst 1 - Asrc */
++ gcvBLEND_DST_ATOP_SRC, /* 1 - Adst Asrc */
++ gcvBLEND_SRC_XOR_DST, /* 1 - Adst 1 - Asrc */
++
++ /* Special blending modes. */
++ gcvBLEND_SET, /* DST = 1 */
++ gcvBLEND_SUB /* DST = DST * (1 - SRC) */
++}
++gceSURF_BLEND_MODE;
++
++/* Per-pixel alpha modes. */
++typedef enum _gceSURF_PIXEL_ALPHA_MODE
++{
++ gcvSURF_PIXEL_ALPHA_STRAIGHT = 0,
++ gcvSURF_PIXEL_ALPHA_INVERSED
++}
++gceSURF_PIXEL_ALPHA_MODE;
++
++/* Global alpha modes. */
++typedef enum _gceSURF_GLOBAL_ALPHA_MODE
++{
++ gcvSURF_GLOBAL_ALPHA_OFF = 0,
++ gcvSURF_GLOBAL_ALPHA_ON,
++ gcvSURF_GLOBAL_ALPHA_SCALE
++}
++gceSURF_GLOBAL_ALPHA_MODE;
++
++/* Color component modes for alpha blending. */
++typedef enum _gceSURF_PIXEL_COLOR_MODE
++{
++ gcvSURF_COLOR_STRAIGHT = 0,
++ gcvSURF_COLOR_MULTIPLY
++}
++gceSURF_PIXEL_COLOR_MODE;
++
++/* Color component modes for alpha blending. */
++typedef enum _gce2D_PIXEL_COLOR_MULTIPLY_MODE
++{
++ gcv2D_COLOR_MULTIPLY_DISABLE = 0,
++ gcv2D_COLOR_MULTIPLY_ENABLE
++}
++gce2D_PIXEL_COLOR_MULTIPLY_MODE;
++
++/* Color component modes for alpha blending. */
++typedef enum _gce2D_GLOBAL_COLOR_MULTIPLY_MODE
++{
++ gcv2D_GLOBAL_COLOR_MULTIPLY_DISABLE = 0,
++ gcv2D_GLOBAL_COLOR_MULTIPLY_ALPHA,
++ gcv2D_GLOBAL_COLOR_MULTIPLY_COLOR
++}
++gce2D_GLOBAL_COLOR_MULTIPLY_MODE;
++
++/* Alpha blending factor modes. */
++typedef enum _gceSURF_BLEND_FACTOR_MODE
++{
++ gcvSURF_BLEND_ZERO = 0,
++ gcvSURF_BLEND_ONE,
++ gcvSURF_BLEND_STRAIGHT,
++ gcvSURF_BLEND_INVERSED,
++ gcvSURF_BLEND_COLOR,
++ gcvSURF_BLEND_COLOR_INVERSED,
++ gcvSURF_BLEND_SRC_ALPHA_SATURATED,
++ gcvSURF_BLEND_STRAIGHT_NO_CROSS,
++ gcvSURF_BLEND_INVERSED_NO_CROSS,
++ gcvSURF_BLEND_COLOR_NO_CROSS,
++ gcvSURF_BLEND_COLOR_INVERSED_NO_CROSS,
++ gcvSURF_BLEND_SRC_ALPHA_SATURATED_CROSS
++}
++gceSURF_BLEND_FACTOR_MODE;
++
++/* Alpha blending Porter-Duff rules. */
++typedef enum _gce2D_PORTER_DUFF_RULE
++{
++ gcvPD_CLEAR = 0,
++ gcvPD_SRC,
++ gcvPD_SRC_OVER,
++ gcvPD_DST_OVER,
++ gcvPD_SRC_IN,
++ gcvPD_DST_IN,
++ gcvPD_SRC_OUT,
++ gcvPD_DST_OUT,
++ gcvPD_SRC_ATOP,
++ gcvPD_DST_ATOP,
++ gcvPD_ADD,
++ gcvPD_XOR,
++ gcvPD_DST
++}
++gce2D_PORTER_DUFF_RULE;
++
++/* YUV color conversion modes. */
++typedef enum _gce2D_YUV_COLOR_MODE
++{
++    gcv2D_YUV_601 = 0,
++ gcv2D_YUV_709,
++ gcv2D_YUV_USER_DEFINED,
++ gcv2D_YUV_USER_DEFINED_CLAMP,
++
++ /* Default setting is for src. gcv2D_YUV_DST
++ can be ORed to set dst.
++ */
++ gcv2D_YUV_DST = 0x80000000,
++}
++gce2D_YUV_COLOR_MODE;
++
++typedef enum _gce2D_COMMAND
++{
++ gcv2D_CLEAR = 0,
++ gcv2D_LINE,
++ gcv2D_BLT,
++ gcv2D_STRETCH,
++ gcv2D_HOR_FILTER,
++ gcv2D_VER_FILTER,
++ gcv2D_MULTI_SOURCE_BLT,
++}
++gce2D_COMMAND;
++
++typedef enum _gce2D_TILE_STATUS_CONFIG
++{
++ gcv2D_TSC_DISABLE = 0,
++ gcv2D_TSC_ENABLE = 0x00000001,
++ gcv2D_TSC_COMPRESSED = 0x00000002,
++ gcv2D_TSC_DOWN_SAMPLER = 0x00000004,
++ gcv2D_TSC_2D_COMPRESSED = 0x00000008,
++}
++gce2D_TILE_STATUS_CONFIG;
++
++typedef enum _gce2D_QUERY
++{
++ gcv2D_QUERY_RGB_ADDRESS_MIN_ALIGN = 0,
++ gcv2D_QUERY_RGB_STRIDE_MIN_ALIGN,
++ gcv2D_QUERY_YUV_ADDRESS_MIN_ALIGN,
++ gcv2D_QUERY_YUV_STRIDE_MIN_ALIGN,
++}
++gce2D_QUERY;
++
++typedef enum _gce2D_SUPER_TILE_VERSION
++{
++ gcv2D_SUPER_TILE_VERSION_V1 = 1,
++ gcv2D_SUPER_TILE_VERSION_V2 = 2,
++ gcv2D_SUPER_TILE_VERSION_V3 = 3,
++}
++gce2D_SUPER_TILE_VERSION;
++
++typedef enum _gce2D_STATE
++{
++ gcv2D_STATE_SPECIAL_FILTER_MIRROR_MODE = 1,
++ gcv2D_STATE_SUPER_TILE_VERSION,
++ gcv2D_STATE_EN_GAMMA,
++ gcv2D_STATE_DE_GAMMA,
++ gcv2D_STATE_MULTI_SRC_BLIT_UNIFIED_DST_RECT,
++ gcv2D_STATE_XRGB_ENABLE,
++
++ gcv2D_STATE_ARRAY_EN_GAMMA = 0x10001,
++ gcv2D_STATE_ARRAY_DE_GAMMA,
++ gcv2D_STATE_ARRAY_CSC_YUV_TO_RGB,
++ gcv2D_STATE_ARRAY_CSC_RGB_TO_YUV,
++}
++gce2D_STATE;
++
++#ifndef VIVANTE_NO_3D
++/* Texture functions. */
++typedef enum _gceTEXTURE_FUNCTION
++{
++ gcvTEXTURE_DUMMY = 0,
++ gcvTEXTURE_REPLACE = 0,
++ gcvTEXTURE_MODULATE,
++ gcvTEXTURE_ADD,
++ gcvTEXTURE_ADD_SIGNED,
++ gcvTEXTURE_INTERPOLATE,
++ gcvTEXTURE_SUBTRACT,
++ gcvTEXTURE_DOT3
++}
++gceTEXTURE_FUNCTION;
++
++/* Texture sources. */
++typedef enum _gceTEXTURE_SOURCE
++{
++ gcvCOLOR_FROM_TEXTURE = 0,
++ gcvCOLOR_FROM_CONSTANT_COLOR,
++ gcvCOLOR_FROM_PRIMARY_COLOR,
++ gcvCOLOR_FROM_PREVIOUS_COLOR
++}
++gceTEXTURE_SOURCE;
++
++/* Texture source channels. */
++typedef enum _gceTEXTURE_CHANNEL
++{
++ gcvFROM_COLOR = 0,
++ gcvFROM_ONE_MINUS_COLOR,
++ gcvFROM_ALPHA,
++ gcvFROM_ONE_MINUS_ALPHA
++}
++gceTEXTURE_CHANNEL;
++#endif /* VIVANTE_NO_3D */
++
++/* Filter types. */
++typedef enum _gceFILTER_TYPE
++{
++ gcvFILTER_SYNC = 0,
++ gcvFILTER_BLUR,
++ gcvFILTER_USER
++}
++gceFILTER_TYPE;
++
++/* Filter pass types. */
++typedef enum _gceFILTER_PASS_TYPE
++{
++ gcvFILTER_HOR_PASS = 0,
++ gcvFILTER_VER_PASS
++}
++gceFILTER_PASS_TYPE;
++
++/* Endian hints. */
++typedef enum _gceENDIAN_HINT
++{
++ gcvENDIAN_NO_SWAP = 0,
++ gcvENDIAN_SWAP_WORD,
++ gcvENDIAN_SWAP_DWORD
++}
++gceENDIAN_HINT;
++
++/* Tiling modes. */
++typedef enum _gceTILING
++{
++ gcvLINEAR = 0,
++ gcvTILED,
++ gcvSUPERTILED,
++ gcvMULTI_TILED,
++ gcvMULTI_SUPERTILED,
++ gcvMINORTILED,
++}
++gceTILING;
++
++/* 2D pattern type. */
++typedef enum _gce2D_PATTERN
++{
++ gcv2D_PATTERN_SOLID = 0,
++ gcv2D_PATTERN_MONO,
++ gcv2D_PATTERN_COLOR,
++ gcv2D_PATTERN_INVALID
++}
++gce2D_PATTERN;
++
++/* 2D source type. */
++typedef enum _gce2D_SOURCE
++{
++ gcv2D_SOURCE_MASKED = 0,
++ gcv2D_SOURCE_MONO,
++ gcv2D_SOURCE_COLOR,
++ gcv2D_SOURCE_INVALID
++}
++gce2D_SOURCE;
++
++/* Pipes. */
++typedef enum _gcePIPE_SELECT
++{
++ gcvPIPE_INVALID = ~0,
++ gcvPIPE_3D = 0,
++ gcvPIPE_2D
++}
++gcePIPE_SELECT;
++
++/* Hardware type. */
++typedef enum _gceHARDWARE_TYPE
++{
++ gcvHARDWARE_INVALID = 0x00,
++ gcvHARDWARE_3D = 0x01,
++ gcvHARDWARE_2D = 0x02,
++ gcvHARDWARE_VG = 0x04,
++
++ gcvHARDWARE_3D2D = gcvHARDWARE_3D | gcvHARDWARE_2D
++}
++gceHARDWARE_TYPE;
++
++#define gcdCHIP_COUNT 3
++
++typedef enum _gceMMU_MODE
++{
++ gcvMMU_MODE_1K,
++ gcvMMU_MODE_4K,
++} gceMMU_MODE;
++
++/* User signal command codes. */
++typedef enum _gceUSER_SIGNAL_COMMAND_CODES
++{
++ gcvUSER_SIGNAL_CREATE,
++ gcvUSER_SIGNAL_DESTROY,
++ gcvUSER_SIGNAL_SIGNAL,
++ gcvUSER_SIGNAL_WAIT,
++ gcvUSER_SIGNAL_MAP,
++ gcvUSER_SIGNAL_UNMAP,
++}
++gceUSER_SIGNAL_COMMAND_CODES;
++
++/* Sync point command codes. */
++typedef enum _gceSYNC_POINT_COMMAND_CODES
++{
++ gcvSYNC_POINT_CREATE,
++ gcvSYNC_POINT_DESTROY,
++ gcvSYNC_POINT_SIGNAL,
++}
++gceSYNC_POINT_COMMAND_CODES;
++
++/* Event locations. */
++typedef enum _gceKERNEL_WHERE
++{
++ gcvKERNEL_COMMAND,
++ gcvKERNEL_VERTEX,
++ gcvKERNEL_TRIANGLE,
++ gcvKERNEL_TEXTURE,
++ gcvKERNEL_PIXEL,
++}
++gceKERNEL_WHERE;
++
++#if gcdENABLE_VG
++/* Hardware blocks. */
++typedef enum _gceBLOCK
++{
++ gcvBLOCK_COMMAND,
++ gcvBLOCK_TESSELLATOR,
++ gcvBLOCK_TESSELLATOR2,
++ gcvBLOCK_TESSELLATOR3,
++ gcvBLOCK_RASTER,
++ gcvBLOCK_VG,
++ gcvBLOCK_VG2,
++ gcvBLOCK_VG3,
++ gcvBLOCK_PIXEL,
++
++ /* Number of defined blocks. */
++ gcvBLOCK_COUNT
++}
++gceBLOCK;
++#endif
++
++/* gcdDUMP message type. */
++typedef enum _gceDEBUG_MESSAGE_TYPE
++{
++ gcvMESSAGE_TEXT,
++ gcvMESSAGE_DUMP
++}
++gceDEBUG_MESSAGE_TYPE;
++
++typedef enum _gceSPECIAL_HINT
++{
++ gceSPECIAL_HINT0,
++ gceSPECIAL_HINT1,
++ gceSPECIAL_HINT2,
++ gceSPECIAL_HINT3,
++ /* For disabling dynamic stream/index. */
++ gceSPECIAL_HINT4
++}
++gceSPECIAL_HINT;
++
++typedef enum _gceMACHINECODE
++{
++ gcvMACHINECODE_HOVERJET0 = 0x0,
++ gcvMACHINECODE_HOVERJET1,
++
++ gcvMACHINECODE_TAIJI0,
++ gcvMACHINECODE_TAIJI1,
++ gcvMACHINECODE_TAIJI2,
++
++ gcvMACHINECODE_ANTUTU0,
++
++ gcvMACHINECODE_GLB27_RELEASE_0,
++ gcvMACHINECODE_GLB27_RELEASE_1,
++
++ gcvMACHINECODE_WAVESCAPE0,
++ gcvMACHINECODE_WAVESCAPE1,
++
++ gcvMACHINECODE_NENAMARKV2_4_0,
++ gcvMACHINECODE_NENAMARKV2_4_1,
++
++ gcvMACHINECODE_GLB25_RELEASE_0,
++ gcvMACHINECODE_GLB25_RELEASE_1,
++ gcvMACHINECODE_GLB25_RELEASE_2,
++}
++gceMACHINECODE;
++
++
++/******************************************************************************\
++****************************** Object Declarations *****************************
++\******************************************************************************/
++
++typedef struct _gckCONTEXT * gckCONTEXT;
++typedef struct _gcoCMDBUF * gcoCMDBUF;
++typedef struct _gcsSTATE_DELTA * gcsSTATE_DELTA_PTR;
++typedef struct _gcsQUEUE * gcsQUEUE_PTR;
++typedef struct _gcoQUEUE * gcoQUEUE;
++typedef struct _gcsHAL_INTERFACE * gcsHAL_INTERFACE_PTR;
++typedef struct _gcs2D_PROFILE * gcs2D_PROFILE_PTR;
++
++#if gcdENABLE_VG
++typedef struct _gcoVGHARDWARE * gcoVGHARDWARE;
++typedef struct _gcoVGBUFFER * gcoVGBUFFER;
++typedef struct _gckVGHARDWARE * gckVGHARDWARE;
++typedef struct _gcsVGCONTEXT * gcsVGCONTEXT_PTR;
++typedef struct _gcsVGCONTEXT_MAP * gcsVGCONTEXT_MAP_PTR;
++typedef struct _gcsVGCMDQUEUE * gcsVGCMDQUEUE_PTR;
++typedef struct _gcsTASK_MASTER_TABLE * gcsTASK_MASTER_TABLE_PTR;
++typedef struct _gckVGKERNEL * gckVGKERNEL;
++typedef void * gctTHREAD;
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_enum_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal.h 2015-07-27 23:13:06.194879670 +0200
+@@ -0,0 +1,2671 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_h_
++#define __gc_hal_h_
++
++#include "gc_hal_rename.h"
++#include "gc_hal_types.h"
++#include "gc_hal_enum.h"
++#include "gc_hal_base.h"
++#include "gc_hal_profiler.h"
++#include "gc_hal_driver.h"
++#ifndef VIVANTE_NO_3D
++#include "gc_hal_statistics.h"
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++******************************* Alignment Macros *******************************
++\******************************************************************************/
++
++#define gcmALIGN(n, align) \
++( \
++ ((n) + ((align) - 1)) & ~((align) - 1) \
++)
++
++#define gcmALIGN_BASE(n, align) \
++( \
++ ((n) & ~((align) - 1)) \
++)
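++
++/* Worked example, assuming 'align' is a power of two as these macros require:
++** gcmALIGN(13, 8) == (13 + 7) & ~7 == 16 (round up), while
++** gcmALIGN_BASE(13, 8) == 13 & ~7 == 8 (round down). */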
++
++/******************************************************************************\
++***************************** Element Count Macro *****************************
++\******************************************************************************/
++
++#define gcmSIZEOF(a) \
++( \
++ (gctSIZE_T) (sizeof(a)) \
++)
++
++#define gcmCOUNTOF(a) \
++( \
++ sizeof(a) / sizeof(a[0]) \
++)
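++
++/* Note: gcmCOUNTOF only yields the element count for a true array, e.g.
++** gctUINT32 table[16]; gcmCOUNTOF(table) == 16. Applied to a pointer it
++** would silently measure the pointer itself. */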
++
++/******************************************************************************\
++********************************* Cast Macro **********************************
++\******************************************************************************/
++#define gcmNAME_TO_PTR(na) \
++ gckKERNEL_QueryPointerFromName(kernel, gcmALL_TO_UINT32(na))
++
++#define gcmPTR_TO_NAME(ptr) \
++ gckKERNEL_AllocateNameFromPointer(kernel, ptr)
++
++#define gcmRELEASE_NAME(na) \
++ gckKERNEL_DeleteName(kernel, gcmALL_TO_UINT32(na))
++
++#ifdef __LP64__
++
++#define gcmALL_TO_UINT32(t) \
++( \
++ (gctUINT32) (gctUINTPTR_T) (t)\
++)
++
++#define gcmPTR_TO_UINT64(p) \
++( \
++ (gctUINT64) (p)\
++)
++
++#define gcmUINT64_TO_PTR(u) \
++( \
++ (gctPOINTER) (u)\
++)
++
++#else /* 32 bit */
++
++#define gcmALL_TO_UINT32(t) \
++( \
++ (gctUINT32) (t)\
++)
++
++#define gcmPTR_TO_UINT64(p) \
++( \
++ (gctUINT64) (gctUINTPTR_T) (p)\
++)
++
++#define gcmUINT64_TO_PTR(u) \
++( \
++ (gctPOINTER) (gctUINTPTR_T) (u)\
++)
++
++#endif
++
++#define gcmUINT64_TO_TYPE(u, t) \
++( \
++ (t) (gctUINTPTR_T) (u)\
++)
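++
++/* Round-trip sketch for carrying a pointer through a 64-bit interface field,
++** valid on both 32- and 64-bit builds ('node' is a hypothetical variable):
++** gctUINT64 handle = gcmPTR_TO_UINT64(node);
++** gctPOINTER ptr = gcmUINT64_TO_PTR(handle);
++** gcmUINT64_TO_TYPE(handle, gcuVIDMEM_NODE_PTR) additionally casts the
++** result to the requested type. */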
++
++/******************************************************************************\
++******************************** Useful Macro *********************************
++\******************************************************************************/
++
++#define gcvINVALID_ADDRESS ~0U
++
++#define gcmGET_PRE_ROTATION(rotate) \
++ ((rotate) & (~(gcvSURF_POST_FLIP_X | gcvSURF_POST_FLIP_Y)))
++
++#define gcmGET_POST_ROTATION(rotate) \
++ ((rotate) & (gcvSURF_POST_FLIP_X | gcvSURF_POST_FLIP_Y))
++
++/******************************************************************************\
++******************************** gcsOBJECT Object *******************************
++\******************************************************************************/
++
++/* Type of objects. */
++typedef enum _gceOBJECT_TYPE
++{
++ gcvOBJ_UNKNOWN = 0,
++ gcvOBJ_2D = gcmCC('2','D',' ',' '),
++ gcvOBJ_3D = gcmCC('3','D',' ',' '),
++ gcvOBJ_ATTRIBUTE = gcmCC('A','T','T','R'),
++ gcvOBJ_BRUSHCACHE = gcmCC('B','R','U','$'),
++ gcvOBJ_BRUSHNODE = gcmCC('B','R','U','n'),
++ gcvOBJ_BRUSH = gcmCC('B','R','U','o'),
++ gcvOBJ_BUFFER = gcmCC('B','U','F','R'),
++ gcvOBJ_COMMAND = gcmCC('C','M','D',' '),
++ gcvOBJ_COMMANDBUFFER = gcmCC('C','M','D','B'),
++ gcvOBJ_CONTEXT = gcmCC('C','T','X','T'),
++ gcvOBJ_DEVICE = gcmCC('D','E','V',' '),
++ gcvOBJ_DUMP = gcmCC('D','U','M','P'),
++ gcvOBJ_EVENT = gcmCC('E','V','N','T'),
++ gcvOBJ_FUNCTION = gcmCC('F','U','N','C'),
++ gcvOBJ_HAL = gcmCC('H','A','L',' '),
++ gcvOBJ_HARDWARE = gcmCC('H','A','R','D'),
++ gcvOBJ_HEAP = gcmCC('H','E','A','P'),
++ gcvOBJ_INDEX = gcmCC('I','N','D','X'),
++ gcvOBJ_INTERRUPT = gcmCC('I','N','T','R'),
++ gcvOBJ_KERNEL = gcmCC('K','E','R','N'),
++ gcvOBJ_KERNEL_FUNCTION = gcmCC('K','F','C','N'),
++ gcvOBJ_MEMORYBUFFER = gcmCC('M','E','M','B'),
++ gcvOBJ_MMU = gcmCC('M','M','U',' '),
++ gcvOBJ_OS = gcmCC('O','S',' ',' '),
++ gcvOBJ_OUTPUT = gcmCC('O','U','T','P'),
++ gcvOBJ_PAINT = gcmCC('P','N','T',' '),
++ gcvOBJ_PATH = gcmCC('P','A','T','H'),
++ gcvOBJ_QUEUE = gcmCC('Q','U','E',' '),
++ gcvOBJ_SAMPLER = gcmCC('S','A','M','P'),
++ gcvOBJ_SHADER = gcmCC('S','H','D','R'),
++ gcvOBJ_STREAM = gcmCC('S','T','R','M'),
++ gcvOBJ_SURF = gcmCC('S','U','R','F'),
++ gcvOBJ_TEXTURE = gcmCC('T','X','T','R'),
++ gcvOBJ_UNIFORM = gcmCC('U','N','I','F'),
++ gcvOBJ_VARIABLE = gcmCC('V','A','R','I'),
++ gcvOBJ_VERTEX = gcmCC('V','R','T','X'),
++ gcvOBJ_VIDMEM = gcmCC('V','M','E','M'),
++ gcvOBJ_VG = gcmCC('V','G',' ',' '),
++}
++gceOBJECT_TYPE;
++
++/* gcsOBJECT object definition. */
++typedef struct _gcsOBJECT
++{
++ /* Type of an object. */
++ gceOBJECT_TYPE type;
++}
++gcsOBJECT;
++
++typedef struct _gckHARDWARE * gckHARDWARE;
++
++/* CORE flags. */
++typedef enum _gceCORE
++{
++ gcvCORE_MAJOR = 0x0,
++ gcvCORE_2D = 0x1,
++ gcvCORE_VG = 0x2
++}
++gceCORE;
++
++#define gcdMAX_GPU_COUNT 3
++
++/*******************************************************************************
++**
++** gcmVERIFY_OBJECT
++**
++** Assert if an object is invalid or is not of the specified type. If the
++** object is invalid or not of the specified type, gcvSTATUS_INVALID_OBJECT
++** will be returned from the current function. In retail mode this macro
++** does nothing.
++**
++** ARGUMENTS:
++**
++** obj Object to test.
++** t Expected type of the object.
++*/
++#if gcmIS_DEBUG(gcdDEBUG_TRACE)
++#define _gcmVERIFY_OBJECT(prefix, obj, t) \
++ if ((obj) == gcvNULL) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "VERIFY_OBJECT failed: NULL"); \
++ prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \
++ gcmCC_PRINT(t)); \
++ prefix##ASSERT((obj) != gcvNULL); \
++ prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_OBJECT); \
++ return gcvSTATUS_INVALID_OBJECT; \
++ } \
++ else if (((gcsOBJECT*) (obj))->type != t) \
++ { \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "VERIFY_OBJECT failed: %c%c%c%c", \
++ gcmCC_PRINT(((gcsOBJECT*) (obj))->type)); \
++ prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \
++ gcmCC_PRINT(t)); \
++ prefix##ASSERT(((gcsOBJECT*)(obj))->type == t); \
++ prefix##FOOTER_ARG("status=%d", gcvSTATUS_INVALID_OBJECT); \
++ return gcvSTATUS_INVALID_OBJECT; \
++ }
++
++# define gcmVERIFY_OBJECT(obj, t) _gcmVERIFY_OBJECT(gcm, obj, t)
++# define gcmkVERIFY_OBJECT(obj, t) _gcmVERIFY_OBJECT(gcmk, obj, t)
++#else
++# define gcmVERIFY_OBJECT(obj, t) do {} while (gcvFALSE)
++# define gcmkVERIFY_OBJECT(obj, t) do {} while (gcvFALSE)
++#endif
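++
++/* Usage sketch (illustrative; the function name is hypothetical):
++** gceSTATUS
++** gckKERNEL_DoWork(gckKERNEL Kernel)
++** {
++**     gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++**     ...
++** }
++** In debug builds an invalid object makes the caller return
++** gcvSTATUS_INVALID_OBJECT; in retail builds the check compiles away. */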
++
++/******************************************************************************/
++/* VERIFY_OBJECT variants used when a special return value is expected. */
++/******************************************************************************/
++#ifndef EGL_API_ANDROID
++# define _gcmVERIFY_OBJECT_RETURN(prefix, obj, t, retVal) \
++ do \
++ { \
++ if ((obj) == gcvNULL) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "VERIFY_OBJECT_RETURN failed: NULL"); \
++ prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \
++ gcmCC_PRINT(t)); \
++ prefix##ASSERT((obj) != gcvNULL); \
++ prefix##FOOTER_ARG("retVal=%d", retVal); \
++ return retVal; \
++ } \
++ else if (((gcsOBJECT*) (obj))->type != t) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "VERIFY_OBJECT_RETURN failed: %c%c%c%c", \
++ gcmCC_PRINT(((gcsOBJECT*) (obj))->type)); \
++ prefix##TRACE(gcvLEVEL_ERROR, " expected: %c%c%c%c", \
++ gcmCC_PRINT(t)); \
++ prefix##ASSERT(((gcsOBJECT*)(obj))->type == t); \
++ prefix##FOOTER_ARG("retVal=%d", retVal); \
++ return retVal; \
++ } \
++ } \
++ while (gcvFALSE)
++# define gcmVERIFY_OBJECT_RETURN(obj, t, retVal) \
++ _gcmVERIFY_OBJECT_RETURN(gcm, obj, t, retVal)
++# define gcmkVERIFY_OBJECT_RETURN(obj, t, retVal) \
++ _gcmVERIFY_OBJECT_RETURN(gcmk, obj, t, retVal)
++#else
++# define gcmVERIFY_OBJECT_RETURN(obj, t, retVal) do {} while (gcvFALSE)
++# define gcmkVERIFY_OBJECT_RETURN(obj, t, retVal) do {} while (gcvFALSE)
++#endif
++
++/******************************************************************************\
++********************************** gckOS Object *********************************
++\******************************************************************************/
++
++/* Construct a new gckOS object. */
++gceSTATUS
++gckOS_Construct(
++ IN gctPOINTER Context,
++ OUT gckOS * Os
++ );
++
++/* Destroy a gckOS object. */
++gceSTATUS
++gckOS_Destroy(
++ IN gckOS Os
++ );
++
++/* Query the video memory. */
++gceSTATUS
++gckOS_QueryVideoMemory(
++ IN gckOS Os,
++ OUT gctPHYS_ADDR * InternalAddress,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctPHYS_ADDR * ExternalAddress,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctPHYS_ADDR * ContiguousAddress,
++ OUT gctSIZE_T * ContiguousSize
++ );
++
++/* Allocate memory from the heap. */
++gceSTATUS
++gckOS_Allocate(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Free allocated memory. */
++gceSTATUS
++gckOS_Free(
++ IN gckOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Wrapper for allocating memory. */
++gceSTATUS
++gckOS_AllocateMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ );
++
++/* Wrapper for freeing memory. */
++gceSTATUS
++gckOS_FreeMemory(
++ IN gckOS Os,
++ IN gctPOINTER Memory
++ );
++
++/* Allocate paged memory. */
++gceSTATUS
++gckOS_AllocatePagedMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPHYS_ADDR * Physical
++ );
++
++/* Allocate paged memory. */
++gceSTATUS
++gckOS_AllocatePagedMemoryEx(
++ IN gckOS Os,
++ IN gctBOOL Contiguous,
++ IN gctSIZE_T Bytes,
++ OUT gctPHYS_ADDR * Physical
++ );
++
++/* Lock pages. */
++gceSTATUS
++gckOS_LockPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctBOOL Cacheable,
++ OUT gctPOINTER * Logical,
++ OUT gctSIZE_T * PageCount
++ );
++
++/* Map pages. */
++gceSTATUS
++gckOS_MapPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++#ifdef __QNXNTO__
++ IN gctPOINTER Logical,
++#endif
++ IN gctSIZE_T PageCount,
++ IN gctPOINTER PageTable
++ );
++
++/* Map pages. */
++gceSTATUS
++gckOS_MapPagesEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPHYS_ADDR Physical,
++#ifdef __QNXNTO__
++ IN gctPOINTER Logical,
++#endif
++ IN gctSIZE_T PageCount,
++ IN gctPOINTER PageTable
++ );
++
++/* Unlock pages. */
++gceSTATUS
++gckOS_UnlockPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Free paged memory. */
++gceSTATUS
++gckOS_FreePagedMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes
++ );
++
++/* Allocate non-paged memory. */
++gceSTATUS
++gckOS_AllocateNonPagedMemory(
++ IN gckOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Free non-paged memory. */
++gceSTATUS
++gckOS_FreeNonPagedMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ );
++
++/* Allocate contiguous memory. */
++gceSTATUS
++gckOS_AllocateContiguous(
++ IN gckOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ );
++
++/* Free contiguous memory. */
++gceSTATUS
++gckOS_FreeContiguous(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++/* Get the number of bytes per page. */
++gceSTATUS
++gckOS_GetPageSize(
++ IN gckOS Os,
++ OUT gctSIZE_T * PageSize
++ );
++
++/* Get the physical address of a corresponding logical address. */
++gceSTATUS
++gckOS_GetPhysicalAddress(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ );
++
++/* Get the physical address of a corresponding logical address. */
++gceSTATUS
++gckOS_GetPhysicalAddressProcess(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ OUT gctUINT32 * Address
++ );
++
++/* Map physical memory. */
++gceSTATUS
++gckOS_MapPhysical(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ );
++
++/* Unmap previously mapped physical memory. */
++gceSTATUS
++gckOS_UnmapPhysical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ );
++
++/* Read data from a hardware register. */
++gceSTATUS
++gckOS_ReadRegister(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ );
++
++/* Read data from a hardware register. */
++gceSTATUS
++gckOS_ReadRegisterEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ );
++
++/* Write data to a hardware register. */
++gceSTATUS
++gckOS_WriteRegister(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ );
++
++/* Write data to a hardware register. */
++gceSTATUS
++gckOS_WriteRegisterEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ );
++
++/* Write data to a 32-bit memory location. */
++gceSTATUS
++gckOS_WriteMemory(
++ IN gckOS Os,
++ IN gctPOINTER Address,
++ IN gctUINT32 Data
++ );
++
++/* Map physical memory into the process space. */
++gceSTATUS
++gckOS_MapMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ );
++
++/* Unmap physical memory from the specified process space. */
++gceSTATUS
++gckOS_UnmapMemoryEx(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical,
++ IN gctUINT32 PID
++ );
++
++/* Unmap physical memory from the process space. */
++gceSTATUS
++gckOS_UnmapMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Unmap user logical memory from physical memory.
++ * Currently this is only supported on Linux.
++ */
++gceSTATUS
++gckOS_UnmapUserLogical(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Create a new mutex. */
++gceSTATUS
++gckOS_CreateMutex(
++ IN gckOS Os,
++ OUT gctPOINTER * Mutex
++ );
++
++/* Delete a mutex. */
++gceSTATUS
++gckOS_DeleteMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex
++ );
++
++/* Acquire a mutex. */
++gceSTATUS
++gckOS_AcquireMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex,
++ IN gctUINT32 Timeout
++ );
++
++/* Release a mutex. */
++gceSTATUS
++gckOS_ReleaseMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex
++ );
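++
++/* Mutex lifecycle sketch (illustrative; error paths elided; gcvINFINITE and
++** the gcmkONERROR/gcmkVERIFY_OK helpers are assumed from the base headers):
++** gctPOINTER mutex = gcvNULL;
++** gcmkONERROR(gckOS_CreateMutex(Os, &mutex));
++** gcmkONERROR(gckOS_AcquireMutex(Os, mutex, gcvINFINITE));
++** ... critical section ...
++** gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, mutex));
++** gcmkVERIFY_OK(gckOS_DeleteMutex(Os, mutex));
++*/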
++
++/* Atomically exchange a pair of 32-bit values. */
++gceSTATUS
++gckOS_AtomicExchange(
++ IN gckOS Os,
++ IN OUT gctUINT32_PTR Target,
++ IN gctUINT32 NewValue,
++ OUT gctUINT32_PTR OldValue
++ );
++
++/* Atomically exchange a pair of pointers. */
++gceSTATUS
++gckOS_AtomicExchangePtr(
++ IN gckOS Os,
++ IN OUT gctPOINTER * Target,
++ IN gctPOINTER NewValue,
++ OUT gctPOINTER * OldValue
++ );
++
++#if gcdSMP
++gceSTATUS
++gckOS_AtomSetMask(
++ IN gctPOINTER Atom,
++ IN gctUINT32 Mask
++ );
++
++gceSTATUS
++gckOS_AtomClearMask(
++ IN gctPOINTER Atom,
++ IN gctUINT32 Mask
++ );
++#endif
++
++gceSTATUS
++gckOS_DumpCallStack(
++ IN gckOS Os
++ );
++
++gceSTATUS
++gckOS_GetProcessNameByPid(
++ IN gctINT Pid,
++ IN gctSIZE_T Length,
++ OUT gctUINT8_PTR String
++ );
++
++
++
++/*******************************************************************************
++**
++** gckOS_AtomConstruct
++**
++** Create an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** OUTPUT:
++**
++** gctPOINTER * Atom
++** Pointer to a variable receiving the constructed atom.
++*/
++gceSTATUS
++gckOS_AtomConstruct(
++ IN gckOS Os,
++ OUT gctPOINTER * Atom
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomDestroy
++**
++** Destroy an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomDestroy(
++ IN gckOS Os,
++ OUT gctPOINTER Atom
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomGet
++**
++** Get the 32-bit value protected by an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable that receives the value of the atom.
++*/
++gceSTATUS
++gckOS_AtomGet(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomSet
++**
++** Set the 32-bit value protected by an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** gctINT32 Value
++** The value of the atom.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomSet(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ IN gctINT32 Value
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomIncrement
++**
++** Atomically increment the 32-bit integer value inside an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable that receives the original value of the atom.
++*/
++gceSTATUS
++gckOS_AtomIncrement(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ );
++
++/*******************************************************************************
++**
++** gckOS_AtomDecrement
++**
++** Atomically decrement the 32-bit integer value inside an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable that receives the original value of the atom.
++*/
++gceSTATUS
++gckOS_AtomDecrement(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ );
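++
++/* Reference-count sketch using the atom API above (illustrative; error paths
++** elided; helper macros assumed from the base headers):
++** gctPOINTER atom = gcvNULL;
++** gctINT32 previous;
++** gcmkONERROR(gckOS_AtomConstruct(Os, &atom));
++** gcmkONERROR(gckOS_AtomSet(Os, atom, 1));
++** gcmkONERROR(gckOS_AtomIncrement(Os, atom, &previous)); (previous receives 1)
++** gcmkONERROR(gckOS_AtomDecrement(Os, atom, &previous)); (previous receives 2)
++** gcmkVERIFY_OK(gckOS_AtomDestroy(Os, atom));
++*/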
++
++/* Delay a number of microseconds. */
++gceSTATUS
++gckOS_Delay(
++ IN gckOS Os,
++ IN gctUINT32 Delay
++ );
++
++/* Get time in milliseconds. */
++gceSTATUS
++gckOS_GetTicks(
++ OUT gctUINT32_PTR Time
++ );
++
++/* Compare time value. */
++gceSTATUS
++gckOS_TicksAfter(
++ IN gctUINT32 Time1,
++ IN gctUINT32 Time2,
++ OUT gctBOOL_PTR IsAfter
++ );
++
++/* Get time in microseconds. */
++gceSTATUS
++gckOS_GetTime(
++ OUT gctUINT64_PTR Time
++ );
++
++/* Memory barrier. */
++gceSTATUS
++gckOS_MemoryBarrier(
++ IN gckOS Os,
++ IN gctPOINTER Address
++ );
++
++/* Map user pointer. */
++gceSTATUS
++gckOS_MapUserPointer(
++ IN gckOS Os,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ );
++
++/* Unmap user pointer. */
++gceSTATUS
++gckOS_UnmapUserPointer(
++ IN gckOS Os,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size,
++ IN gctPOINTER KernelPointer
++ );
++
++/*******************************************************************************
++**
++** gckOS_QueryNeedCopy
++**
++** Query whether the memory can be accessed or mapped directly or it has to be
++** copied.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID of the current process.
++**
++** OUTPUT:
++**
++** gctBOOL_PTR NeedCopy
++** Pointer to a boolean receiving gcvTRUE if the memory needs a copy or
++** gcvFALSE if the memory can be accessed or mapped directly.
++*/
++gceSTATUS
++gckOS_QueryNeedCopy(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ OUT gctBOOL_PTR NeedCopy
++ );
++
++/*******************************************************************************
++**
++** gckOS_CopyFromUserData
++**
++** Copy data from user to kernel memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER KernelPointer
++** Pointer to kernel memory.
++**
++** gctPOINTER Pointer
++** Pointer to user memory.
++**
++** gctSIZE_T Size
++** Number of bytes to copy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_CopyFromUserData(
++ IN gckOS Os,
++ IN gctPOINTER KernelPointer,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size
++ );
++
++/*******************************************************************************
++**
++** gckOS_CopyToUserData
++**
++** Copy data from kernel to user memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER KernelPointer
++** Pointer to kernel memory.
++**
++** gctPOINTER Pointer
++** Pointer to user memory.
++**
++** gctSIZE_T Size
++** Number of bytes to copy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_CopyToUserData(
++ IN gckOS Os,
++ IN gctPOINTER KernelPointer,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size
++ );
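++
++/* Typical dispatch-side pattern combining the three routines above
++** (illustrative; 'userPtr', 'kernelBuf' and 'bytes' are hypothetical):
++** gctBOOL needCopy;
++** gcmkONERROR(gckOS_QueryNeedCopy(Os, ProcessID, &needCopy));
++** if (needCopy)
++**     gcmkONERROR(gckOS_CopyFromUserData(Os, kernelBuf, userPtr, bytes));
++** else
++**     gcmkONERROR(gckOS_MapUserPointer(Os, userPtr, bytes, &kernelBuf));
++*/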
++
++#ifdef __QNXNTO__
++/* Map user physical address. */
++gceSTATUS
++gckOS_MapUserPhysical(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Phys,
++ OUT gctPOINTER * KernelPointer
++ );
++#endif
++
++gceSTATUS
++gckOS_SuspendInterrupt(
++ IN gckOS Os
++ );
++
++gceSTATUS
++gckOS_SuspendInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++gceSTATUS
++gckOS_ResumeInterrupt(
++ IN gckOS Os
++ );
++
++gceSTATUS
++gckOS_ResumeInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++/* Get the base address for the physical memory. */
++gceSTATUS
++gckOS_GetBaseAddress(
++ IN gckOS Os,
++ OUT gctUINT32_PTR BaseAddress
++ );
++
++/* Perform a memory copy. */
++gceSTATUS
++gckOS_MemCopy(
++ IN gctPOINTER Destination,
++ IN gctCONST_POINTER Source,
++ IN gctSIZE_T Bytes
++ );
++
++/* Zero memory. */
++gceSTATUS
++gckOS_ZeroMemory(
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Bytes
++ );
++
++/* Device I/O control to the kernel HAL layer. */
++gceSTATUS
++gckOS_DeviceControl(
++ IN gckOS Os,
++ IN gctBOOL FromUser,
++ IN gctUINT32 IoControlCode,
++ IN gctPOINTER InputBuffer,
++ IN gctSIZE_T InputBufferSize,
++ OUT gctPOINTER OutputBuffer,
++ IN gctSIZE_T OutputBufferSize
++ );
++
++/*******************************************************************************
++**
++** gckOS_GetProcessID
++**
++** Get current process ID.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR ProcessID
++** Pointer to the variable that receives the process ID.
++*/
++gceSTATUS
++gckOS_GetProcessID(
++ OUT gctUINT32_PTR ProcessID
++ );
++
++gceSTATUS
++gckOS_GetCurrentProcessID(
++ OUT gctUINT32_PTR ProcessID
++ );
++
++/*******************************************************************************
++**
++** gckOS_GetThreadID
++**
++** Get current thread ID.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR ThreadID
++** Pointer to the variable that receives the thread ID.
++*/
++gceSTATUS
++gckOS_GetThreadID(
++ OUT gctUINT32_PTR ThreadID
++ );
++
++/******************************************************************************\
++********************************** Signal Object *********************************
++\******************************************************************************/
++
++/* Create a signal. */
++gceSTATUS
++gckOS_CreateSignal(
++ IN gckOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctSIGNAL * Signal
++ );
++
++/* Destroy a signal. */
++gceSTATUS
++gckOS_DestroySignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal
++ );
++
++/* Signal a signal. */
++gceSTATUS
++gckOS_Signal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctBOOL State
++ );
++
++/* Wait for a signal. */
++gceSTATUS
++gckOS_WaitSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctUINT32 Wait
++ );
++
++/* Map a user signal to the kernel space. */
++gceSTATUS
++gckOS_MapSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctHANDLE Process,
++ OUT gctSIGNAL * MappedSignal
++ );
++
++/* Unmap a user signal */
++gceSTATUS
++gckOS_UnmapSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal
++ );
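++
++/* Signal round-trip sketch (illustrative; error paths elided; gcvINFINITE
++** assumed from the base headers):
++** gctSIGNAL signal = gcvNULL;
++** gcmkONERROR(gckOS_CreateSignal(Os, gcvFALSE, &signal)); (auto-reset)
++** gcmkONERROR(gckOS_Signal(Os, signal, gcvTRUE));
++** gcmkONERROR(gckOS_WaitSignal(Os, signal, gcvINFINITE));
++** gcmkVERIFY_OK(gckOS_DestroySignal(Os, signal));
++*/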
++
++/* Map user memory. */
++gceSTATUS
++gckOS_MapUserMemory(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Memory,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR Address
++ );
++
++/* Unmap user memory. */
++gceSTATUS
++gckOS_UnmapUserMemory(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Size,
++ IN gctPOINTER Info,
++ IN gctUINT32 Address
++ );
++
++/******************************************************************************\
++************************** Android Native Fence Sync ***************************
++\******************************************************************************/
++gceSTATUS
++gckOS_CreateSyncTimeline(
++ IN gckOS Os,
++ OUT gctHANDLE * Timeline
++ );
++
++gceSTATUS
++gckOS_DestroySyncTimeline(
++ IN gckOS Os,
++ IN gctHANDLE Timeline
++ );
++
++gceSTATUS
++gckOS_CreateSyncPoint(
++ IN gckOS Os,
++ OUT gctSYNC_POINT * SyncPoint
++ );
++
++gceSTATUS
++gckOS_ReferenceSyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ );
++
++gceSTATUS
++gckOS_DestroySyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ );
++
++gceSTATUS
++gckOS_SignalSyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ );
++
++gceSTATUS
++gckOS_QuerySyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctBOOL_PTR State
++ );
++
++gceSTATUS
++gckOS_CreateNativeFence(
++ IN gckOS Os,
++ IN gctHANDLE Timeline,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctINT * FenceFD
++ );
++
++#if !USE_NEW_LINUX_SIGNAL
++/* Create signal to be used in the user space. */
++gceSTATUS
++gckOS_CreateUserSignal(
++ IN gckOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctINT * SignalID
++ );
++
++/* Destroy signal used in the user space. */
++gceSTATUS
++gckOS_DestroyUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID
++ );
++
++/* Wait for signal used in the user space. */
++gceSTATUS
++gckOS_WaitUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID,
++ IN gctUINT32 Wait
++ );
++
++/* Signal a signal used in the user space. */
++gceSTATUS
++gckOS_SignalUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID,
++ IN gctBOOL State
++ );
++#endif /* USE_NEW_LINUX_SIGNAL */
++
++/* Set a signal owned by a process. */
++#if defined(__QNXNTO__)
++gceSTATUS
++gckOS_UserSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctINT Recvid,
++ IN gctINT Coid
++ );
++#else
++gceSTATUS
++gckOS_UserSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctHANDLE Process
++ );
++#endif
++
++/******************************************************************************\
++** Cache Support
++*/
++
++gceSTATUS
++gckOS_CacheClean(
++ gckOS Os,
++ gctUINT32 ProcessID,
++ gctPHYS_ADDR Handle,
++ gctPOINTER Physical,
++ gctPOINTER Logical,
++ gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gckOS_CacheFlush(
++ gckOS Os,
++ gctUINT32 ProcessID,
++ gctPHYS_ADDR Handle,
++ gctPOINTER Physical,
++ gctPOINTER Logical,
++ gctSIZE_T Bytes
++ );
++
++gceSTATUS
++gckOS_CacheInvalidate(
++ gckOS Os,
++ gctUINT32 ProcessID,
++ gctPHYS_ADDR Handle,
++ gctPOINTER Physical,
++ gctPOINTER Logical,
++ gctSIZE_T Bytes
++ );
++
++/******************************************************************************\
++** Debug Support
++*/
++
++void
++gckOS_SetDebugLevel(
++ IN gctUINT32 Level
++ );
++
++void
++gckOS_SetDebugZone(
++ IN gctUINT32 Zone
++ );
++
++void
++gckOS_SetDebugLevelZone(
++ IN gctUINT32 Level,
++ IN gctUINT32 Zone
++ );
++
++void
++gckOS_SetDebugZones(
++ IN gctUINT32 Zones,
++ IN gctBOOL Enable
++ );
++
++void
++gckOS_SetDebugFile(
++ IN gctCONST_STRING FileName
++ );
++
++/*******************************************************************************
++** Broadcast interface.
++*/
++
++typedef enum _gceBROADCAST
++{
++ /* GPU might be idle. */
++ gcvBROADCAST_GPU_IDLE,
++
++ /* A commit is going to happen. */
++ gcvBROADCAST_GPU_COMMIT,
++
++ /* GPU seems to be stuck. */
++ gcvBROADCAST_GPU_STUCK,
++
++ /* First process gets attached. */
++ gcvBROADCAST_FIRST_PROCESS,
++
++ /* Last process gets detached. */
++ gcvBROADCAST_LAST_PROCESS,
++
++ /* AXI bus error. */
++ gcvBROADCAST_AXI_BUS_ERROR,
++}
++gceBROADCAST;
++
++gceSTATUS
++gckOS_Broadcast(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gceBROADCAST Reason
++ );
++
++gceSTATUS
++gckOS_BroadcastHurry(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT Urgency
++ );
++
++gceSTATUS
++gckOS_BroadcastCalibrateSpeed(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT Idle,
++ IN gctUINT Time
++ );
++
++/*******************************************************************************
++**
++** gckOS_SetGPUPower
++**
++** Set the power of the GPU on or off.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gceCORE Core
++** GPU whose power is set.
++**
++** gctBOOL Clock
++** gcvTRUE to turn on the clock, or gcvFALSE to turn off the clock.
++**
++** gctBOOL Power
++** gcvTRUE to turn on the power, or gcvFALSE to turn off the power.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SetGPUPower(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctBOOL Clock,
++ IN gctBOOL Power
++ );
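++
++/* Illustrative call: turn both the clock and the power of the major core on
++** (error handling via gcmkONERROR assumed from the base headers):
++** gcmkONERROR(gckOS_SetGPUPower(Os, gcvCORE_MAJOR, gcvTRUE, gcvTRUE));
++*/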
++
++gceSTATUS
++gckOS_ResetGPU(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++gceSTATUS
++gckOS_PrepareGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++gceSTATUS
++gckOS_FinishGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core
++ );
++
++gceSTATUS
++gckOS_QueryGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gctUINT32 * Frequency,
++ OUT gctUINT8 * Scale
++ );
++
++gceSTATUS
++gckOS_SetGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT8 Scale
++ );
++
++/*******************************************************************************
++** Semaphores.
++*/
++
++/* Create a new semaphore. */
++gceSTATUS
++gckOS_CreateSemaphore(
++ IN gckOS Os,
++ OUT gctPOINTER * Semaphore
++ );
++
++#if gcdENABLE_VG
++gceSTATUS
++gckOS_CreateSemaphoreVG(
++ IN gckOS Os,
++ OUT gctPOINTER * Semaphore
++ );
++#endif
++
++/* Delete a semaphore. */
++gceSTATUS
++gckOS_DestroySemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ );
++
++/* Acquire a semaphore. */
++gceSTATUS
++gckOS_AcquireSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ );
++
++/* Try to acquire a semaphore. */
++gceSTATUS
++gckOS_TryAcquireSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ );
++
++/* Release a semaphore. */
++gceSTATUS
++gckOS_ReleaseSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ );
++
++/*******************************************************************************
++** Timer API.
++*/
++
++typedef void (*gctTIMERFUNCTION)(gctPOINTER);
++
++/* Create a timer. */
++gceSTATUS
++gckOS_CreateTimer(
++ IN gckOS Os,
++ IN gctTIMERFUNCTION Function,
++ IN gctPOINTER Data,
++ OUT gctPOINTER * Timer
++ );
++
++/* Destroy a timer. */
++gceSTATUS
++gckOS_DestroyTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer
++ );
++
++/* Start a timer. */
++gceSTATUS
++gckOS_StartTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer,
++ IN gctUINT32 Delay
++ );
++
++/* Stop a timer. */
++gceSTATUS
++gckOS_StopTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer
++ );
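++
++/* Timer lifecycle sketch (illustrative; '_Callback' and 'context' are
++** hypothetical; the Delay unit is defined by the OS layer):
++** gctPOINTER timer = gcvNULL;
++** gcmkONERROR(gckOS_CreateTimer(Os, _Callback, context, &timer));
++** gcmkONERROR(gckOS_StartTimer(Os, timer, 10));
++** gcmkONERROR(gckOS_StopTimer(Os, timer));
++** gcmkVERIFY_OK(gckOS_DestroyTimer(Os, timer));
++*/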
++
++/* Get the global video memory mutex. */
++gceSTATUS
++gckOS_GetVideoMemoryMutex(
++ IN gckOS Os,
++ OUT gctPOINTER *Mutex
++ );
++
++/******************************************************************************\
++********************************* gckHEAP Object ********************************
++\******************************************************************************/
++
++typedef struct _gckHEAP * gckHEAP;
++
++/* Construct a new gckHEAP object. */
++gceSTATUS
++gckHEAP_Construct(
++ IN gckOS Os,
++ IN gctSIZE_T AllocationSize,
++ OUT gckHEAP * Heap
++ );
++
++/* Destroy a gckHEAP object. */
++gceSTATUS
++gckHEAP_Destroy(
++ IN gckHEAP Heap
++ );
++
++/* Allocate memory. */
++gceSTATUS
++gckHEAP_Allocate(
++ IN gckHEAP Heap,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Node
++ );
++
++/* Free memory. */
++gceSTATUS
++gckHEAP_Free(
++ IN gckHEAP Heap,
++ IN gctPOINTER Node
++ );
++
++/* Profile the heap. */
++gceSTATUS
++gckHEAP_ProfileStart(
++ IN gckHEAP Heap
++ );
++
++gceSTATUS
++gckHEAP_ProfileEnd(
++ IN gckHEAP Heap,
++ IN gctCONST_STRING Title
++ );
++
++
++/******************************************************************************\
++******************************** gckVIDMEM Object ******************************
++\******************************************************************************/
++
++typedef struct _gckVIDMEM * gckVIDMEM;
++typedef struct _gckKERNEL * gckKERNEL;
++typedef struct _gckDB * gckDB;
++typedef struct _gckDVFS * gckDVFS;
++
++/* Construct a new gckVIDMEM object. */
++gceSTATUS
++gckVIDMEM_Construct(
++ IN gckOS Os,
++ IN gctUINT32 BaseAddress,
++ IN gctSIZE_T Bytes,
++ IN gctSIZE_T Threshold,
++ IN gctSIZE_T Banking,
++ OUT gckVIDMEM * Memory
++ );
++
++/* Destroy a gckVIDMEM object. */
++gceSTATUS
++gckVIDMEM_Destroy(
++ IN gckVIDMEM Memory
++ );
++
++/* Allocate rectangular memory. */
++gceSTATUS
++gckVIDMEM_Allocate(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM Memory,
++ IN gctUINT Width,
++ IN gctUINT Height,
++ IN gctUINT Depth,
++ IN gctUINT BytesPerPixel,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ );
++
++/* Allocate linear memory. */
++gceSTATUS
++gckVIDMEM_AllocateLinear(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM Memory,
++ IN gctSIZE_T Bytes,
++ IN gctUINT32 Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ );
++
++/* Free memory. */
++gceSTATUS
++gckVIDMEM_Free(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node
++ );
++
++/* Lock memory. */
++gceSTATUS
++gckVIDMEM_Lock(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gctBOOL Cacheable,
++ OUT gctUINT32 * Address
++ );
++
++/* Unlock memory. */
++gceSTATUS
++gckVIDMEM_Unlock(
++ IN gckKERNEL Kernel,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gceSURF_TYPE Type,
++ IN OUT gctBOOL * Asynchroneous
++ );
++
++/* Construct a gcuVIDMEM_NODE union for virtual memory. */
++gceSTATUS
++gckVIDMEM_ConstructVirtual(
++ IN gckKERNEL Kernel,
++ IN gctBOOL Contiguous,
++ IN gctSIZE_T Bytes,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ );
++
++/* Destroy a gcuVIDMEM_NODE union for virtual memory. */
++gceSTATUS
++gckVIDMEM_DestroyVirtual(
++ IN gcuVIDMEM_NODE_PTR Node
++ );
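++
++/* Video memory lifecycle sketch (illustrative; error paths elided; the
++** 64-byte alignment and the gcvSURF_BITMAP surface type are assumed values):
++** gcuVIDMEM_NODE_PTR node;
++** gctUINT32 address;
++** gctBOOL async = gcvFALSE;
++** gcmkONERROR(gckVIDMEM_AllocateLinear(Kernel, Memory, bytes, 64,
++**                                      gcvSURF_BITMAP, &node));
++** gcmkONERROR(gckVIDMEM_Lock(Kernel, node, gcvFALSE, &address));
++** gcmkONERROR(gckVIDMEM_Unlock(Kernel, node, gcvSURF_BITMAP, &async));
++** gcmkONERROR(gckVIDMEM_Free(Kernel, node));
++*/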
++
++/******************************************************************************\
++******************************** gckKERNEL Object ******************************
++\******************************************************************************/
++
++struct _gcsHAL_INTERFACE;
++
++/* Notifications. */
++typedef enum _gceNOTIFY
++{
++ gcvNOTIFY_INTERRUPT,
++ gcvNOTIFY_COMMAND_QUEUE,
++}
++gceNOTIFY;
++
++/* Flush flags. */
++typedef enum _gceKERNEL_FLUSH
++{
++ gcvFLUSH_COLOR = 0x01,
++ gcvFLUSH_DEPTH = 0x02,
++ gcvFLUSH_TEXTURE = 0x04,
++ gcvFLUSH_2D = 0x08,
++ gcvFLUSH_ALL = gcvFLUSH_COLOR
++ | gcvFLUSH_DEPTH
++ | gcvFLUSH_TEXTURE
++ | gcvFLUSH_2D,
++}
++gceKERNEL_FLUSH;
++
++/* Construct a new gckKERNEL object. */
++gceSTATUS
++gckKERNEL_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Context,
++ IN gckDB SharedDB,
++ OUT gckKERNEL * Kernel
++ );
++
++/* Destroy a gckKERNEL object. */
++gceSTATUS
++gckKERNEL_Destroy(
++ IN gckKERNEL Kernel
++ );
++
++/* Dispatch a user-level command. */
++gceSTATUS
++gckKERNEL_Dispatch(
++ IN gckKERNEL Kernel,
++ IN gctBOOL FromUser,
++ IN OUT struct _gcsHAL_INTERFACE * Interface
++ );
++
++/* Query the video memory. */
++gceSTATUS
++gckKERNEL_QueryVideoMemory(
++ IN gckKERNEL Kernel,
++ OUT struct _gcsHAL_INTERFACE * Interface
++ );
++
++/* Lookup the gckVIDMEM object for a pool. */
++gceSTATUS
++gckKERNEL_GetVideoMemoryPool(
++ IN gckKERNEL Kernel,
++ IN gcePOOL Pool,
++ OUT gckVIDMEM * VideoMemory
++ );
++
++#if gcdUSE_VIDMEM_PER_PID
++gceSTATUS
++gckKERNEL_GetVideoMemoryPoolPid(
++ IN gckKERNEL Kernel,
++ IN gcePOOL Pool,
++ IN gctUINT32 Pid,
++ OUT gckVIDMEM * VideoMemory
++ );
++
++gceSTATUS
++gckKERNEL_CreateVideoMemoryPoolPid(
++ IN gckKERNEL Kernel,
++ IN gcePOOL Pool,
++ IN gctUINT32 Pid,
++ OUT gckVIDMEM * VideoMemory
++ );
++
++gceSTATUS
++gckKERNEL_RemoveVideoMemoryPoolPid(
++ IN gckKERNEL Kernel,
++ IN gckVIDMEM VideoMemory
++ );
++#endif
++
++/* Map video memory. */
++gceSTATUS
++gckKERNEL_MapVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctBOOL InUserSpace,
++ IN gctUINT32 Address,
++#ifdef __QNXNTO__
++ IN gctUINT32 Pid,
++ IN gctUINT32 Bytes,
++#endif
++ OUT gctPOINTER * Logical
++ );
++
++/* Map video memory. */
++gceSTATUS
++gckKERNEL_MapVideoMemoryEx(
++ IN gckKERNEL Kernel,
++ IN gceCORE Core,
++ IN gctBOOL InUserSpace,
++ IN gctUINT32 Address,
++#ifdef __QNXNTO__
++ IN gctUINT32 Pid,
++ IN gctUINT32 Bytes,
++#endif
++ OUT gctPOINTER * Logical
++ );
++
++#ifdef __QNXNTO__
++/* Unmap video memory. */
++gceSTATUS
++gckKERNEL_UnmapVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Pid,
++ IN gctUINT32 Bytes
++ );
++#endif
++
++/* Map memory. */
++gceSTATUS
++gckKERNEL_MapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ );
++
++/* Unmap memory. */
++gceSTATUS
++gckKERNEL_UnmapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Notification of events. */
++gceSTATUS
++gckKERNEL_Notify(
++ IN gckKERNEL Kernel,
++ IN gceNOTIFY Notification,
++ IN gctBOOL Data
++ );
++
++gceSTATUS
++gckKERNEL_QuerySettings(
++ IN gckKERNEL Kernel,
++ OUT gcsKERNEL_SETTINGS * Settings
++ );
++
++/*******************************************************************************
++**
++** gckKERNEL_Recovery
++**
++** Try to recover the GPU from a fatal error.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++** Pointer to a gckKERNEL object.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_Recovery(
++ IN gckKERNEL Kernel
++ );
++
++/* Set the timeout value for HW operations. */
++void
++gckKERNEL_SetTimeOut(
++ IN gckKERNEL Kernel,
++ IN gctUINT32 timeOut
++ );
++
++/* Get access to the user data. */
++gceSTATUS
++gckKERNEL_OpenUserData(
++ IN gckKERNEL Kernel,
++ IN gctBOOL NeedCopy,
++ IN gctPOINTER StaticStorage,
++ IN gctPOINTER UserPointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ );
++
++/* Release resources associated with the user data connection. */
++gceSTATUS
++gckKERNEL_CloseUserData(
++ IN gckKERNEL Kernel,
++ IN gctBOOL NeedCopy,
++ IN gctBOOL FlushData,
++ IN gctPOINTER UserPointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ );
++
++gceSTATUS
++gckDVFS_Construct(
++ IN gckHARDWARE Hardware,
++ OUT gckDVFS * Frequency
++ );
++
++gceSTATUS
++gckDVFS_Destroy(
++ IN gckDVFS Dvfs
++ );
++
++gceSTATUS
++gckDVFS_Start(
++ IN gckDVFS Dvfs
++ );
++
++gceSTATUS
++gckDVFS_Stop(
++ IN gckDVFS Dvfs
++ );
++
++/******************************************************************************\
++******************************* gckHARDWARE Object *****************************
++\******************************************************************************/
++
++/* Construct a new gckHARDWARE object. */
++gceSTATUS
++gckHARDWARE_Construct(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gckHARDWARE * Hardware
++ );
++
++/* Destroy a gckHARDWARE object. */
++gceSTATUS
++gckHARDWARE_Destroy(
++ IN gckHARDWARE Hardware
++ );
++
++/* Get hardware type. */
++gceSTATUS
++gckHARDWARE_GetType(
++ IN gckHARDWARE Hardware,
++ OUT gceHARDWARE_TYPE * Type
++ );
++
++/* Query system memory requirements. */
++gceSTATUS
++gckHARDWARE_QuerySystemMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ );
++
++/* Build virtual address. */
++gceSTATUS
++gckHARDWARE_BuildVirtualAddress(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ );
++
++/* Query command buffer requirements. */
++gceSTATUS
++gckHARDWARE_QueryCommandBuffer(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * Alignment,
++ OUT gctSIZE_T * ReservedHead,
++ OUT gctSIZE_T * ReservedTail
++ );
++
++/* Add a WAIT/LINK pair in the command queue. */
++gceSTATUS
++gckHARDWARE_WaitLink(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctUINT32 * WaitOffset,
++ OUT gctSIZE_T * WaitBytes
++ );
++
++/* Kickstart the command processor. */
++gceSTATUS
++gckHARDWARE_Execute(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++#ifdef __QNXNTO__
++ IN gctPOINTER Physical,
++ IN gctBOOL PhysicalAddresses,
++#endif
++ IN gctSIZE_T Bytes
++ );
++
++/* Add an END command in the command queue. */
++gceSTATUS
++gckHARDWARE_End(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Add a NOP command in the command queue. */
++gceSTATUS
++gckHARDWARE_Nop(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Add a WAIT command in the command queue. */
++gceSTATUS
++gckHARDWARE_Wait(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Count,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Add a PIPESELECT command in the command queue. */
++gceSTATUS
++gckHARDWARE_PipeSelect(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gcePIPE_SELECT Pipe,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Add a LINK command in the command queue. */
++gceSTATUS
++gckHARDWARE_Link(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctPOINTER FetchAddress,
++ IN gctSIZE_T FetchSize,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Add an EVENT command in the command queue. */
++gceSTATUS
++gckHARDWARE_Event(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT8 Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Query the available memory. */
++gceSTATUS
++gckHARDWARE_QueryMemory(
++ IN gckHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ );
++
++/* Query the identity of the hardware. */
++gceSTATUS
++gckHARDWARE_QueryChipIdentity(
++ IN gckHARDWARE Hardware,
++ OUT gcsHAL_QUERY_CHIP_IDENTITY_PTR Identity
++ );
++
++/* Query the shader support. */
++gceSTATUS
++gckHARDWARE_QueryShaderCaps(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT * VertexUniforms,
++ OUT gctUINT * FragmentUniforms,
++ OUT gctUINT * Varyings
++ );
++
++/* Split a hardware-specific address into its pool and offset. */
++gceSTATUS
++gckHARDWARE_SplitMemory(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ );
++
++/* Update command queue tail pointer. */
++gceSTATUS
++gckHARDWARE_UpdateQueueTail(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Offset
++ );
++
++/* Convert logical address to hardware specific address. */
++gceSTATUS
++gckHARDWARE_ConvertLogical(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ );
++
++#ifdef __QNXNTO__
++/* Convert physical address to hardware specific address. */
++gceSTATUS
++gckHARDWARE_ConvertPhysical(
++ IN gckHARDWARE Hardware,
++ IN gctPHYS_ADDR Physical,
++ OUT gctUINT32 * Address
++ );
++#endif
++
++/* Interrupt manager. */
++gceSTATUS
++gckHARDWARE_Interrupt(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL InterruptValid
++ );
++
++/* Program MMU. */
++gceSTATUS
++gckHARDWARE_SetMMU(
++ IN gckHARDWARE Hardware,
++ IN gctPOINTER Logical
++ );
++
++/* Flush the MMU. */
++gceSTATUS
++gckHARDWARE_FlushMMU(
++ IN gckHARDWARE Hardware
++ );
++
++/* Set the page table base address. */
++gceSTATUS
++gckHARDWARE_SetMMUv2(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Enable,
++ IN gctPOINTER MtlbAddress,
++ IN gceMMU_MODE Mode,
++ IN gctPOINTER SafeAddress,
++ IN gctBOOL FromPower
++ );
++
++/* Get idle register. */
++gceSTATUS
++gckHARDWARE_GetIdle(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Wait,
++ OUT gctUINT32 * Data
++ );
++
++/* Flush the caches. */
++gceSTATUS
++gckHARDWARE_Flush(
++ IN gckHARDWARE Hardware,
++ IN gceKERNEL_FLUSH Flush,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Enable/disable fast clear. */
++gceSTATUS
++gckHARDWARE_SetFastClear(
++ IN gckHARDWARE Hardware,
++ IN gctINT Enable,
++ IN gctINT Compression
++ );
++
++gceSTATUS
++gckHARDWARE_ReadInterrupt(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32_PTR IDs
++ );
++
++/* Power management. */
++gceSTATUS
++gckHARDWARE_SetPowerManagementState(
++ IN gckHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ );
++
++gceSTATUS
++gckHARDWARE_QueryPowerManagementState(
++ IN gckHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ );
++
++gceSTATUS
++gckHARDWARE_SetPowerManagement(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ );
++
++gceSTATUS
++gckHARDWARE_SetGpuProfiler(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL GpuProfiler
++ );
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++gceSTATUS
++gckHARDWARE_SetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 FscaleValue
++ );
++
++gceSTATUS
++gckHARDWARE_GetFscaleValue(
++ IN gckHARDWARE Hardware,
++ IN gctUINT * FscaleValue,
++ IN gctUINT * MinFscaleValue,
++ IN gctUINT * MaxFscaleValue
++ );
++#endif
++
++#if gcdPOWEROFF_TIMEOUT
++gceSTATUS
++gckHARDWARE_SetPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Timeout
++);
++
++gceSTATUS
++gckHARDWARE_QueryPowerOffTimeout(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++);
++#endif
++
++/* Profile 2D Engine. */
++gceSTATUS
++gckHARDWARE_ProfileEngine2D(
++ IN gckHARDWARE Hardware,
++ OUT gcs2D_PROFILE_PTR Profile
++ );
++
++gceSTATUS
++gckHARDWARE_InitializeHardware(
++ IN gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_Reset(
++ IN gckHARDWARE Hardware
++ );
++
++typedef gceSTATUS (*gctISRMANAGERFUNC)(gctPOINTER Context, gceCORE Core);
++
++gceSTATUS
++gckHARDWARE_SetIsrManager(
++ IN gckHARDWARE Hardware,
++ IN gctISRMANAGERFUNC StartIsr,
++ IN gctISRMANAGERFUNC StopIsr,
++ IN gctPOINTER Context
++ );
++
++/* Start a composition. */
++gceSTATUS
++gckHARDWARE_Compose(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Offset,
++ IN gctSIZE_T Size,
++ IN gctUINT8 EventID
++ );
++
++/* Check for Hardware features. */
++gceSTATUS
++gckHARDWARE_IsFeatureAvailable(
++ IN gckHARDWARE Hardware,
++ IN gceFEATURE Feature
++ );
++
++gceSTATUS
++gckHARDWARE_DumpMMUException(
++ IN gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_DumpGPUState(
++ IN gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_InitDVFS(
++ IN gckHARDWARE Hardware
++ );
++
++gceSTATUS
++gckHARDWARE_QueryLoad(
++ IN gckHARDWARE Hardware,
++ OUT gctUINT32 * Load
++ );
++
++gceSTATUS
++gckHARDWARE_SetDVFSPeroid(
++ IN gckHARDWARE Hardware,
++ IN gctUINT32 Frequency
++ );
++
++#if !gcdENABLE_VG
++/******************************************************************************\
++***************************** gckINTERRUPT Object ******************************
++\******************************************************************************/
++
++typedef struct _gckINTERRUPT * gckINTERRUPT;
++
++typedef gceSTATUS (* gctINTERRUPT_HANDLER)(
++ IN gckKERNEL Kernel
++ );
++
++gceSTATUS
++gckINTERRUPT_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckINTERRUPT * Interrupt
++ );
++
++gceSTATUS
++gckINTERRUPT_Destroy(
++ IN gckINTERRUPT Interrupt
++ );
++
++gceSTATUS
++gckINTERRUPT_SetHandler(
++ IN gckINTERRUPT Interrupt,
++ IN OUT gctINT32_PTR Id,
++ IN gctINTERRUPT_HANDLER Handler
++ );
++
++gceSTATUS
++gckINTERRUPT_Notify(
++ IN gckINTERRUPT Interrupt,
++ IN gctBOOL Valid
++ );
++#endif
++/******************************************************************************\
++******************************** gckEVENT Object *******************************
++\******************************************************************************/
++
++typedef struct _gckEVENT * gckEVENT;
++
++/* Construct a new gckEVENT object. */
++gceSTATUS
++gckEVENT_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckEVENT * Event
++ );
++
++/* Destroy a gckEVENT object. */
++gceSTATUS
++gckEVENT_Destroy(
++ IN gckEVENT Event
++ );
++
++/* Add a new event to the list of events. */
++gceSTATUS
++gckEVENT_AddList(
++ IN gckEVENT Event,
++ IN gcsHAL_INTERFACE_PTR Interface,
++ IN gceKERNEL_WHERE FromWhere,
++ IN gctBOOL AllocateAllowed,
++ IN gctBOOL FromKernel
++ );
++
++/* Schedule a FreeNonPagedMemory event. */
++gceSTATUS
++gckEVENT_FreeNonPagedMemory(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule a FreeContiguousMemory event. */
++gceSTATUS
++gckEVENT_FreeContiguousMemory(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule a FreeVideoMemory event. */
++gceSTATUS
++gckEVENT_FreeVideoMemory(
++ IN gckEVENT Event,
++ IN gcuVIDMEM_NODE_PTR VideoMemory,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule a signal event. */
++gceSTATUS
++gckEVENT_Signal(
++ IN gckEVENT Event,
++ IN gctSIGNAL Signal,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++/* Schedule an Unlock event. */
++gceSTATUS
++gckEVENT_Unlock(
++ IN gckEVENT Event,
++ IN gceKERNEL_WHERE FromWhere,
++ IN gcuVIDMEM_NODE_PTR Node,
++ IN gceSURF_TYPE Type
++ );
++
++gceSTATUS
++gckEVENT_CommitDone(
++ IN gckEVENT Event,
++ IN gceKERNEL_WHERE FromWhere
++ );
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++/* Schedule a FreeVirtualCommandBuffer event. */
++gceSTATUS
++gckEVENT_DestroyVirtualCommandBuffer(
++ IN gckEVENT Event,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gceKERNEL_WHERE FromWhere
++ );
++#endif
++
++gceSTATUS
++gckEVENT_Submit(
++ IN gckEVENT Event,
++ IN gctBOOL Wait,
++ IN gctBOOL FromPower
++ );
++
++/* Commit an event queue. */
++gceSTATUS
++gckEVENT_Commit(
++ IN gckEVENT Event,
++ IN gcsQUEUE_PTR Queue
++ );
++
++/* Schedule a composition event. */
++gceSTATUS
++gckEVENT_Compose(
++ IN gckEVENT Event,
++ IN gcsHAL_COMPOSE_PTR Info
++ );
++
++/* Event callback routine. */
++gceSTATUS
++gckEVENT_Notify(
++ IN gckEVENT Event,
++ IN gctUINT32 IDs
++ );
++
++/* Event callback routine. */
++gceSTATUS
++gckEVENT_Interrupt(
++ IN gckEVENT Event,
++ IN gctUINT32 IDs
++ );
++
++gceSTATUS
++gckEVENT_Dump(
++ IN gckEVENT Event
++ );
++
++/******************************************************************************\
++******************************* gckCOMMAND Object ******************************
++\******************************************************************************/
++
++typedef struct _gckCOMMAND * gckCOMMAND;
++
++/* Construct a new gckCOMMAND object. */
++gceSTATUS
++gckCOMMAND_Construct(
++ IN gckKERNEL Kernel,
++ OUT gckCOMMAND * Command
++ );
++
++/* Destroy a gckCOMMAND object. */
++gceSTATUS
++gckCOMMAND_Destroy(
++ IN gckCOMMAND Command
++ );
++
++/* Acquire command queue synchronization objects. */
++gceSTATUS
++gckCOMMAND_EnterCommit(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ );
++
++/* Release command queue synchronization objects. */
++gceSTATUS
++gckCOMMAND_ExitCommit(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ );
++
++/* Start the command queue. */
++gceSTATUS
++gckCOMMAND_Start(
++ IN gckCOMMAND Command
++ );
++
++/* Stop the command queue. */
++gceSTATUS
++gckCOMMAND_Stop(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromRecovery
++ );
++
++/* Commit a buffer to the command queue. */
++gceSTATUS
++gckCOMMAND_Commit(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context,
++ IN gcoCMDBUF CommandBuffer,
++ IN gcsSTATE_DELTA_PTR StateDelta,
++ IN gcsQUEUE_PTR EventQueue,
++ IN gctUINT32 ProcessID
++ );
++
++/* Reserve space in the command buffer. */
++gceSTATUS
++gckCOMMAND_Reserve(
++ IN gckCOMMAND Command,
++ IN gctSIZE_T RequestedBytes,
++ OUT gctPOINTER * Buffer,
++ OUT gctSIZE_T * BufferSize
++ );
++
++/* Execute reserved space in the command buffer. */
++gceSTATUS
++gckCOMMAND_Execute(
++ IN gckCOMMAND Command,
++    IN gctSIZE_T RequestedBytes
++ );
++
++/* Stall the command queue. */
++gceSTATUS
++gckCOMMAND_Stall(
++ IN gckCOMMAND Command,
++ IN gctBOOL FromPower
++ );
++
++/* Attach user process. */
++gceSTATUS
++gckCOMMAND_Attach(
++ IN gckCOMMAND Command,
++ OUT gckCONTEXT * Context,
++ OUT gctSIZE_T * StateCount,
++ IN gctUINT32 ProcessID
++ );
++
++/* Detach user process. */
++gceSTATUS
++gckCOMMAND_Detach(
++ IN gckCOMMAND Command,
++ IN gckCONTEXT Context
++ );
++
++#if gcdVIRTUAL_COMMAND_BUFFER
++gceSTATUS
++gckCOMMAND_DumpExecutingBuffer(
++ IN gckCOMMAND Command
++ );
++#endif
++
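++/* Illustrative commit-path sketch (an assumption about typical use, not
++   part of the patch): gckCOMMAND_Commit is bracketed by the
++   EnterCommit/ExitCommit synchronization pair declared above. Command,
++   Context, CommandBuffer, StateDelta, EventQueue and ProcessID are
++   assumed to come from the caller.
++
++       gcmkONERROR(gckCOMMAND_EnterCommit(Command, gcvFALSE));
++       status = gckCOMMAND_Commit(Command, Context, CommandBuffer,
++                                  StateDelta, EventQueue, ProcessID);
++       gcmkVERIFY_OK(gckCOMMAND_ExitCommit(Command, gcvFALSE));
++*/
++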
++/******************************************************************************\
++********************************* gckMMU Object ********************************
++\******************************************************************************/
++
++typedef struct _gckMMU * gckMMU;
++
++/* Construct a new gckMMU object. */
++gceSTATUS
++gckMMU_Construct(
++ IN gckKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckMMU * Mmu
++ );
++
++/* Destroy a gckMMU object. */
++gceSTATUS
++gckMMU_Destroy(
++ IN gckMMU Mmu
++ );
++
++/* Enable the MMU. */
++gceSTATUS
++gckMMU_Enable(
++ IN gckMMU Mmu,
++ IN gctUINT32 PhysBaseAddr,
++ IN gctUINT32 PhysSize
++ );
++
++/* Allocate pages inside the MMU. */
++gceSTATUS
++gckMMU_AllocatePages(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ );
++
++gceSTATUS
++gckMMU_AllocatePagesEx(
++ IN gckMMU Mmu,
++ IN gctSIZE_T PageCount,
++ IN gceSURF_TYPE Type,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ );
++
++/* Remove a page table from the MMU. */
++gceSTATUS
++gckMMU_FreePages(
++ IN gckMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ );
++
++/* Set the MMU page with info. */
++gceSTATUS
++gckMMU_SetPage(
++ IN gckMMU Mmu,
++ IN gctUINT32 PageAddress,
++ IN gctUINT32 *PageEntry
++ );
++
++#ifdef __QNXNTO__
++gceSTATUS
++gckMMU_InsertNode(
++ IN gckMMU Mmu,
++ IN gcuVIDMEM_NODE_PTR Node);
++
++gceSTATUS
++gckMMU_RemoveNode(
++ IN gckMMU Mmu,
++ IN gcuVIDMEM_NODE_PTR Node);
++#endif
++
++#ifdef __QNXNTO__
++gceSTATUS
++gckMMU_FreeHandleMemory(
++ IN gckKERNEL Kernel,
++ IN gckMMU Mmu,
++ IN gctUINT32 Pid
++ );
++#endif
++
++gceSTATUS
++gckMMU_Flush(
++ IN gckMMU Mmu
++ );
++
++gceSTATUS
++gckMMU_DumpPageTableEntry(
++ IN gckMMU Mmu,
++ IN gctUINT32 Address
++ );
++
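++/* Illustrative sketch (assumed usage): reserving and releasing GPU
++   virtual pages with the API above. PageTable receives a pointer into
++   the page table and Address the GPU virtual address of the first page;
++   the pageCount variable is an assumption here.
++
++       gctPOINTER pageTable = gcvNULL;
++       gctUINT32 address = 0;
++       gcmkONERROR(gckMMU_AllocatePages(Mmu, pageCount, &pageTable, &address));
++       ...
++       gcmkVERIFY_OK(gckMMU_FreePages(Mmu, pageTable, pageCount));
++*/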
++
++#if VIVANTE_PROFILER
++gceSTATUS
++gckHARDWARE_QueryProfileRegisters(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Clear,
++ OUT gcsPROFILER_COUNTERS * Counters
++ );
++#endif
++
++#if VIVANTE_PROFILER_CONTEXT
++gceSTATUS
++gckHARDWARE_QueryContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gctBOOL Clear,
++ IN gckCONTEXT Context,
++ OUT gcsPROFILER_COUNTERS * Counters
++ );
++
++gceSTATUS
++gckHARDWARE_UpdateContextProfile(
++ IN gckHARDWARE Hardware,
++ IN gckCONTEXT Context
++ );
++#endif
++
++gceSTATUS
++gckOS_SignalQueryHardware(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ OUT gckHARDWARE * Hardware
++ );
++
++gceSTATUS
++gckOS_SignalSetHardware(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ gckHARDWARE Hardware
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#if gcdENABLE_VG
++#include "gc_hal_vg.h"
++#endif
++
++#endif /* __gc_hal_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_kernel_buffer.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_kernel_buffer.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_kernel_buffer.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_kernel_buffer.h 2015-07-27 23:13:06.214808565 +0200
+@@ -0,0 +1,185 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the License, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_buffer_h_
++#define __gc_hal_kernel_buffer_h_
++
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++************************ Command Buffer and Event Objects **********************
++\******************************************************************************/
++
++/* The number of context buffers per user. */
++#define gcdCONTEXT_BUFFER_COUNT 2
++
++/* State delta record. */
++typedef struct _gcsSTATE_DELTA_RECORD * gcsSTATE_DELTA_RECORD_PTR;
++typedef struct _gcsSTATE_DELTA_RECORD
++{
++ /* State address. */
++ gctUINT address;
++
++ /* State mask. */
++ gctUINT32 mask;
++
++ /* State data. */
++ gctUINT32 data;
++}
++gcsSTATE_DELTA_RECORD;
++
++/* State delta. */
++typedef struct _gcsSTATE_DELTA
++{
++ /* For debugging: the number of delta in the order of creation. */
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ gctUINT num;
++#endif
++
++    /* Main state delta ID. Every time the state delta structure gets
++       reinitialized, the main ID is incremented. If the main state ID
++       overflows, all map entry IDs get reinitialized to make sure there is
++       no potential erroneous match after the overflow. */
++ gctUINT id;
++
++ /* The number of contexts pending modification by the delta. */
++ gctINT refCount;
++
++ /* Vertex element count for the delta buffer. */
++ gctUINT elementCount;
++
++ /* Number of states currently stored in the record array. */
++ gctUINT recordCount;
++
++ /* Record array; holds all modified states in gcsSTATE_DELTA_RECORD. */
++ gctUINT64 recordArray;
++
++ /* Map entry ID is used for map entry validation. If map entry ID does not
++ match the main state delta ID, the entry and the corresponding state are
++ considered not in use. */
++ gctUINT64 mapEntryID;
++ gctUINT mapEntryIDSize;
++
++ /* If the map entry ID matches the main state delta ID, index points to
++ the state record in the record array. */
++ gctUINT64 mapEntryIndex;
++
++ /* Previous and next state deltas in gcsSTATE_DELTA. */
++ gctUINT64 prev;
++ gctUINT64 next;
++}
++gcsSTATE_DELTA;
++
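++/* Illustrative validation sketch (an assumption about how the fields are
++   meant to be used): a map entry only points at a live record when its
++   entry ID matches the current delta ID. The gctUINT64 fields hold
++   pointers; gcmUINT64_TO_PTR and the index variable are assumptions.
++
++       gctUINT32 * ids = gcmUINT64_TO_PTR(delta->mapEntryID);
++       gctUINT32 * map = gcmUINT64_TO_PTR(delta->mapEntryIndex);
++       gcsSTATE_DELTA_RECORD_PTR records = gcmUINT64_TO_PTR(delta->recordArray);
++       if (ids[index] == delta->id)
++           record = &records[map[index]];
++*/
++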
++/* Command buffer object. */
++struct _gcoCMDBUF
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Command buffer entry and exit pipes. */
++ gcePIPE_SELECT entryPipe;
++ gcePIPE_SELECT exitPipe;
++
++ /* Feature usage flags. */
++ gctBOOL using2D;
++ gctBOOL using3D;
++ gctBOOL usingFilterBlit;
++ gctBOOL usingPalette;
++
++ /* Physical address of command buffer. Just a name. */
++ gctUINT32 physical;
++
++ /* Logical address of command buffer. */
++ gctUINT64 logical;
++
++ /* Number of bytes in command buffer. */
++ gctUINT bytes;
++
++ /* Start offset into the command buffer. */
++ gctUINT startOffset;
++
++ /* Current offset into the command buffer. */
++ gctUINT offset;
++
++ /* Number of free bytes in command buffer. */
++ gctUINT free;
++
++ /* Location of the last reserved area. */
++ gctUINT64 lastReserve;
++ gctUINT lastOffset;
++
++#if gcdSECURE_USER
++ /* Hint array for the current command buffer. */
++ gctUINT hintArraySize;
++ gctUINT64 hintArray;
++ gctUINT64 hintArrayTail;
++#endif
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++ /* Last load state command location and hardware address. */
++ gctUINT64 lastLoadStatePtr;
++ gctUINT32 lastLoadStateAddress;
++ gctUINT32 lastLoadStateCount;
++#endif
++};
++
++typedef struct _gcsQUEUE
++{
++    /* Pointer to the next gcsQUEUE structure in the queue. */
++ gctUINT64 next;
++
++ /* Event information. */
++ gcsHAL_INTERFACE iface;
++}
++gcsQUEUE;
++
++/* Event queue. */
++struct _gcoQUEUE
++{
++ /* The object. */
++ gcsOBJECT object;
++
++ /* Pointer to current event queue. */
++ gcsQUEUE_PTR head;
++ gcsQUEUE_PTR tail;
++
++#ifdef __QNXNTO__
++ /* Buffer for records. */
++ gcsQUEUE_PTR records;
++ gctUINT32 freeBytes;
++ gctUINT32 offset;
++#else
++ /* List of free records. */
++ gcsQUEUE_PTR freeList;
++#endif
++ #define gcdIN_QUEUE_RECORD_LIMIT 16
++    /* Number of records currently in the queue. */
++ gctUINT32 recordCount;
++};
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_buffer_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_mem.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_mem.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_mem.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_mem.h 2015-07-27 23:13:06.214808565 +0200
+@@ -0,0 +1,530 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the License, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++/*
++** Include file for the local memory management.
++*/
++
++#ifndef __gc_hal_mem_h_
++#define __gc_hal_mem_h_
++#ifndef VIVANTE_NO_3D
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/*******************************************************************************
++** Usage:
++
++ The macros to declare MemPool type and functions are
++ gcmMEM_DeclareFSMemPool (Type, TypeName, Prefix)
++ gcmMEM_DeclareVSMemPool (Type, TypeName, Prefix)
++ gcmMEM_DeclareAFSMemPool(Type, TypeName, Prefix)
++
++ The data structures for MemPool are
++ typedef struct _gcsMEM_FS_MEM_POOL * gcsMEM_FS_MEM_POOL;
++ typedef struct _gcsMEM_VS_MEM_POOL * gcsMEM_VS_MEM_POOL;
++ typedef struct _gcsMEM_AFS_MEM_POOL * gcsMEM_AFS_MEM_POOL;
++
++ The MemPool constructor and destructor functions are
++ gcfMEM_InitFSMemPool(gcsMEM_FS_MEM_POOL *, gcoOS, gctUINT, gctUINT);
++ gcfMEM_FreeFSMemPool(gcsMEM_FS_MEM_POOL *);
++ gcfMEM_InitVSMemPool(gcsMEM_VS_MEM_POOL *, gcoOS, gctUINT, gctBOOL);
++ gcfMEM_FreeVSMemPool(gcsMEM_VS_MEM_POOL *);
++ gcfMEM_InitAFSMemPool(gcsMEM_AFS_MEM_POOL *, gcoOS, gctUINT);
++ gcfMEM_FreeAFSMemPool(gcsMEM_AFS_MEM_POOL *);
++
++ FS: for Fixed-Size data structures
++ VS: for Variable-size data structures
++ AFS: for Array of Fixed-Size data structures
++
++
++ // Example 1: For a fixed-size data structure, struct gcsNode.
++ // It is used locally in a file, so the functions are static without prefix.
++    // At top level, declare allocate and free functions.
++ // The first argument is the data type.
++    // The second argument is the short name used in the functions.
++ gcmMEM_DeclareFSMemPool(struct gcsNode, Node, );
++
++ // The previous macro creates two inline functions,
++ // _AllocateNode and _FreeNode.
++
++ // In function or struct
++ gcsMEM_FS_MEM_POOL nodeMemPool;
++
++ // In function,
++ struct gcsNode * node;
++ gceSTATUS status;
++
++ // Before using the memory pool, initialize it.
++ // The second argument is the gcoOS object.
++ // The third argument is the number of data structures to allocate for each chunk.
++ status = gcfMEM_InitFSMemPool(&nodeMemPool, os, 100, sizeof(struct gcsNode));
++ ...
++
++ // Allocate a node.
++ status = _AllocateNode(nodeMemPool, &node);
++ ...
++ // Free a node.
++ _FreeNode(nodeMemPool, node);
++
++ // After using the memory pool, free it.
++ gcfMEM_FreeFSMemPool(&nodeMemPool);
++
++
++ // Example 2: For array of fixed-size data structures, struct gcsNode.
++ // It is used in several files, so the functions are extern with prefix.
++    // At top level, declare allocate and free functions.
++ // The first argument is the data type, and the second one is the short name
++    // used in the functions.
++ gcmMEM_DeclareAFSMemPool(struct gcsNode, NodeArray, gcfOpt);
++
++ // The previous macro creates two inline functions,
++ // gcfOpt_AllocateNodeArray and gcfOpt_FreeNodeArray.
++
++ // In function or struct
++ gcsMEM_AFS_MEM_POOL nodeArrayMemPool;
++
++ // In function,
++ struct gcsNode * nodeArray;
++ gceSTATUS status;
++
++ // Before using the array memory pool, initialize it.
++ // The second argument is the gcoOS object, the third is the number of data
++ // structures to allocate for each chunk.
++ status = gcfMEM_InitAFSMemPool(&nodeArrayMemPool, os, sizeof(struct gcsNode));
++ ...
++
++ // Allocate a node array of size 100.
++ status = gcfOpt_AllocateNodeArray(nodeArrayMemPool, &nodeArray, 100);
++ ...
++ // Free a node array.
++ gcfOpt_FreeNodeArray(&nodeArrayMemPool, nodeArray);
++
++ // After using the array memory pool, free it.
++ gcfMEM_FreeAFSMemPool(&nodeArrayMemPool);
++
++*******************************************************************************/
++
++/*******************************************************************************
++** To switch back to use gcoOS_Allocate and gcoOS_Free, add
++** #define USE_LOCAL_MEMORY_POOL 0
++** before including this file.
++*******************************************************************************/
++#ifndef USE_LOCAL_MEMORY_POOL
++/*
++ USE_LOCAL_MEMORY_POOL
++
++ This define enables the local memory management to improve performance.
++*/
++#define USE_LOCAL_MEMORY_POOL 1
++#endif
++
++/*******************************************************************************
++** Memory Pool Data Structures
++*******************************************************************************/
++#if USE_LOCAL_MEMORY_POOL
++ typedef struct _gcsMEM_FS_MEM_POOL * gcsMEM_FS_MEM_POOL;
++ typedef struct _gcsMEM_VS_MEM_POOL * gcsMEM_VS_MEM_POOL;
++ typedef struct _gcsMEM_AFS_MEM_POOL * gcsMEM_AFS_MEM_POOL;
++#else
++ typedef gcoOS gcsMEM_FS_MEM_POOL;
++ typedef gcoOS gcsMEM_VS_MEM_POOL;
++ typedef gcoOS gcsMEM_AFS_MEM_POOL;
++#endif
++
++/*******************************************************************************
++** Memory Pool Macros
++*******************************************************************************/
++#if USE_LOCAL_MEMORY_POOL
++#define gcmMEM_DeclareFSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer \
++ ) \
++{ \
++ return(gcfMEM_FSMemPoolGetANode(MemPool, (gctPOINTER *) Pointer)); \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ gcmERR_RETURN(gcfMEM_FSMemPoolGetANode(MemPool, (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, gcmSIZEOF(Type)); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcfMEM_FSMemPoolFreeANode(MemPool, (gctPOINTER) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName##List( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type * FirstPointer, \
++ Type * LastPointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x FirstPointer=0x%x LastPointer=0x%x", MemPool, FirstPointer, LastPointer); \
++ status = gcfMEM_FSMemPoolFreeAList(MemPool, (gctPOINTER) FirstPointer, (gctPOINTER) LastPointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#define gcmMEM_DeclareVSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Size \
++ ) \
++{ \
++ gceSTATUS status;\
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \
++ status = gcfMEM_VSMemPoolGetANode(MemPool, Size, (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++ Prefix##_CAllocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Size \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \
++ gcmERR_RETURN(gcfMEM_VSMemPoolGetANode(MemPool, Size, (gctPOINTER *) Pointer)); \
++    gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Size); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++    gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcfMEM_VSMemPoolFreeANode(MemPool, (gctPOINTER) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#define gcmMEM_DeclareAFSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Count \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \
++ status = gcfMEM_AFSMemPoolGetANode(MemPool, Count, (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Count \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \
++ gcmERR_RETURN(gcfMEM_AFSMemPoolGetANode(MemPool, Count, (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Count * gcmSIZEOF(Type)); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcfMEM_AFSMemPoolFreeANode(MemPool, (gctPOINTER) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#else
++
++#define gcmMEM_DeclareFSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcoOS_Allocate(MemPool, \
++ gcmSIZEOF(Type), \
++ (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type ** Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ gcmERR_RETURN(gcoOS_Allocate(MemPool, \
++ gcmSIZEOF(Type), \
++ (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, gcmSIZEOF(Type)); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_FS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcmOS_SAFE_FREE(MemPool, Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#define gcmMEM_DeclareVSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_VS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Size \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \
++ status = gcoOS_Allocate(MemPool, \
++ Size, \
++ (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_VS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Size \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Size=%u", MemPool, Pointer, Size); \
++ gcmERR_RETURN(gcoOS_Allocate(MemPool, \
++ Size, \
++ (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Size); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_VS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcmOS_SAFE_FREE(MemPool, Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++
++#define gcmMEM_DeclareAFSMemPool(Type, TypeName, Prefix) \
++gceSTATUS \
++Prefix##_Allocate##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Count \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \
++ status = gcoOS_Allocate(MemPool, \
++ Count * gcmSIZEOF(Type), \
++ (gctPOINTER *) Pointer); \
++ gcmFOOTER(); \
++ return status; \
++} \
++ \
++gceSTATUS \
++Prefix##_CAllocate##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type ** Pointer, \
++ gctUINT Count \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x Count=%u", MemPool, Pointer, Count); \
++ gcmERR_RETURN(gcoOS_Allocate(MemPool, \
++ Count * gcmSIZEOF(Type), \
++ (gctPOINTER *) Pointer)); \
++ gcoOS_ZeroMemory(*(gctPOINTER *) Pointer, Count * gcmSIZEOF(Type)); \
++ gcmFOOTER(); \
++ return gcvSTATUS_OK; \
++} \
++ \
++gceSTATUS \
++Prefix##_Free##TypeName( \
++ gcsMEM_AFS_MEM_POOL MemPool, \
++ Type * Pointer \
++ ) \
++{ \
++ gceSTATUS status; \
++ gcmHEADER_ARG("MemPool=0x%x Pointer=0x%x", MemPool, Pointer); \
++ status = gcmOS_SAFE_FREE(MemPool, Pointer); \
++ gcmFOOTER(); \
++ return status; \
++}
++#endif
++
++/*******************************************************************************
++** Memory Pool Data Functions
++*******************************************************************************/
++gceSTATUS
++gcfMEM_InitFSMemPool(
++ IN gcsMEM_FS_MEM_POOL * MemPool,
++ IN gcoOS OS,
++ IN gctUINT NodeCount,
++ IN gctUINT NodeSize
++ );
++
++gceSTATUS
++gcfMEM_FreeFSMemPool(
++ IN gcsMEM_FS_MEM_POOL * MemPool
++ );
++
++gceSTATUS
++gcfMEM_FSMemPoolGetANode(
++ IN gcsMEM_FS_MEM_POOL MemPool,
++ OUT gctPOINTER * Node
++ );
++
++gceSTATUS
++gcfMEM_FSMemPoolFreeANode(
++ IN gcsMEM_FS_MEM_POOL MemPool,
++ IN gctPOINTER Node
++ );
++
++gceSTATUS
++gcfMEM_FSMemPoolFreeAList(
++ IN gcsMEM_FS_MEM_POOL MemPool,
++ IN gctPOINTER FirstNode,
++ IN gctPOINTER LastNode
++ );
++
++gceSTATUS
++gcfMEM_InitVSMemPool(
++ IN gcsMEM_VS_MEM_POOL * MemPool,
++ IN gcoOS OS,
++ IN gctUINT BlockSize,
++ IN gctBOOL RecycleFreeNode
++ );
++
++gceSTATUS
++gcfMEM_FreeVSMemPool(
++ IN gcsMEM_VS_MEM_POOL * MemPool
++ );
++
++gceSTATUS
++gcfMEM_VSMemPoolGetANode(
++ IN gcsMEM_VS_MEM_POOL MemPool,
++ IN gctUINT Size,
++ IN gctUINT Alignment,
++ OUT gctPOINTER * Node
++ );
++
++gceSTATUS
++gcfMEM_VSMemPoolFreeANode(
++ IN gcsMEM_VS_MEM_POOL MemPool,
++ IN gctPOINTER Node
++ );
++
++gceSTATUS
++gcfMEM_InitAFSMemPool(
++ IN gcsMEM_AFS_MEM_POOL *MemPool,
++ IN gcoOS OS,
++ IN gctUINT NodeCount,
++ IN gctUINT NodeSize
++ );
++
++gceSTATUS
++gcfMEM_FreeAFSMemPool(
++ IN gcsMEM_AFS_MEM_POOL *MemPool
++ );
++
++gceSTATUS
++gcfMEM_AFSMemPoolGetANode(
++ IN gcsMEM_AFS_MEM_POOL MemPool,
++ IN gctUINT Count,
++ OUT gctPOINTER * Node
++ );
++
++gceSTATUS
++gcfMEM_AFSMemPoolFreeANode(
++ IN gcsMEM_AFS_MEM_POOL MemPool,
++ IN gctPOINTER Node
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* VIVANTE_NO_3D */
++#endif /* __gc_hal_mem_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_options.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_options.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_options.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_options.h 2015-07-27 23:13:06.214808565 +0200
+@@ -0,0 +1,947 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the License, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_options_h_
++#define __gc_hal_options_h_
++
++/*
++ gcdPRINT_VERSION
++
++ Print HAL version.
++*/
++#ifndef gcdPRINT_VERSION
++# define gcdPRINT_VERSION 0
++#endif
++
++/*
++ USE_NEW_LINUX_SIGNAL
++
++ This define enables the Linux kernel signaling between kernel and user.
++*/
++#ifndef USE_NEW_LINUX_SIGNAL
++# define USE_NEW_LINUX_SIGNAL 0
++#endif
++
++/*
++ VIVANTE_PROFILER
++
++ This define enables the profiler.
++*/
++#ifndef VIVANTE_PROFILER
++# define VIVANTE_PROFILER 1
++#endif
++
++#ifndef VIVANTE_PROFILER_PERDRAW
++# define VIVANTE_PROFILER_PERDRAW 0
++#endif
++
++/*
++ VIVANTE_PROFILER_CONTEXT
++
++    This define enables the profiler for each hardware context.
++*/
++#ifndef VIVANTE_PROFILER_CONTEXT
++# define VIVANTE_PROFILER_CONTEXT 1
++#endif
++
++/*
++ gcdUSE_VG
++
++ Enable VG HAL layer (only for GC350).
++*/
++#ifndef gcdUSE_VG
++# define gcdUSE_VG 0
++#endif
++
++/*
++ USE_SW_FB
++
++ Set to 1 if the frame buffer memory cannot be accessed by the GPU.
++*/
++#ifndef USE_SW_FB
++# define USE_SW_FB 0
++#endif
++
++/*
++ USE_SUPER_SAMPLING
++
++ This define enables super-sampling support.
++*/
++#define USE_SUPER_SAMPLING 0
++
++/*
++ PROFILE_HAL_COUNTERS
++
++ This define enables HAL counter profiling support. HW and SHADER
++ counter profiling depends on this.
++*/
++#ifndef PROFILE_HAL_COUNTERS
++# define PROFILE_HAL_COUNTERS 1
++#endif
++
++/*
++ PROFILE_HW_COUNTERS
++
++ This define enables HW counter profiling support.
++*/
++#ifndef PROFILE_HW_COUNTERS
++# define PROFILE_HW_COUNTERS 1
++#endif
++
++/*
++ PROFILE_SHADER_COUNTERS
++
++ This define enables SHADER counter profiling support.
++*/
++#ifndef PROFILE_SHADER_COUNTERS
++# define PROFILE_SHADER_COUNTERS 1
++#endif
++
++/*
++ COMMAND_PROCESSOR_VERSION
++
++ The version of the command buffer and task manager.
++*/
++#define COMMAND_PROCESSOR_VERSION 1
++
++/*
++ gcdDUMP_KEY
++
++ Set this to a string that appears in 'cat /proc/<pid>/cmdline'. E.g. 'camera'.
++ HAL will create dumps for the processes matching this key.
++*/
++#ifndef gcdDUMP_KEY
++# define gcdDUMP_KEY "process"
++#endif
++
++/*
++ gcdDUMP_PATH
++
++ The dump file location. Some processes cannot write to the sdcard.
++ Try apps' data dir, e.g. /data/data/com.android.launcher
++*/
++#ifndef gcdDUMP_PATH
++#if defined(ANDROID)
++# define gcdDUMP_PATH "/mnt/sdcard/"
++#else
++# define gcdDUMP_PATH "./"
++#endif
++#endif
++
++/*
++ gcdDUMP
++
++ When set to 1, a dump of all states and memory uploads, as well as other
++    hardware-related execution, will be printed to the debug console. This
++ data can be used for playing back applications.
++*/
++#ifndef gcdDUMP
++# define gcdDUMP 0
++#endif
++
++/*
++ gcdDUMP_API
++
++    When set to 1, a high-level dump of the EGL and GL/VG APIs is
++    captured.
++*/
++#ifndef gcdDUMP_API
++# define gcdDUMP_API 0
++#endif
++
++/*
++ gcdDUMP_FRAMERATE
++    When set to a value other than zero, the average frame rate will be dumped.
++    The value set is the starting frame from which the average is calculated.
++    This is needed because the first few frames are sometimes too slow to be
++    included in the average. Frame count starts from 1.
++*/
++#ifndef gcdDUMP_FRAMERATE
++# define gcdDUMP_FRAMERATE 0
++#endif
++
++/*
++ gcdVIRTUAL_COMMAND_BUFFER
++ When set to 1, user command buffer and context buffer will be allocated
++ from gcvPOOL_VIRTUAL.
++*/
++#ifndef gcdVIRTUAL_COMMAND_BUFFER
++# define gcdVIRTUAL_COMMAND_BUFFER 0
++#endif
++
++/*
++ gcdENABLE_FSCALE_VAL_ADJUST
++    When non-zero, the FSCALE_VAL used at gcvPOWER_ON can be adjusted externally.
++ */
++#ifndef gcdENABLE_FSCALE_VAL_ADJUST
++# define gcdENABLE_FSCALE_VAL_ADJUST 1
++#endif
++
++/*
++ gcdDUMP_IN_KERNEL
++
++ When set to 1, all dumps will happen in the kernel. This is handy if
++ you want the kernel to dump its command buffers as well and the data
++ needs to be in sync.
++*/
++#ifndef gcdDUMP_IN_KERNEL
++# define gcdDUMP_IN_KERNEL 0
++#endif
++
++/*
++ gcdDUMP_COMMAND
++
++ When set to non-zero, the command queue will dump all incoming command
++ and context buffers as well as all other modifications to the command
++ queue.
++*/
++#ifndef gcdDUMP_COMMAND
++# define gcdDUMP_COMMAND 0
++#endif
++
++/*
++ gcdDUMP_FRAME_TGA
++
++    When set to a value other than 0, a dump of the frame specified by the value
++    will be done into frame.tga. Frame count starts from 1.
++ */
++#ifndef gcdDUMP_FRAME_TGA
++#define gcdDUMP_FRAME_TGA 0
++#endif
++/*
++ gcdNULL_DRIVER
++
++ Set to 1 for infinite speed hardware.
++ Set to 2 for bypassing the HAL.
++ Set to 3 for bypassing the drivers.
++*/
++#ifndef gcdNULL_DRIVER
++# define gcdNULL_DRIVER 0
++#endif
++
++/*
++ gcdENABLE_TIMEOUT_DETECTION
++
++ Enable timeout detection.
++*/
++#ifndef gcdENABLE_TIMEOUT_DETECTION
++# define gcdENABLE_TIMEOUT_DETECTION 0
++#endif
++
++/*
++ gcdCMD_BUFFER_SIZE
++
++ Number of bytes in a command buffer.
++*/
++#ifndef gcdCMD_BUFFER_SIZE
++# define gcdCMD_BUFFER_SIZE (128 << 10)
++#endif
++
++/*
++ gcdCMD_BUFFERS
++
++ Number of command buffers to use per client.
++*/
++#ifndef gcdCMD_BUFFERS
++# define gcdCMD_BUFFERS 2
++#endif
++
++/*
++ gcdMAX_CMD_BUFFERS
++
++ Maximum number of command buffers to use per client.
++*/
++#ifndef gcdMAX_CMD_BUFFERS
++# define gcdMAX_CMD_BUFFERS 8
++#endif
++
++/*
++ gcdCOMMAND_QUEUES
++
++ Number of command queues in the kernel.
++*/
++#ifndef gcdCOMMAND_QUEUES
++# define gcdCOMMAND_QUEUES 2
++#endif
++
++/*
++ gcdPOWER_CONTROL_DELAY
++
++    The delay in milliseconds required to wait until the GPU has woken up
++    from a suspend or power-down state. This is system dependent because
++    the bus clock also needs to stabilize.
++*/
++#ifndef gcdPOWER_CONTROL_DELAY
++# define gcdPOWER_CONTROL_DELAY 0
++#endif
++
++/*
++ gcdMIRROR_PAGETABLE
++
++    Enable it when GPUs with the old MMU and the new MMU exist on the same
++    SoC. It makes each GPU use the same virtual address to access the same
++    physical memory.
++*/
++#ifndef gcdMIRROR_PAGETABLE
++# define gcdMIRROR_PAGETABLE 0
++#endif
++
++/*
++ gcdMMU_SIZE
++
++ Size of the MMU page table in bytes. Each 4 bytes can hold 4kB worth of
++ virtual data.
++*/
++#ifndef gcdMMU_SIZE
++#if gcdMIRROR_PAGETABLE
++# define gcdMMU_SIZE 0x200000
++#else
++# define gcdMMU_SIZE (2048 << 10)
++#endif
++#endif
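++
++/* Worked example: with the default of (2048 << 10) bytes, the table holds
++   2MB / 4 = 512K entries, i.e. 512K * 4kB = 2GB of mappable virtual data. */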
++
++/*
++ gcdSECURE_USER
++
++ Use logical addresses instead of physical addresses in user land. In
++ this case a hint table is created for both command buffers and context
++ buffers, and that hint table will be used to patch up those buffers in
++ the kernel when they are ready to submit.
++*/
++#ifndef gcdSECURE_USER
++# define gcdSECURE_USER 0
++#endif
++
++/*
++ gcdSECURE_CACHE_SLOTS
++
++ Number of slots in the logical to DMA address cache table. Each time a
++ logical address needs to be translated into a DMA address for the GPU,
++ this cache will be walked. The replacement scheme is LRU.
++*/
++#ifndef gcdSECURE_CACHE_SLOTS
++# define gcdSECURE_CACHE_SLOTS 1024
++#endif
++
++/*
++ gcdSECURE_CACHE_METHOD
++
++ Replacement scheme used for Secure Cache. The following options are
++ available:
++
++ gcdSECURE_CACHE_LRU
++ A standard LRU cache.
++
++ gcdSECURE_CACHE_LINEAR
++ A linear walker with the idea that an application will always
++ render the scene in a similar way, so the next entry in the
++ cache should be a hit most of the time.
++
++ gcdSECURE_CACHE_HASH
++ A 256-entry hash table.
++
++ gcdSECURE_CACHE_TABLE
++ A simple cache but with potential of a lot of cache replacement.
++*/
++#ifndef gcdSECURE_CACHE_METHOD
++# define gcdSECURE_CACHE_METHOD gcdSECURE_CACHE_HASH
++#endif
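++
++/* Illustrative override (assumed from the #ifndef guard above): like the
++   other options in this file, the scheme can be chosen before this header
++   is included, e.g. via the build flags:
++       -DgcdSECURE_CACHE_METHOD=gcdSECURE_CACHE_LRU */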
++
++/*
++ gcdREGISTER_ACCESS_FROM_USER
++
++ Set to 1 to allow IOCTL calls to get through from user land. This
++ should only be in debug or development drops.
++*/
++#ifndef gcdREGISTER_ACCESS_FROM_USER
++# define gcdREGISTER_ACCESS_FROM_USER 1
++#endif
++
++/*
++ gcdUSER_HEAP_ALLOCATOR
++
++    Set to 1 to enable the user-mode heap allocator for fast memory
++    allocation and freeing. Otherwise, memory allocation/freeing in user
++    mode will be managed directly by the system. Only for Linux for now.
++*/
++#ifndef gcdUSER_HEAP_ALLOCATOR
++# define gcdUSER_HEAP_ALLOCATOR 1
++#endif
++
++/*
++ gcdHEAP_SIZE
++
++    Set the allocation size for the internal heaps. Each time a heap is
++    full, a new heap will be allocated with at least this many bytes. The
++    bigger this size, the fewer heaps need to be allocated and the better
++    the performance. However, heaps won't be freed until they are
++    completely empty, so more memory may be wasted if the size is too big.
++*/
++#ifndef gcdHEAP_SIZE
++# define gcdHEAP_SIZE (64 << 10)
++#endif
++
++/*
++ gcdPOWER_SUSNPEND_WHEN_IDLE
++
++    Set to 1 to make the GPU enter gcvPOWER_SUSPEND when idle is detected;
++    otherwise the GPU will enter gcvPOWER_IDLE.
++*/
++#ifndef gcdPOWER_SUSNPEND_WHEN_IDLE
++# define gcdPOWER_SUSNPEND_WHEN_IDLE 1
++#endif
++
++/*
++ gcdFPGA_BUILD
++
++ This define enables work arounds for FPGA images.
++*/
++#ifndef gcdFPGA_BUILD
++# define gcdFPGA_BUILD 0
++#endif
++
++/*
++ gcdGPU_TIMEOUT
++
++    This define specifies the number of milliseconds the system will wait
++    before it broadcasts that the GPU is stuck. In other words, it defines
++    the timeout of any operation that needs to wait for the GPU.
++
++ If the value is 0, no timeout will be checked for.
++*/
++#ifndef gcdGPU_TIMEOUT
++#if gcdFPGA_BUILD
++# define gcdGPU_TIMEOUT 0
++# else
++# define gcdGPU_TIMEOUT 20000
++# endif
++#endif
++
++/*
++ gcdGPU_ADVANCETIMER
++
++    The advance timer interval.
++*/
++#ifndef gcdGPU_ADVANCETIMER
++# define gcdGPU_ADVANCETIMER 250
++#endif
++
++/*
++ gcdSTATIC_LINK
++
++    This define disables static linking.
++*/
++#ifndef gcdSTATIC_LINK
++# define gcdSTATIC_LINK 0
++#endif
++
++/*
++ gcdUSE_NEW_HEAP
++
++    Setting this define to 1 enables the new heap.
++*/
++#ifndef gcdUSE_NEW_HEAP
++# define gcdUSE_NEW_HEAP 0
++#endif
++
++/*
++ gcdCMD_NO_2D_CONTEXT
++
++ This define enables no-context 2D command buffer.
++*/
++#ifndef gcdCMD_NO_2D_CONTEXT
++# define gcdCMD_NO_2D_CONTEXT 1
++#endif
++
++/*
++ gcdENABLE_BANK_ALIGNMENT
++
++ When enabled, video memory is allocated bank aligned. The vendor can modify
++ _GetSurfaceBankAlignment() and gcoSURF_GetBankOffsetBytes() to define how
++ different types of allocations are bank and channel aligned.
++ When disabled (default), no bank alignment is done.
++*/
++#ifndef gcdENABLE_BANK_ALIGNMENT
++# define gcdENABLE_BANK_ALIGNMENT 0
++#endif
++
++/*
++ gcdBANK_BIT_START
++
++ Specifies the start bit of the bank (inclusive).
++*/
++#ifndef gcdBANK_BIT_START
++# define gcdBANK_BIT_START 12
++#endif
++
++/*
++ gcdBANK_BIT_END
++
++ Specifies the end bit of the bank (inclusive).
++*/
++#ifndef gcdBANK_BIT_END
++# define gcdBANK_BIT_END 14
++#endif
++
++/*
++ gcdBANK_CHANNEL_BIT
++
++ When set, video memory when allocated bank aligned is allocated such that
++ render and depth buffer addresses alternate on the channel bit specified.
++ This option has an effect only when gcdENABLE_BANK_ALIGNMENT is enabled.
++ When disabled (default), no alteration is done.
++*/
++#ifndef gcdBANK_CHANNEL_BIT
++# define gcdBANK_CHANNEL_BIT 7
++#endif
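++
++/* Illustrative: with the defaults above, the bank index of an address is
++   (Address >> gcdBANK_BIT_START) & 0x7 (three bits, 12..14 inclusive), and
++   gcdBANK_CHANNEL_BIT selects bit 7 for render/depth alternation. */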
++
++/*
++ gcdDYNAMIC_SPEED
++
++    When non-zero, it informs the kernel driver to use the speed throttling
++    broadcasting functions to inform the system that the GPU should be sped
++    up or slowed down. It will send a slowdown broadcast every interval, in
++    milliseconds, specified by this define
++    (gckOS_BroadcastCalibrateSpeed).
++*/
++#ifndef gcdDYNAMIC_SPEED
++# define gcdDYNAMIC_SPEED 2000
++#endif
++
++/*
++ gcdDYNAMIC_EVENT_THRESHOLD
++
++ When non-zero, it specifies the maximum number of available events at
++ which the kernel driver will issue a broadcast to speed up the GPU
++ (gckOS_BroadcastHurry).
++*/
++#ifndef gcdDYNAMIC_EVENT_THRESHOLD
++# define gcdDYNAMIC_EVENT_THRESHOLD 5
++#endif
++
++/*
++ gcdENABLE_PROFILING
++
++ Enable profiling macros.
++*/
++#ifndef gcdENABLE_PROFILING
++# define gcdENABLE_PROFILING 0
++#endif
++
++/*
++ gcdENABLE_128B_MERGE
++
++ Enable 128B merge for the BUS control.
++*/
++#ifndef gcdENABLE_128B_MERGE
++# define gcdENABLE_128B_MERGE 0
++#endif
++
++/*
++ gcdFRAME_DB
++
++    When non-zero, it specifies the number of frames inside the frame
++ database. The frame DB will collect per-frame timestamps and hardware
++ counters.
++*/
++#ifndef gcdFRAME_DB
++# define gcdFRAME_DB 0
++# define gcdFRAME_DB_RESET 0
++# define gcdFRAME_DB_NAME "/var/log/frameDB.log"
++#endif
++
++/*
++ gcdENABLE_VG
++    Enable the 2D OpenVG.
++*/
++
++#ifndef gcdENABLE_VG
++# define gcdENABLE_VG 0
++#endif
++
++/*
++ gcdDYNAMIC_MAP_RESERVED_MEMORY
++
++    When gcvPOOL_SYSTEM is constructed from RESERVED memory, the driver
++    can map the whole reserved memory to kernel space at the beginning,
++    or map just a piece of memory when it needs to access it.
++
++    Notice:
++        - It's only for the 2D OpenVG. For other cores, there is
++          _NO_ need to map reserved memory to the kernel.
++        - It's meaningless when memory is allocated by
++          gckOS_AllocateContiguous; in that case, memory is always
++          mapped by the system when allocated.
++*/
++#ifndef gcdDYNAMIC_MAP_RESERVED_MEMORY
++# define gcdDYNAMIC_MAP_RESERVED_MEMORY 1
++#endif
++
++/*
++ gcdPAGED_MEMORY_CACHEABLE
++
++ When non-zero, paged memory will be cacheable.
++
++    Normally, the driver determines whether a video memory allocation
++    is cacheable or not. When cacheable is not necessary,
++    it will be write-combined.
++
++    This option is only for those SoCs which can't enable
++    write-combining without enabling cacheability.
++*/
++
++#ifndef gcdPAGED_MEMORY_CACHEABLE
++# define gcdPAGED_MEMORY_CACHEABLE 0
++#endif
++
++/*
++ gcdNONPAGED_MEMORY_CACHEABLE
++
++    When non-zero, non-paged memory will be cacheable.
++*/
++
++#ifndef gcdNONPAGED_MEMORY_CACHEABLE
++# define gcdNONPAGED_MEMORY_CACHEABLE 0
++#endif
++
++/*
++ gcdNONPAGED_MEMORY_BUFFERABLE
++
++    When non-zero, non-paged memory will be bufferable.
++    gcdNONPAGED_MEMORY_BUFFERABLE and gcdNONPAGED_MEMORY_CACHEABLE
++    can't both be set to 1 at the same time.
++*/
++
++#ifndef gcdNONPAGED_MEMORY_BUFFERABLE
++# define gcdNONPAGED_MEMORY_BUFFERABLE 1
++#endif
++
++/*
++ gcdENABLE_INFINITE_SPEED_HW
++    Enable the infinite-speed HW; this is for the 2D OpenVG.
++*/
++
++#ifndef gcdENABLE_INFINITE_SPEED_HW
++# define gcdENABLE_INFINITE_SPEED_HW 0
++#endif
++
++/*
++ gcdENABLE_TS_DOUBLE_BUFFER
++    Enable the TS double buffer; this is for the 2D OpenVG.
++*/
++
++#ifndef gcdENABLE_TS_DOUBLE_BUFFER
++# define gcdENABLE_TS_DOUBLE_BUFFER 1
++#endif
++
++/*
++ gcd6000_SUPPORT
++
++ Temporary define to enable/disable 6000 support.
++ */
++#ifndef gcd6000_SUPPORT
++# define gcd6000_SUPPORT 0
++#endif
++
++/*
++ gcdPOWEROFF_TIMEOUT
++
++    When non-zero, the GPU will power off automatically from
++    the idle state, and gcdPOWEROFF_TIMEOUT is also the default
++    timeout in milliseconds.
++ */
++
++#ifndef gcdPOWEROFF_TIMEOUT
++# define gcdPOWEROFF_TIMEOUT 300
++#endif
++
++/*
++ gcdUSE_VIDMEM_PER_PID
++*/
++#ifndef gcdUSE_VIDMEM_PER_PID
++# define gcdUSE_VIDMEM_PER_PID 0
++#endif
++
++/*
++ QNX_SINGLE_THREADED_DEBUGGING
++*/
++#ifndef QNX_SINGLE_THREADED_DEBUGGING
++# define QNX_SINGLE_THREADED_DEBUGGING 0
++#endif
++
++/*
++ gcdENABLE_RECOVERY
++
++ This define enables the recovery code.
++*/
++#ifndef gcdENABLE_RECOVERY
++# define gcdENABLE_RECOVERY 0
++#endif
++
++/*
++ gcdRENDER_THREADS
++
++ Number of render threads. Make it zero, and there will be no render
++ threads.
++*/
++#ifndef gcdRENDER_THREADS
++# define gcdRENDER_THREADS 0
++#endif
++
++/*
++ gcdSMP
++
++ This define enables SMP support.
++
++    Currently, it only works on Linux/Android;
++    Kbuild will configure it according to whether
++    CONFIG_SMP is set.
++
++*/
++#ifndef gcdSMP
++# define gcdSMP 0
++#endif
++
++/*
++ gcdSUPPORT_SWAP_RECTANGLE
++
++ Support swap with a specific rectangle.
++
++ Set the rectangle with eglSetSwapRectangleANDROID api.
++*/
++#ifndef gcdSUPPORT_SWAP_RECTANGLE
++# define gcdSUPPORT_SWAP_RECTANGLE 0
++#endif
++
++/*
++ gcdGPU_LINEAR_BUFFER_ENABLED
++
++ Use linear buffer for GPU apps so HWC can do 2D composition.
++*/
++#ifndef gcdGPU_LINEAR_BUFFER_ENABLED
++# define gcdGPU_LINEAR_BUFFER_ENABLED 1
++#endif
++
++/*
++ gcdENABLE_RENDER_INTO_WINDOW
++
++    Enable the Render-Into-Window (i.e., No-Resolve) feature on Android.
++    NOTE that even if enabled, it still depends on hardware features and
++    Android application behavior. When the hardware or the application
++    behavior cannot support render-into-window mode, it falls back to
++    normal mode.
++    When Render-Into-Window is actually used, the window back buffers of
++    Android applications will be allocated to match the render target
++    tiling format. Otherwise buffer tiling is decided by the above option
++    'gcdGPU_LINEAR_BUFFER_ENABLED'.
++*/
++#ifndef gcdENABLE_RENDER_INTO_WINDOW
++# define gcdENABLE_RENDER_INTO_WINDOW 1
++#endif
++
++/*
++ gcdSHARED_RESOLVE_BUFFER_ENABLED
++
++ Use shared resolve buffer for all app buffers.
++*/
++#ifndef gcdSHARED_RESOLVE_BUFFER_ENABLED
++# define gcdSHARED_RESOLVE_BUFFER_ENABLED 0
++#endif
++
++/*
++ gcdUSE_TRIANGLE_STRIP_PATCH
++ */
++#ifndef gcdUSE_TRIANGLE_STRIP_PATCH
++# define gcdUSE_TRIANGLE_STRIP_PATCH 1
++#endif
++
++/*
++ gcdENABLE_OUTER_CACHE_PATCH
++
++ Enable the outer cache patch.
++*/
++#ifndef gcdENABLE_OUTER_CACHE_PATCH
++# define gcdENABLE_OUTER_CACHE_PATCH 0
++#endif
++
++#ifndef gcdANDROID_UNALIGNED_LINEAR_COMPOSITION_ADJUST
++# ifdef ANDROID
++# define gcdANDROID_UNALIGNED_LINEAR_COMPOSITION_ADJUST 1
++# else
++# define gcdANDROID_UNALIGNED_LINEAR_COMPOSITION_ADJUST 0
++# endif
++#endif
++
++#ifndef gcdENABLE_PE_DITHER_FIX
++# define gcdENABLE_PE_DITHER_FIX 1
++#endif
++
++#ifndef gcdSHARED_PAGETABLE
++# define gcdSHARED_PAGETABLE 1
++#endif
++#ifndef gcdUSE_PVR
++# define gcdUSE_PVR 1
++#endif
++
++/*
++ gcdSMALL_BLOCK_SIZE
++
++    When non-zero, a part of VIDMEM will be reserved for requests
++    whose size is less than gcdSMALL_BLOCK_SIZE.
++
++    For Linux, it's the size of a page. If such a request falls back
++    to gcvPOOL_CONTIGUOUS or gcvPOOL_VIRTUAL, memory will be wasted
++    because those pools allocate at least a page.
++ */
++#ifndef gcdSMALL_BLOCK_SIZE
++# define gcdSMALL_BLOCK_SIZE 4096
++# define gcdRATIO_FOR_SMALL_MEMORY 32
++#endif
++
++/*
++ gcdCONTIGUOUS_SIZE_LIMIT
++    When non-zero, the size of a video node from gcvPOOL_CONTIGUOUS is
++    limited by gcdCONTIGUOUS_SIZE_LIMIT.
++ */
++#ifndef gcdCONTIGUOUS_SIZE_LIMIT
++# define gcdCONTIGUOUS_SIZE_LIMIT 0
++#endif
++
++#ifndef gcdDISALBE_EARLY_EARLY_Z
++# define gcdDISALBE_EARLY_EARLY_Z 1
++#endif
++
++#ifndef gcdSHADER_SRC_BY_MACHINECODE
++# define gcdSHADER_SRC_BY_MACHINECODE 1
++#endif
++
++/*
++ gcdLINK_QUEUE_SIZE
++
++    When non-zero, the driver maintains a queue to record information about
++    the most recently linked context and command buffers. Data in this
++    queue is used for debugging.
++*/
++#ifndef gcdLINK_QUEUE_SIZE
++# define gcdLINK_QUEUE_SIZE 0
++#endif
++
++/* gcdALPHA_KILL_IN_SHADER
++ *
++ * Enable alpha kill inside the shader. This will be set automatically by the
++ * HAL if certain states match the criteria.
++ */
++#ifndef gcdALPHA_KILL_IN_SHADER
++# define gcdALPHA_KILL_IN_SHADER 1
++#endif
++
++/* gcdHIGH_PRECISION_DELAY_ENABLE
++ *
++ * Enable high-precision schedule delay with a 1ms unit; otherwise the schedule delay can be up to 10ms.
++ * Browser app performance drops noticeably without this.
++ */
++#ifndef gcdHIGH_PRECISION_DELAY_ENABLE
++# define gcdHIGH_PRECISION_DELAY_ENABLE 1
++#endif
++
++#ifndef gcdUSE_WCLIP_PATCH
++# define gcdUSE_WCLIP_PATCH 1
++#endif
++
++#ifndef gcdHZ_L2_DISALBE
++# define gcdHZ_L2_DISALBE 1
++#endif
++
++#ifndef gcdBUGFIX15_DISABLE
++# define gcdBUGFIX15_DISABLE 1
++#endif
++
++#ifndef gcdDISABLE_HZ_FAST_CLEAR
++# define gcdDISABLE_HZ_FAST_CLEAR 1
++#endif
++
++#ifndef gcdUSE_NPOT_PATCH
++#define gcdUSE_NPOT_PATCH 1
++#endif
++
++#ifndef gcdSYNC
++# define gcdSYNC 1
++#endif
++
++#ifndef gcdENABLE_SPECIAL_HINT3
++# define gcdENABLE_SPECIAL_HINT3 1
++#endif
++
++#if defined(ANDROID)
++#ifndef gcdPRE_ROTATION
++# define gcdPRE_ROTATION 1
++#endif
++#endif
++
++/*
++ gcdDVFS
++
++    When non-zero, software will make use of the dynamic voltage and
++    frequency scaling feature.
++ */
++#ifndef gcdDVFS
++# define gcdDVFS 1
++# define gcdDVFS_ANAYLSE_WINDOW 4
++# define gcdDVFS_POLLING_TIME (gcdDVFS_ANAYLSE_WINDOW * 4)
++#endif
++
++/*
++ gcdANDROID_NATIVE_FENCE_SYNC
++
++    Enable Android native fence sync. It was introduced in Jellybean 4.2.
++    Depends on the Linux kernel option CONFIG_SYNC.
++
++ 0: Disabled
++ 1: Build framework for native fence sync feature, and EGL extension
++ 2: Enable async swap buffers for client
++ * Native fence sync for client 'queueBuffer' in EGL, which is
++ 'acquireFenceFd' for layer in compositor side.
++    3: Enable async hwcomposer composition.
++ * 'releaseFenceFd' for layer in compositor side, which is native
++ fence sync when client 'dequeueBuffer'
++ * Native fence sync for compositor 'queueBuffer' in EGL, which is
++ 'acquireFenceFd' for framebuffer target for DC
++ */
++#ifndef gcdANDROID_NATIVE_FENCE_SYNC
++# define gcdANDROID_NATIVE_FENCE_SYNC 0
++#endif
++
++#ifndef gcdFORCE_MIPMAP
++# define gcdFORCE_MIPMAP 1
++#endif
++
++/*
++ gcdFORCE_GAL_LOAD_TWICE
++
++    When non-zero, each thread except the main one will load libGAL.so twice to avoid a potential segmentation fault when the app uses dlopen/dlclose.
++    If threads exit arbitrarily, libGAL.so may not unload until the process quits.
++ */
++#ifndef gcdFORCE_GAL_LOAD_TWICE
++# define gcdFORCE_GAL_LOAD_TWICE 0
++#endif
++
++#endif /* __gc_hal_options_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_profiler.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_profiler.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_profiler.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_profiler.h 2015-07-27 23:13:06.214808565 +0200
+@@ -0,0 +1,584 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the License, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_profiler_h_
++#define __gc_hal_profiler_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#define GLVERTEX_OBJECT 10
++#define GLVERTEX_OBJECT_BYTES 11
++
++#define GLINDEX_OBJECT 20
++#define GLINDEX_OBJECT_BYTES 21
++
++#define GLTEXTURE_OBJECT 30
++#define GLTEXTURE_OBJECT_BYTES 31
++
++#if VIVANTE_PROFILER
++#define gcmPROFILE_GC(Enum, Value) gcoPROFILER_Count(gcvNULL, Enum, Value)
++#else
++#define gcmPROFILE_GC(Enum, Value) do { } while (gcvFALSE)
++#endif
++
++#ifndef gcdNEW_PROFILER_FILE
++#define gcdNEW_PROFILER_FILE 1
++#endif
++
++#define ES11_CALLS 151
++#define ES11_DRAWCALLS (ES11_CALLS + 1)
++#define ES11_STATECHANGECALLS (ES11_DRAWCALLS + 1)
++#define ES11_POINTCOUNT (ES11_STATECHANGECALLS + 1)
++#define ES11_LINECOUNT (ES11_POINTCOUNT + 1)
++#define ES11_TRIANGLECOUNT (ES11_LINECOUNT + 1)
++
++#define ES20_CALLS 159
++#define ES20_DRAWCALLS (ES20_CALLS + 1)
++#define ES20_STATECHANGECALLS (ES20_DRAWCALLS + 1)
++#define ES20_POINTCOUNT (ES20_STATECHANGECALLS + 1)
++#define ES20_LINECOUNT (ES20_POINTCOUNT + 1)
++#define ES20_TRIANGLECOUNT (ES20_LINECOUNT + 1)
++
++#define VG11_CALLS 88
++#define VG11_DRAWCALLS (VG11_CALLS + 1)
++#define VG11_STATECHANGECALLS (VG11_DRAWCALLS + 1)
++#define VG11_FILLCOUNT (VG11_STATECHANGECALLS + 1)
++#define VG11_STROKECOUNT (VG11_FILLCOUNT + 1)
++/* End of Driver API ID Definitions. */
++
++/* HAL & MISC IDs. */
++#define HAL_VERTBUFNEWBYTEALLOC 1
++#define HAL_VERTBUFTOTALBYTEALLOC (HAL_VERTBUFNEWBYTEALLOC + 1)
++#define HAL_VERTBUFNEWOBJALLOC (HAL_VERTBUFTOTALBYTEALLOC + 1)
++#define HAL_VERTBUFTOTALOBJALLOC (HAL_VERTBUFNEWOBJALLOC + 1)
++#define HAL_INDBUFNEWBYTEALLOC (HAL_VERTBUFTOTALOBJALLOC + 1)
++#define HAL_INDBUFTOTALBYTEALLOC (HAL_INDBUFNEWBYTEALLOC + 1)
++#define HAL_INDBUFNEWOBJALLOC (HAL_INDBUFTOTALBYTEALLOC + 1)
++#define HAL_INDBUFTOTALOBJALLOC (HAL_INDBUFNEWOBJALLOC + 1)
++#define HAL_TEXBUFNEWBYTEALLOC (HAL_INDBUFTOTALOBJALLOC + 1)
++#define HAL_TEXBUFTOTALBYTEALLOC (HAL_TEXBUFNEWBYTEALLOC + 1)
++#define HAL_TEXBUFNEWOBJALLOC (HAL_TEXBUFTOTALBYTEALLOC + 1)
++#define HAL_TEXBUFTOTALOBJALLOC (HAL_TEXBUFNEWOBJALLOC + 1)
++
++#define GPU_CYCLES 1
++#define GPU_READ64BYTE (GPU_CYCLES + 1)
++#define GPU_WRITE64BYTE (GPU_READ64BYTE + 1)
++#define GPU_TOTALCYCLES (GPU_WRITE64BYTE + 1)
++#define GPU_IDLECYCLES (GPU_TOTALCYCLES + 1)
++
++#define VS_INSTCOUNT 1
++#define VS_BRANCHINSTCOUNT (VS_INSTCOUNT + 1)
++#define VS_TEXLDINSTCOUNT (VS_BRANCHINSTCOUNT + 1)
++#define VS_RENDEREDVERTCOUNT (VS_TEXLDINSTCOUNT + 1)
++#define VS_SOURCE (VS_RENDEREDVERTCOUNT + 1)
++
++#define PS_INSTCOUNT 1
++#define PS_BRANCHINSTCOUNT (PS_INSTCOUNT + 1)
++#define PS_TEXLDINSTCOUNT (PS_BRANCHINSTCOUNT + 1)
++#define PS_RENDEREDPIXCOUNT (PS_TEXLDINSTCOUNT + 1)
++#define PS_SOURCE (PS_RENDEREDPIXCOUNT + 1)
++
++#define PA_INVERTCOUNT 1
++#define PA_INPRIMCOUNT (PA_INVERTCOUNT + 1)
++#define PA_OUTPRIMCOUNT (PA_INPRIMCOUNT + 1)
++#define PA_DEPTHCLIPCOUNT (PA_OUTPRIMCOUNT + 1)
++#define PA_TRIVIALREJCOUNT (PA_DEPTHCLIPCOUNT + 1)
++#define PA_CULLCOUNT (PA_TRIVIALREJCOUNT + 1)
++
++#define SE_TRIANGLECOUNT 1
++#define SE_LINECOUNT (SE_TRIANGLECOUNT + 1)
++
++#define RA_VALIDPIXCOUNT 1
++#define RA_TOTALQUADCOUNT (RA_VALIDPIXCOUNT + 1)
++#define RA_VALIDQUADCOUNTEZ (RA_TOTALQUADCOUNT + 1)
++#define RA_TOTALPRIMCOUNT (RA_VALIDQUADCOUNTEZ + 1)
++#define RA_PIPECACHEMISSCOUNT (RA_TOTALPRIMCOUNT + 1)
++#define RA_PREFCACHEMISSCOUNT (RA_PIPECACHEMISSCOUNT + 1)
++#define RA_EEZCULLCOUNT (RA_PREFCACHEMISSCOUNT + 1)
++
++#define TX_TOTBILINEARREQ 1
++#define TX_TOTTRILINEARREQ (TX_TOTBILINEARREQ + 1)
++#define TX_TOTDISCARDTEXREQ (TX_TOTTRILINEARREQ + 1)
++#define TX_TOTTEXREQ (TX_TOTDISCARDTEXREQ + 1)
++#define TX_MEMREADCOUNT (TX_TOTTEXREQ + 1)
++#define TX_MEMREADIN8BCOUNT (TX_MEMREADCOUNT + 1)
++#define TX_CACHEMISSCOUNT (TX_MEMREADIN8BCOUNT + 1)
++#define TX_CACHEHITTEXELCOUNT (TX_CACHEMISSCOUNT + 1)
++#define TX_CACHEMISSTEXELCOUNT (TX_CACHEHITTEXELCOUNT + 1)
++
++#define PE_KILLEDBYCOLOR 1
++#define PE_KILLEDBYDEPTH (PE_KILLEDBYCOLOR + 1)
++#define PE_DRAWNBYCOLOR (PE_KILLEDBYDEPTH + 1)
++#define PE_DRAWNBYDEPTH (PE_DRAWNBYCOLOR + 1)
++
++#define MC_READREQ8BPIPE 1
++#define MC_READREQ8BIP (MC_READREQ8BPIPE + 1)
++#define MC_WRITEREQ8BPIPE (MC_READREQ8BIP + 1)
++
++#define AXI_READREQSTALLED 1
++#define AXI_WRITEREQSTALLED (AXI_READREQSTALLED + 1)
++#define AXI_WRITEDATASTALLED (AXI_WRITEREQSTALLED + 1)
++
++#define PVS_INSTRCOUNT 1
++#define PVS_ALUINSTRCOUNT (PVS_INSTRCOUNT + 1)
++#define PVS_TEXINSTRCOUNT (PVS_ALUINSTRCOUNT + 1)
++#define PVS_ATTRIBCOUNT (PVS_TEXINSTRCOUNT + 1)
++#define PVS_UNIFORMCOUNT (PVS_ATTRIBCOUNT + 1)
++#define PVS_FUNCTIONCOUNT (PVS_UNIFORMCOUNT + 1)
++#define PVS_SOURCE (PVS_FUNCTIONCOUNT + 1)
++
++#define PPS_INSTRCOUNT 1
++#define PPS_ALUINSTRCOUNT (PPS_INSTRCOUNT + 1)
++#define PPS_TEXINSTRCOUNT (PPS_ALUINSTRCOUNT + 1)
++#define PPS_ATTRIBCOUNT (PPS_TEXINSTRCOUNT + 1)
++#define PPS_UNIFORMCOUNT (PPS_ATTRIBCOUNT + 1)
++#define PPS_FUNCTIONCOUNT (PPS_UNIFORMCOUNT + 1)
++#define PPS_SOURCE (PPS_FUNCTIONCOUNT + 1)
++/* End of MISC Counter IDs. */
++
++#ifdef gcdNEW_PROFILER_FILE
++
++/* Category Constants. */
++#define VPHEADER 0x010000
++#define VPG_INFO 0x020000
++#define VPG_TIME 0x030000
++#define VPG_MEM 0x040000
++#define VPG_ES11 0x050000
++#define VPG_ES20 0x060000
++#define VPG_VG11 0x070000
++#define VPG_HAL 0x080000
++#define VPG_HW 0x090000
++#define VPG_GPU 0x0a0000
++#define VPG_VS 0x0b0000
++#define VPG_PS 0x0c0000
++#define VPG_PA 0x0d0000
++#define VPG_SETUP 0x0e0000
++#define VPG_RA 0x0f0000
++#define VPG_TX 0x100000
++#define VPG_PE 0x110000
++#define VPG_MC 0x120000
++#define VPG_AXI 0x130000
++#define VPG_PROG 0x140000
++#define VPG_PVS 0x150000
++#define VPG_PPS 0x160000
++#define VPG_ES11_TIME 0x170000
++#define VPG_ES20_TIME 0x180000
++#define VPG_FRAME 0x190000
++#define VPG_ES11_DRAW 0x200000
++#define VPG_ES20_DRAW 0x210000
++#define VPG_END 0xff0000
++
++/* Info. */
++#define VPC_INFOCOMPANY (VPG_INFO + 1)
++#define VPC_INFOVERSION (VPC_INFOCOMPANY + 1)
++#define VPC_INFORENDERER (VPC_INFOVERSION + 1)
++#define VPC_INFOREVISION (VPC_INFORENDERER + 1)
++#define VPC_INFODRIVER (VPC_INFOREVISION + 1)
++#define VPC_INFODRIVERMODE (VPC_INFODRIVER + 1)
++#define VPC_INFOSCREENSIZE (VPC_INFODRIVERMODE + 1)
++
++/* Counter Constants. */
++#define VPC_ELAPSETIME (VPG_TIME + 1)
++#define VPC_CPUTIME (VPC_ELAPSETIME + 1)
++
++#define VPC_MEMMAXRES (VPG_MEM + 1)
++#define VPC_MEMSHARED (VPC_MEMMAXRES + 1)
++#define VPC_MEMUNSHAREDDATA (VPC_MEMSHARED + 1)
++#define VPC_MEMUNSHAREDSTACK (VPC_MEMUNSHAREDDATA + 1)
++
++/* OpenGL ES11 Statics Counter IDs. */
++#define VPC_ES11CALLS (VPG_ES11 + ES11_CALLS)
++#define VPC_ES11DRAWCALLS (VPG_ES11 + ES11_DRAWCALLS)
++#define VPC_ES11STATECHANGECALLS (VPG_ES11 + ES11_STATECHANGECALLS)
++#define VPC_ES11POINTCOUNT (VPG_ES11 + ES11_POINTCOUNT)
++#define VPC_ES11LINECOUNT (VPG_ES11 + ES11_LINECOUNT)
++#define VPC_ES11TRIANGLECOUNT (VPG_ES11 + ES11_TRIANGLECOUNT)
++
++/* OpenGL ES20 Statistics Counter IDs. */
++#define VPC_ES20CALLS (VPG_ES20 + ES20_CALLS)
++#define VPC_ES20DRAWCALLS (VPG_ES20 + ES20_DRAWCALLS)
++#define VPC_ES20STATECHANGECALLS (VPG_ES20 + ES20_STATECHANGECALLS)
++#define VPC_ES20POINTCOUNT (VPG_ES20 + ES20_POINTCOUNT)
++#define VPC_ES20LINECOUNT (VPG_ES20 + ES20_LINECOUNT)
++#define VPC_ES20TRIANGLECOUNT (VPG_ES20 + ES20_TRIANGLECOUNT)
++
++/* OpenVG Statistics Counter IDs. */
++#define VPC_VG11CALLS (VPG_VG11 + VG11_CALLS)
++#define VPC_VG11DRAWCALLS (VPG_VG11 + VG11_DRAWCALLS)
++#define VPC_VG11STATECHANGECALLS (VPG_VG11 + VG11_STATECHANGECALLS)
++#define VPC_VG11FILLCOUNT (VPG_VG11 + VG11_FILLCOUNT)
++#define VPC_VG11STROKECOUNT (VPG_VG11 + VG11_STROKECOUNT)
++
++/* HAL Counters. */
++#define VPC_HALVERTBUFNEWBYTEALLOC (VPG_HAL + HAL_VERTBUFNEWBYTEALLOC)
++#define VPC_HALVERTBUFTOTALBYTEALLOC (VPG_HAL + HAL_VERTBUFTOTALBYTEALLOC)
++#define VPC_HALVERTBUFNEWOBJALLOC (VPG_HAL + HAL_VERTBUFNEWOBJALLOC)
++#define VPC_HALVERTBUFTOTALOBJALLOC (VPG_HAL + HAL_VERTBUFTOTALOBJALLOC)
++#define VPC_HALINDBUFNEWBYTEALLOC (VPG_HAL + HAL_INDBUFNEWBYTEALLOC)
++#define VPC_HALINDBUFTOTALBYTEALLOC (VPG_HAL + HAL_INDBUFTOTALBYTEALLOC)
++#define VPC_HALINDBUFNEWOBJALLOC (VPG_HAL + HAL_INDBUFNEWOBJALLOC)
++#define VPC_HALINDBUFTOTALOBJALLOC (VPG_HAL + HAL_INDBUFTOTALOBJALLOC)
++#define VPC_HALTEXBUFNEWBYTEALLOC (VPG_HAL + HAL_TEXBUFNEWBYTEALLOC)
++#define VPC_HALTEXBUFTOTALBYTEALLOC (VPG_HAL + HAL_TEXBUFTOTALBYTEALLOC)
++#define VPC_HALTEXBUFNEWOBJALLOC (VPG_HAL + HAL_TEXBUFNEWOBJALLOC)
++#define VPC_HALTEXBUFTOTALOBJALLOC (VPG_HAL + HAL_TEXBUFTOTALOBJALLOC)
++
++/* HW: GPU Counters. */
++#define VPC_GPUCYCLES (VPG_GPU + GPU_CYCLES)
++#define VPC_GPUREAD64BYTE (VPG_GPU + GPU_READ64BYTE)
++#define VPC_GPUWRITE64BYTE (VPG_GPU + GPU_WRITE64BYTE)
++#define VPC_GPUTOTALCYCLES (VPG_GPU + GPU_TOTALCYCLES)
++#define VPC_GPUIDLECYCLES (VPG_GPU + GPU_IDLECYCLES)
++
++/* HW: Shader Counters. */
++#define VPC_VSINSTCOUNT (VPG_VS + VS_INSTCOUNT)
++#define VPC_VSBRANCHINSTCOUNT (VPG_VS + VS_BRANCHINSTCOUNT)
++#define VPC_VSTEXLDINSTCOUNT (VPG_VS + VS_TEXLDINSTCOUNT)
++#define VPC_VSRENDEREDVERTCOUNT (VPG_VS + VS_RENDEREDVERTCOUNT)
++/* HW: PS Counters. */
++#define VPC_PSINSTCOUNT (VPG_PS + PS_INSTCOUNT)
++#define VPC_PSBRANCHINSTCOUNT (VPG_PS + PS_BRANCHINSTCOUNT)
++#define VPC_PSTEXLDINSTCOUNT (VPG_PS + PS_TEXLDINSTCOUNT)
++#define VPC_PSRENDEREDPIXCOUNT (VPG_PS + PS_RENDEREDPIXCOUNT)
++
++
++/* HW: PA Counters. */
++#define VPC_PAINVERTCOUNT (VPG_PA + PA_INVERTCOUNT)
++#define VPC_PAINPRIMCOUNT (VPG_PA + PA_INPRIMCOUNT)
++#define VPC_PAOUTPRIMCOUNT (VPG_PA + PA_OUTPRIMCOUNT)
++#define VPC_PADEPTHCLIPCOUNT (VPG_PA + PA_DEPTHCLIPCOUNT)
++#define VPC_PATRIVIALREJCOUNT (VPG_PA + PA_TRIVIALREJCOUNT)
++#define VPC_PACULLCOUNT (VPG_PA + PA_CULLCOUNT)
++
++/* HW: Setup Counters. */
++#define VPC_SETRIANGLECOUNT (VPG_SETUP + SE_TRIANGLECOUNT)
++#define VPC_SELINECOUNT (VPG_SETUP + SE_LINECOUNT)
++
++/* HW: RA Counters. */
++#define VPC_RAVALIDPIXCOUNT (VPG_RA + RA_VALIDPIXCOUNT)
++#define VPC_RATOTALQUADCOUNT (VPG_RA + RA_TOTALQUADCOUNT)
++#define VPC_RAVALIDQUADCOUNTEZ (VPG_RA + RA_VALIDQUADCOUNTEZ)
++#define VPC_RATOTALPRIMCOUNT (VPG_RA + RA_TOTALPRIMCOUNT)
++#define VPC_RAPIPECACHEMISSCOUNT (VPG_RA + RA_PIPECACHEMISSCOUNT)
++#define VPC_RAPREFCACHEMISSCOUNT (VPG_RA + RA_PREFCACHEMISSCOUNT)
++#define VPC_RAEEZCULLCOUNT (VPG_RA + RA_EEZCULLCOUNT)
++
++/* HW: TEX Counters. */
++#define VPC_TXTOTBILINEARREQ (VPG_TX + TX_TOTBILINEARREQ)
++#define VPC_TXTOTTRILINEARREQ (VPG_TX + TX_TOTTRILINEARREQ)
++#define VPC_TXTOTDISCARDTEXREQ (VPG_TX + TX_TOTDISCARDTEXREQ)
++#define VPC_TXTOTTEXREQ (VPG_TX + TX_TOTTEXREQ)
++#define VPC_TXMEMREADCOUNT (VPG_TX + TX_MEMREADCOUNT)
++#define VPC_TXMEMREADIN8BCOUNT (VPG_TX + TX_MEMREADIN8BCOUNT)
++#define VPC_TXCACHEMISSCOUNT (VPG_TX + TX_CACHEMISSCOUNT)
++#define VPC_TXCACHEHITTEXELCOUNT (VPG_TX + TX_CACHEHITTEXELCOUNT)
++#define VPC_TXCACHEMISSTEXELCOUNT (VPG_TX + TX_CACHEMISSTEXELCOUNT)
++
++/* HW: PE Counters. */
++#define VPC_PEKILLEDBYCOLOR (VPG_PE + PE_KILLEDBYCOLOR)
++#define VPC_PEKILLEDBYDEPTH (VPG_PE + PE_KILLEDBYDEPTH)
++#define VPC_PEDRAWNBYCOLOR (VPG_PE + PE_DRAWNBYCOLOR)
++#define VPC_PEDRAWNBYDEPTH (VPG_PE + PE_DRAWNBYDEPTH)
++
++/* HW: MC Counters. */
++#define VPC_MCREADREQ8BPIPE (VPG_MC + MC_READREQ8BPIPE)
++#define VPC_MCREADREQ8BIP (VPG_MC + MC_READREQ8BIP)
++#define VPC_MCWRITEREQ8BPIPE (VPG_MC + MC_WRITEREQ8BPIPE)
++
++/* HW: AXI Counters. */
++#define VPC_AXIREADREQSTALLED (VPG_AXI + AXI_READREQSTALLED)
++#define VPC_AXIWRITEREQSTALLED (VPG_AXI + AXI_WRITEREQSTALLED)
++#define VPC_AXIWRITEDATASTALLED (VPG_AXI + AXI_WRITEDATASTALLED)
++
++/* PROGRAM: Shader program counters. */
++#define VPC_PVSINSTRCOUNT (VPG_PVS + PVS_INSTRCOUNT)
++#define VPC_PVSALUINSTRCOUNT (VPG_PVS + PVS_ALUINSTRCOUNT)
++#define VPC_PVSTEXINSTRCOUNT (VPG_PVS + PVS_TEXINSTRCOUNT)
++#define VPC_PVSATTRIBCOUNT (VPG_PVS + PVS_ATTRIBCOUNT)
++#define VPC_PVSUNIFORMCOUNT (VPG_PVS + PVS_UNIFORMCOUNT)
++#define VPC_PVSFUNCTIONCOUNT (VPG_PVS + PVS_FUNCTIONCOUNT)
++#define VPC_PVSSOURCE (VPG_PVS + PVS_SOURCE)
++
++#define VPC_PPSINSTRCOUNT (VPG_PPS + PPS_INSTRCOUNT)
++#define VPC_PPSALUINSTRCOUNT (VPG_PPS + PPS_ALUINSTRCOUNT)
++#define VPC_PPSTEXINSTRCOUNT (VPG_PPS + PPS_TEXINSTRCOUNT)
++#define VPC_PPSATTRIBCOUNT (VPG_PPS + PPS_ATTRIBCOUNT)
++#define VPC_PPSUNIFORMCOUNT (VPG_PPS + PPS_UNIFORMCOUNT)
++#define VPC_PPSFUNCTIONCOUNT (VPG_PPS + PPS_FUNCTIONCOUNT)
++#define VPC_PPSSOURCE (VPG_PPS + PPS_SOURCE)
++
++#define VPC_PROGRAMHANDLE (VPG_PROG + 1)
++
++#define VPG_ES20_DRAW_NO (VPG_ES20_DRAW + 1)
++#define VPG_ES11_DRAW_NO (VPG_ES11_DRAW + 1)
++
++#define VPG_FRAME_USEVBO (VPG_FRAME + 1)
++
++#endif
++
++
++/* HW profile information. */
++typedef struct _gcsPROFILER_COUNTERS
++{
++ /* HW static counters. */
++ gctUINT32 gpuClock;
++ gctUINT32 axiClock;
++ gctUINT32 shaderClock;
++
++ /* HW variable counters. */
++ gctUINT32 gpuClockStart;
++ gctUINT32 gpuClockEnd;
++
++ /* HW variable counters. */
++ gctUINT32 gpuCyclesCounter;
++ gctUINT32 gpuTotalCyclesCounter;
++ gctUINT32 gpuIdleCyclesCounter;
++ gctUINT32 gpuTotalRead64BytesPerFrame;
++ gctUINT32 gpuTotalWrite64BytesPerFrame;
++
++ /* PE */
++ gctUINT32 pe_pixel_count_killed_by_color_pipe;
++ gctUINT32 pe_pixel_count_killed_by_depth_pipe;
++ gctUINT32 pe_pixel_count_drawn_by_color_pipe;
++ gctUINT32 pe_pixel_count_drawn_by_depth_pipe;
++
++ /* SH */
++ gctUINT32 ps_inst_counter;
++ gctUINT32 rendered_pixel_counter;
++ gctUINT32 vs_inst_counter;
++ gctUINT32 rendered_vertice_counter;
++ gctUINT32 vtx_branch_inst_counter;
++ gctUINT32 vtx_texld_inst_counter;
++ gctUINT32 pxl_branch_inst_counter;
++ gctUINT32 pxl_texld_inst_counter;
++
++ /* PA */
++ gctUINT32 pa_input_vtx_counter;
++ gctUINT32 pa_input_prim_counter;
++ gctUINT32 pa_output_prim_counter;
++ gctUINT32 pa_depth_clipped_counter;
++ gctUINT32 pa_trivial_rejected_counter;
++ gctUINT32 pa_culled_counter;
++
++ /* SE */
++ gctUINT32 se_culled_triangle_count;
++ gctUINT32 se_culled_lines_count;
++
++ /* RA */
++ gctUINT32 ra_valid_pixel_count;
++ gctUINT32 ra_total_quad_count;
++ gctUINT32 ra_valid_quad_count_after_early_z;
++ gctUINT32 ra_total_primitive_count;
++ gctUINT32 ra_pipe_cache_miss_counter;
++ gctUINT32 ra_prefetch_cache_miss_counter;
++ gctUINT32 ra_eez_culled_counter;
++
++ /* TX */
++ gctUINT32 tx_total_bilinear_requests;
++ gctUINT32 tx_total_trilinear_requests;
++ gctUINT32 tx_total_discarded_texture_requests;
++ gctUINT32 tx_total_texture_requests;
++ gctUINT32 tx_mem_read_count;
++ gctUINT32 tx_mem_read_in_8B_count;
++ gctUINT32 tx_cache_miss_count;
++ gctUINT32 tx_cache_hit_texel_count;
++ gctUINT32 tx_cache_miss_texel_count;
++
++ /* MC */
++ gctUINT32 mc_total_read_req_8B_from_pipeline;
++ gctUINT32 mc_total_read_req_8B_from_IP;
++ gctUINT32 mc_total_write_req_8B_from_pipeline;
++
++ /* HI */
++ gctUINT32 hi_axi_cycles_read_request_stalled;
++ gctUINT32 hi_axi_cycles_write_request_stalled;
++ gctUINT32 hi_axi_cycles_write_data_stalled;
++}
++gcsPROFILER_COUNTERS;
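++
++/*
++ * Editor's sketch (an assumption for illustration, not part of the original
++ * driver): once the counters above have been sampled for a frame, overall
++ * GPU load for the sample window can be derived from the total and idle
++ * cycle counts.
++ */
++#if 0
++static gctUINT32
++exampleGpuLoadPercent(
++    const gcsPROFILER_COUNTERS * Counters
++    )
++{
++    gctUINT32 total = Counters->gpuTotalCyclesCounter;
++    gctUINT32 idle  = Counters->gpuIdleCyclesCounter;
++
++    if (total == 0)
++    {
++        return 0;
++    }
++
++    /* Busy cycles = total - idle, scaled to a percentage. */
++    return (gctUINT32) ((100ull * (total - idle)) / total);
++}
++#endif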
++
++/* HAL profile information. */
++typedef struct _gcsPROFILER
++{
++ gctUINT32 enable;
++ gctBOOL enableHal;
++ gctBOOL enableHW;
++ gctBOOL enableSH;
++ gctBOOL isSyncMode;
++
++ gctBOOL useSocket;
++ gctINT sockFd;
++
++ gctFILE file;
++
++ /* Aggregate Information */
++
++ /* Clock Info */
++ gctUINT64 frameStart;
++ gctUINT64 frameEnd;
++
++ /* Current frame information */
++ gctUINT32 frameNumber;
++ gctUINT64 frameStartTimeusec;
++ gctUINT64 frameEndTimeusec;
++ gctUINT64 frameStartCPUTimeusec;
++ gctUINT64 frameEndCPUTimeusec;
++
++#if PROFILE_HAL_COUNTERS
++ gctUINT32 vertexBufferTotalBytesAlloc;
++ gctUINT32 vertexBufferNewBytesAlloc;
++ int vertexBufferTotalObjectsAlloc;
++ int vertexBufferNewObjectsAlloc;
++
++ gctUINT32 indexBufferTotalBytesAlloc;
++ gctUINT32 indexBufferNewBytesAlloc;
++ int indexBufferTotalObjectsAlloc;
++ int indexBufferNewObjectsAlloc;
++
++ gctUINT32 textureBufferTotalBytesAlloc;
++ gctUINT32 textureBufferNewBytesAlloc;
++ int textureBufferTotalObjectsAlloc;
++ int textureBufferNewObjectsAlloc;
++
++ gctUINT32 numCommits;
++ gctUINT32 drawPointCount;
++ gctUINT32 drawLineCount;
++ gctUINT32 drawTriangleCount;
++ gctUINT32 drawVertexCount;
++ gctUINT32 redundantStateChangeCalls;
++#endif
++
++ gctUINT32 prevVSInstCount;
++ gctUINT32 prevVSBranchInstCount;
++ gctUINT32 prevVSTexInstCount;
++ gctUINT32 prevVSVertexCount;
++ gctUINT32 prevPSInstCount;
++ gctUINT32 prevPSBranchInstCount;
++ gctUINT32 prevPSTexInstCount;
++ gctUINT32 prevPSPixelCount;
++
++ char* psSource;
++ char* vsSource;
++
++}
++gcsPROFILER;
++
++/* Memory profile information. */
++struct _gcsMemProfile
++{
++ /* Memory Usage */
++ gctUINT32 videoMemUsed;
++ gctUINT32 systemMemUsed;
++ gctUINT32 commitBufferSize;
++ gctUINT32 contextBufferCopyBytes;
++};
++
++/* Shader profile information. */
++struct _gcsSHADER_PROFILER
++{
++ gctUINT32 shaderLength;
++ gctUINT32 shaderALUCycles;
++ gctUINT32 shaderTexLoadCycles;
++ gctUINT32 shaderTempRegCount;
++ gctUINT32 shaderSamplerRegCount;
++ gctUINT32 shaderInputRegCount;
++ gctUINT32 shaderOutputRegCount;
++};
++
++/* Initialize the profiler. */
++gceSTATUS
++gcoPROFILER_Initialize(
++ IN gcoHAL Hal
++ );
++
++/* Destroy the profiler. */
++gceSTATUS
++gcoPROFILER_Destroy(
++ IN gcoHAL Hal
++ );
++
++/* Write data to profiler. */
++gceSTATUS
++gcoPROFILER_Write(
++ IN gcoHAL Hal,
++ IN gctSIZE_T ByteCount,
++ IN gctCONST_POINTER Data
++ );
++
++/* Flush data out. */
++gceSTATUS
++gcoPROFILER_Flush(
++ IN gcoHAL Hal
++ );
++
++/* Call to signal end of frame. */
++gceSTATUS
++gcoPROFILER_EndFrame(
++ IN gcoHAL Hal
++ );
++
++/* Call to signal end of draw. */
++gceSTATUS
++gcoPROFILER_EndDraw(
++ IN gcoHAL Hal,
++ IN gctBOOL FirstDraw
++ );
++
++/* Increase profile counter Enum by Value. */
++gceSTATUS
++gcoPROFILER_Count(
++ IN gcoHAL Hal,
++ IN gctUINT32 Enum,
++ IN gctINT Value
++ );
++
++gceSTATUS
++gcoPROFILER_ShaderSourceFS(
++ IN gcoHAL Hal,
++ IN char* source
++ );
++
++gceSTATUS
++gcoPROFILER_ShaderSourceVS(
++ IN gcoHAL Hal,
++ IN char* source
++ );
++
++/* Profile input vertex shader. */
++gceSTATUS
++gcoPROFILER_ShaderVS(
++ IN gcoHAL Hal,
++ IN gctPOINTER Vs
++ );
++
++/* Profile input fragment shader. */
++gceSTATUS
++gcoPROFILER_ShaderFS(
++ IN gcoHAL Hal,
++ IN gctPOINTER Fs
++ );
++
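++/*
++ * Editor's note (assumed call order, inferred from the prototypes above and
++ * not stated in the original header): a client would typically call
++ * gcoPROFILER_Initialize once, gcoPROFILER_Count / gcoPROFILER_EndDraw while
++ * issuing work, gcoPROFILER_EndFrame at each frame boundary, and finally
++ * gcoPROFILER_Flush followed by gcoPROFILER_Destroy at teardown.
++ */
++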
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_profiler_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_raster.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_raster.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_raster.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_raster.h 2015-07-27 23:13:06.218794344 +0200
+@@ -0,0 +1,1010 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_raster_h_
++#define __gc_hal_raster_h_
++
++#include "gc_hal_enum.h"
++#include "gc_hal_types.h"
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++****************************** Object Declarations *****************************
++\******************************************************************************/
++
++typedef struct _gcoBRUSH * gcoBRUSH;
++typedef struct _gcoBRUSH_CACHE * gcoBRUSH_CACHE;
++
++/******************************************************************************\
++******************************** gcoBRUSH Object *******************************
++\******************************************************************************/
++
++/* Create a new solid color gcoBRUSH object. */
++gceSTATUS
++gcoBRUSH_ConstructSingleColor(
++ IN gcoHAL Hal,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 Color,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Create a new monochrome gcoBRUSH object. */
++gceSTATUS
++gcoBRUSH_ConstructMonochrome(
++ IN gcoHAL Hal,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor,
++ IN gctUINT64 Bits,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Create a color gcoBRUSH object. */
++gceSTATUS
++gcoBRUSH_ConstructColor(
++ IN gcoHAL Hal,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctPOINTER Address,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Destroy a gcoBRUSH object. */
++gceSTATUS
++gcoBRUSH_Destroy(
++ IN gcoBRUSH Brush
++ );
++
++/******************************************************************************\
++******************************** gcoSURF Object *******************************
++\******************************************************************************/
++
++/* Set clipping rectangle. */
++gceSTATUS
++gcoSURF_SetClipping(
++ IN gcoSURF Surface
++ );
++
++/* Clear one or more rectangular areas. */
++gceSTATUS
++gcoSURF_Clear2D(
++ IN gcoSURF DestSurface,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR DestRect,
++ IN gctUINT32 LoColor,
++ IN gctUINT32 HiColor
++ );
++
++/* Draw one or more Bresenham lines. */
++gceSTATUS
++gcoSURF_Line(
++ IN gcoSURF Surface,
++ IN gctUINT32 LineCount,
++ IN gcsRECT_PTR Position,
++ IN gcoBRUSH Brush,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop
++ );
++
++/* Generic rectangular blit. */
++gceSTATUS
++gcoSURF_Blit(
++ IN OPTIONAL gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gctUINT32 RectCount,
++ IN OPTIONAL gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DestRect,
++ IN OPTIONAL gcoBRUSH Brush,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN OPTIONAL gceSURF_TRANSPARENCY Transparency,
++ IN OPTIONAL gctUINT32 TransparencyColor,
++ IN OPTIONAL gctPOINTER Mask,
++ IN OPTIONAL gceSURF_MONOPACK MaskPack
++ );
++
++/* Monochrome blit. */
++gceSTATUS
++gcoSURF_MonoBlit(
++ IN gcoSURF DestSurface,
++ IN gctPOINTER Source,
++ IN gceSURF_MONOPACK SourcePack,
++ IN gcsPOINT_PTR SourceSize,
++ IN gcsPOINT_PTR SourceOrigin,
++ IN gcsRECT_PTR DestRect,
++ IN OPTIONAL gcoBRUSH Brush,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gctBOOL ColorConvert,
++ IN gctUINT8 MonoTransparency,
++ IN gceSURF_TRANSPARENCY Transparency,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor
++ );
++
++/* Filter blit. */
++gceSTATUS
++gcoSURF_FilterBlit(
++ IN gcoSURF SrcSurface,
++ IN gcoSURF DestSurface,
++ IN gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DestRect,
++ IN gcsRECT_PTR DestSubRect
++ );
++
++/* Enable alpha blending engine in the hardware and disengage the ROP engine. */
++gceSTATUS
++gcoSURF_EnableAlphaBlend(
++ IN gcoSURF Surface,
++ IN gctUINT8 SrcGlobalAlphaValue,
++ IN gctUINT8 DstGlobalAlphaValue,
++ IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode,
++ IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode,
++ IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode,
++ IN gceSURF_BLEND_FACTOR_MODE DstFactorMode,
++ IN gceSURF_PIXEL_COLOR_MODE SrcColorMode,
++ IN gceSURF_PIXEL_COLOR_MODE DstColorMode
++ );
++
++/* Disable alpha blending engine in the hardware and engage the ROP engine. */
++gceSTATUS
++gcoSURF_DisableAlphaBlend(
++ IN gcoSURF Surface
++ );
++
++/* Copy a rectangular area with format conversion. */
++gceSTATUS
++gcoSURF_CopyPixels(
++ IN gcoSURF Source,
++ IN gcoSURF Target,
++ IN gctINT SourceX,
++ IN gctINT SourceY,
++ IN gctINT TargetX,
++ IN gctINT TargetY,
++ IN gctINT Width,
++ IN gctINT Height
++ );
++
++/* Read surface pixel. */
++gceSTATUS
++gcoSURF_ReadPixel(
++ IN gcoSURF Surface,
++ IN gctPOINTER Memory,
++ IN gctINT X,
++ IN gctINT Y,
++ IN gceSURF_FORMAT Format,
++ OUT gctPOINTER PixelValue
++ );
++
++/* Write surface pixel. */
++gceSTATUS
++gcoSURF_WritePixel(
++ IN gcoSURF Surface,
++ IN gctPOINTER Memory,
++ IN gctINT X,
++ IN gctINT Y,
++ IN gceSURF_FORMAT Format,
++ IN gctPOINTER PixelValue
++ );
++
++gceSTATUS
++gcoSURF_SetDither(
++ IN gcoSURF Surface,
++ IN gctBOOL Dither
++ );
++/******************************************************************************\
++********************************** gco2D Object *********************************
++\******************************************************************************/
++
++/* Construct a new gco2D object. */
++gceSTATUS
++gco2D_Construct(
++ IN gcoHAL Hal,
++ OUT gco2D * Hardware
++ );
++
++/* Destroy a gco2D object. */
++gceSTATUS
++gco2D_Destroy(
++ IN gco2D Hardware
++ );
++
++/* Sets the maximum number of brushes in the brush cache. */
++gceSTATUS
++gco2D_SetBrushLimit(
++ IN gco2D Hardware,
++ IN gctUINT MaxCount
++ );
++
++/* Flush the brush. */
++gceSTATUS
++gco2D_FlushBrush(
++ IN gco2D Engine,
++ IN gcoBRUSH Brush,
++ IN gceSURF_FORMAT Format
++ );
++
++/* Program the specified solid color brush. */
++gceSTATUS
++gco2D_LoadSolidBrush(
++ IN gco2D Engine,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 Color,
++ IN gctUINT64 Mask
++ );
++
++gceSTATUS
++gco2D_LoadMonochromeBrush(
++ IN gco2D Engine,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor,
++ IN gctUINT64 Bits,
++ IN gctUINT64 Mask
++ );
++
++gceSTATUS
++gco2D_LoadColorBrush(
++ IN gco2D Engine,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctUINT32 Address,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT64 Mask
++ );
++
++/* Configure monochrome source. */
++gceSTATUS
++gco2D_SetMonochromeSource(
++ IN gco2D Engine,
++ IN gctBOOL ColorConvert,
++ IN gctUINT8 MonoTransparency,
++ IN gceSURF_MONOPACK DataPack,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_TRANSPARENCY Transparency,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor
++ );
++
++/* Configure color source. */
++gceSTATUS
++gco2D_SetColorSource(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_TRANSPARENCY Transparency,
++ IN gctUINT32 TransparencyColor
++ );
++
++/* Configure color source extension for full rotation. */
++gceSTATUS
++gco2D_SetColorSourceEx(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_TRANSPARENCY Transparency,
++ IN gctUINT32 TransparencyColor
++ );
++
++/* Configure color source. */
++gceSTATUS
++gco2D_SetColorSourceAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight,
++ IN gctBOOL CoordRelative
++ );
++
++gceSTATUS
++gco2D_SetColorSourceN(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight,
++ IN gctUINT32 SurfaceNumber
++ );
++
++/* Configure masked color source. */
++gceSTATUS
++gco2D_SetMaskedSource(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_MONOPACK MaskPack
++ );
++
++/* Configure masked color source extension for full rotation. */
++gceSTATUS
++gco2D_SetMaskedSourceEx(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_FORMAT Format,
++ IN gctBOOL CoordRelative,
++ IN gceSURF_MONOPACK MaskPack,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight
++ );
++
++/* Setup the source rectangle. */
++gceSTATUS
++gco2D_SetSource(
++ IN gco2D Engine,
++ IN gcsRECT_PTR SrcRect
++ );
++
++/* Set clipping rectangle. */
++gceSTATUS
++gco2D_SetClipping(
++ IN gco2D Engine,
++ IN gcsRECT_PTR Rect
++ );
++
++/* Configure destination. */
++gceSTATUS
++gco2D_SetTarget(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth
++ );
++
++/* Configure destination extension for full rotation. */
++gceSTATUS
++gco2D_SetTargetEx(
++ IN gco2D Engine,
++ IN gctUINT32 Address,
++ IN gctUINT32 Stride,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight
++ );
++
++/* Calculate and program the stretch factors. */
++gceSTATUS
++gco2D_CalcStretchFactor(
++ IN gco2D Engine,
++ IN gctINT32 SrcSize,
++ IN gctINT32 DestSize,
++ OUT gctUINT32_PTR Factor
++ );
++
++gceSTATUS
++gco2D_SetStretchFactors(
++ IN gco2D Engine,
++ IN gctUINT32 HorFactor,
++ IN gctUINT32 VerFactor
++ );
++
++/* Calculate and program the stretch factors based on the rectangles. */
++gceSTATUS
++gco2D_SetStretchRectFactors(
++ IN gco2D Engine,
++ IN gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DestRect
++ );
++
++/* Create a new solid color gcoBRUSH object. */
++gceSTATUS
++gco2D_ConstructSingleColorBrush(
++ IN gco2D Engine,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 Color,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Create a new monochrome gcoBRUSH object. */
++gceSTATUS
++gco2D_ConstructMonochromeBrush(
++ IN gco2D Engine,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctUINT32 ColorConvert,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor,
++ IN gctUINT64 Bits,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Create a color gcoBRUSH object. */
++gceSTATUS
++gco2D_ConstructColorBrush(
++ IN gco2D Engine,
++ IN gctUINT32 OriginX,
++ IN gctUINT32 OriginY,
++ IN gctPOINTER Address,
++ IN gceSURF_FORMAT Format,
++ IN gctUINT64 Mask,
++ gcoBRUSH * Brush
++ );
++
++/* Clear one or more rectangular areas. */
++gceSTATUS
++gco2D_Clear(
++ IN gco2D Engine,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR Rect,
++ IN gctUINT32 Color32,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Draw one or more Bresenham lines. */
++gceSTATUS
++gco2D_Line(
++ IN gco2D Engine,
++ IN gctUINT32 LineCount,
++ IN gcsRECT_PTR Position,
++ IN gcoBRUSH Brush,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Draw one or more Bresenham lines based on the 32-bit color. */
++gceSTATUS
++gco2D_ColorLine(
++ IN gco2D Engine,
++ IN gctUINT32 LineCount,
++ IN gcsRECT_PTR Position,
++ IN gctUINT32 Color32,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Generic blit. */
++gceSTATUS
++gco2D_Blit(
++ IN gco2D Engine,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR Rect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++gceSTATUS
++gco2D_Blend(
++ IN gco2D Engine,
++ IN gctUINT32 SrcCount,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR Rect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Batch blit. */
++gceSTATUS
++gco2D_BatchBlit(
++ IN gco2D Engine,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DestRect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Stretch blit. */
++gceSTATUS
++gco2D_StretchBlit(
++ IN gco2D Engine,
++ IN gctUINT32 RectCount,
++ IN gcsRECT_PTR Rect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++/* Monochrome blit. */
++gceSTATUS
++gco2D_MonoBlit(
++ IN gco2D Engine,
++ IN gctPOINTER StreamBits,
++ IN gcsPOINT_PTR StreamSize,
++ IN gcsRECT_PTR StreamRect,
++ IN gceSURF_MONOPACK SrcStreamPack,
++ IN gceSURF_MONOPACK DestStreamPack,
++ IN gcsRECT_PTR DestRect,
++ IN gctUINT32 FgRop,
++ IN gctUINT32 BgRop,
++ IN gceSURF_FORMAT DestFormat
++ );
++
++gceSTATUS
++gco2D_MonoBlitEx(
++ IN gco2D Engine,
++ IN gctPOINTER StreamBits,
++ IN gctINT32 StreamStride,
++ IN gctINT32 StreamWidth,
++ IN gctINT32 StreamHeight,
++ IN gctINT32 StreamX,
++ IN gctINT32 StreamY,
++ IN gctUINT32 FgColor,
++ IN gctUINT32 BgColor,
++ IN gcsRECT_PTR SrcRect,
++ IN gcsRECT_PTR DstRect,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop
++ );
++
++/* Set kernel size. */
++gceSTATUS
++gco2D_SetKernelSize(
++ IN gco2D Engine,
++ IN gctUINT8 HorKernelSize,
++ IN gctUINT8 VerKernelSize
++ );
++
++/* Set filter type. */
++gceSTATUS
++gco2D_SetFilterType(
++ IN gco2D Engine,
++ IN gceFILTER_TYPE FilterType
++ );
++
++/* Set the filter kernel by user. */
++gceSTATUS
++gco2D_SetUserFilterKernel(
++ IN gco2D Engine,
++ IN gceFILTER_PASS_TYPE PassType,
++ IN gctUINT16_PTR KernelArray
++ );
++
++/* Select the pass(es) to be done for user defined filter. */
++gceSTATUS
++gco2D_EnableUserFilterPasses(
++ IN gco2D Engine,
++ IN gctBOOL HorPass,
++ IN gctBOOL VerPass
++ );
++
++/* Frees the temporary buffer allocated by filter blit operation. */
++gceSTATUS
++gco2D_FreeFilterBuffer(
++ IN gco2D Engine
++ );
++
++/* Filter blit. */
++gceSTATUS
++gco2D_FilterBlit(
++ IN gco2D Engine,
++ IN gctUINT32 SrcAddress,
++ IN gctUINT SrcStride,
++ IN gctUINT32 SrcUAddress,
++ IN gctUINT SrcUStride,
++ IN gctUINT32 SrcVAddress,
++ IN gctUINT SrcVStride,
++ IN gceSURF_FORMAT SrcFormat,
++ IN gceSURF_ROTATION SrcRotation,
++ IN gctUINT32 SrcSurfaceWidth,
++ IN gcsRECT_PTR SrcRect,
++ IN gctUINT32 DestAddress,
++ IN gctUINT DestStride,
++ IN gceSURF_FORMAT DestFormat,
++ IN gceSURF_ROTATION DestRotation,
++ IN gctUINT32 DestSurfaceWidth,
++ IN gcsRECT_PTR DestRect,
++ IN gcsRECT_PTR DestSubRect
++ );
++
++/* Filter blit extension for full rotation. */
++gceSTATUS
++gco2D_FilterBlitEx(
++ IN gco2D Engine,
++ IN gctUINT32 SrcAddress,
++ IN gctUINT SrcStride,
++ IN gctUINT32 SrcUAddress,
++ IN gctUINT SrcUStride,
++ IN gctUINT32 SrcVAddress,
++ IN gctUINT SrcVStride,
++ IN gceSURF_FORMAT SrcFormat,
++ IN gceSURF_ROTATION SrcRotation,
++ IN gctUINT32 SrcSurfaceWidth,
++ IN gctUINT32 SrcSurfaceHeight,
++ IN gcsRECT_PTR SrcRect,
++ IN gctUINT32 DestAddress,
++ IN gctUINT DestStride,
++ IN gceSURF_FORMAT DestFormat,
++ IN gceSURF_ROTATION DestRotation,
++ IN gctUINT32 DestSurfaceWidth,
++ IN gctUINT32 DestSurfaceHeight,
++ IN gcsRECT_PTR DestRect,
++ IN gcsRECT_PTR DestSubRect
++ );
++
++gceSTATUS
++gco2D_FilterBlitEx2(
++ IN gco2D Engine,
++ IN gctUINT32_PTR SrcAddresses,
++ IN gctUINT32 SrcAddressNum,
++ IN gctUINT32_PTR SrcStrides,
++ IN gctUINT32 SrcStrideNum,
++ IN gceTILING SrcTiling,
++ IN gceSURF_FORMAT SrcFormat,
++ IN gceSURF_ROTATION SrcRotation,
++ IN gctUINT32 SrcSurfaceWidth,
++ IN gctUINT32 SrcSurfaceHeight,
++ IN gcsRECT_PTR SrcRect,
++ IN gctUINT32_PTR DestAddresses,
++ IN gctUINT32 DestAddressNum,
++ IN gctUINT32_PTR DestStrides,
++ IN gctUINT32 DestStrideNum,
++ IN gceTILING DestTiling,
++ IN gceSURF_FORMAT DestFormat,
++ IN gceSURF_ROTATION DestRotation,
++ IN gctUINT32 DestSurfaceWidth,
++ IN gctUINT32 DestSurfaceHeight,
++ IN gcsRECT_PTR DestRect,
++ IN gcsRECT_PTR DestSubRect
++ );
++
++/* Enable alpha blending engine in the hardware and disengage the ROP engine. */
++gceSTATUS
++gco2D_EnableAlphaBlend(
++ IN gco2D Engine,
++ IN gctUINT8 SrcGlobalAlphaValue,
++ IN gctUINT8 DstGlobalAlphaValue,
++ IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode,
++ IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode,
++ IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode,
++ IN gceSURF_BLEND_FACTOR_MODE DstFactorMode,
++ IN gceSURF_PIXEL_COLOR_MODE SrcColorMode,
++ IN gceSURF_PIXEL_COLOR_MODE DstColorMode
++ );
++
++/* Enable alpha blending engine in the hardware. */
++gceSTATUS
++gco2D_EnableAlphaBlendAdvanced(
++ IN gco2D Engine,
++ IN gceSURF_PIXEL_ALPHA_MODE SrcAlphaMode,
++ IN gceSURF_PIXEL_ALPHA_MODE DstAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE SrcGlobalAlphaMode,
++ IN gceSURF_GLOBAL_ALPHA_MODE DstGlobalAlphaMode,
++ IN gceSURF_BLEND_FACTOR_MODE SrcFactorMode,
++ IN gceSURF_BLEND_FACTOR_MODE DstFactorMode
++ );
++
++/* Enable alpha blending engine with Porter Duff rule. */
++gceSTATUS
++gco2D_SetPorterDuffBlending(
++ IN gco2D Engine,
++ IN gce2D_PORTER_DUFF_RULE Rule
++ );
++
++/* Disable alpha blending engine in the hardware and engage the ROP engine. */
++gceSTATUS
++gco2D_DisableAlphaBlend(
++ IN gco2D Engine
++ );
++
++/* Retrieve the maximum number of 32-bit data chunks for a single DE command. */
++gctUINT32
++gco2D_GetMaximumDataCount(
++ void
++ );
++
++/* Retrieve the maximum number of rectangles that can be passed in a single DE command. */
++gctUINT32
++gco2D_GetMaximumRectCount(
++ void
++ );
++
++/* Returns the pixel alignment of the surface. */
++gceSTATUS
++gco2D_GetPixelAlignment(
++ gceSURF_FORMAT Format,
++ gcsPOINT_PTR Alignment
++ );
++
++/* Retrieve monochrome stream pack size. */
++gceSTATUS
++gco2D_GetPackSize(
++ IN gceSURF_MONOPACK StreamPack,
++ OUT gctUINT32 * PackWidth,
++ OUT gctUINT32 * PackHeight
++ );
++
++/* Flush the 2D pipeline. */
++gceSTATUS
++gco2D_Flush(
++ IN gco2D Engine
++ );
++
++/* Load 256-entry color table for INDEX8 source surfaces. */
++gceSTATUS
++gco2D_LoadPalette(
++ IN gco2D Engine,
++ IN gctUINT FirstIndex,
++ IN gctUINT IndexCount,
++ IN gctPOINTER ColorTable,
++ IN gctBOOL ColorConvert
++ );
++
++/* Enable/disable 2D BitBlt mirroring. */
++gceSTATUS
++gco2D_SetBitBlitMirror(
++ IN gco2D Engine,
++ IN gctBOOL HorizontalMirror,
++ IN gctBOOL VerticalMirror
++ );
++
++/*
++ * Set the transparency for source, destination and pattern.
++ * It also enables or disables the DFB color key mode.
++ */
++gceSTATUS
++gco2D_SetTransparencyAdvancedEx(
++ IN gco2D Engine,
++ IN gce2D_TRANSPARENCY SrcTransparency,
++ IN gce2D_TRANSPARENCY DstTransparency,
++ IN gce2D_TRANSPARENCY PatTransparency,
++ IN gctBOOL EnableDFBColorKeyMode
++ );
++
++/* Set the transparency for source, destination and pattern. */
++gceSTATUS
++gco2D_SetTransparencyAdvanced(
++ IN gco2D Engine,
++ IN gce2D_TRANSPARENCY SrcTransparency,
++ IN gce2D_TRANSPARENCY DstTransparency,
++ IN gce2D_TRANSPARENCY PatTransparency
++ );
++
++/* Set the source color key. */
++gceSTATUS
++gco2D_SetSourceColorKeyAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 ColorKey
++ );
++
++/* Set the source color key range. */
++gceSTATUS
++gco2D_SetSourceColorKeyRangeAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 ColorKeyLow,
++ IN gctUINT32 ColorKeyHigh
++ );
++
++/* Set the target color key. */
++gceSTATUS
++gco2D_SetTargetColorKeyAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 ColorKey
++ );
++
++/* Set the target color key range. */
++gceSTATUS
++gco2D_SetTargetColorKeyRangeAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 ColorKeyLow,
++ IN gctUINT32 ColorKeyHigh
++ );
++
++/* Set the YUV color space mode. */
++gceSTATUS
++gco2D_SetYUVColorMode(
++ IN gco2D Engine,
++ IN gce2D_YUV_COLOR_MODE Mode
++ );
++
++/* Setup the source global color value in ARGB8 format. */
++gceSTATUS gco2D_SetSourceGlobalColorAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 Color32
++ );
++
++/* Setup the target global color value in ARGB8 format. */
++gceSTATUS gco2D_SetTargetGlobalColorAdvanced(
++ IN gco2D Engine,
++ IN gctUINT32 Color32
++ );
++
++/* Setup the source and target pixel multiply modes. */
++gceSTATUS
++gco2D_SetPixelMultiplyModeAdvanced(
++ IN gco2D Engine,
++ IN gce2D_PIXEL_COLOR_MULTIPLY_MODE SrcPremultiplySrcAlpha,
++ IN gce2D_PIXEL_COLOR_MULTIPLY_MODE DstPremultiplyDstAlpha,
++ IN gce2D_GLOBAL_COLOR_MULTIPLY_MODE SrcPremultiplyGlobalMode,
++ IN gce2D_PIXEL_COLOR_MULTIPLY_MODE DstDemultiplyDstAlpha
++ );
++
++/* Set the number of GPU clock cycles after which an idle engine auto-flushes. */
++gceSTATUS
++gco2D_SetAutoFlushCycles(
++ IN gco2D Engine,
++ IN gctUINT32 Cycles
++ );
++
++#if VIVANTE_PROFILER
++/* Read the profile registers available in the 2D engine and set them in the profile.
++ The function also resets the pixelsRendered counter on every call.
++*/
++gceSTATUS
++gco2D_ProfileEngine(
++ IN gco2D Engine,
++ OPTIONAL gcs2D_PROFILE_PTR Profile
++ );
++#endif
++
++/* Enable or disable 2D dithering. */
++gceSTATUS
++gco2D_EnableDither(
++ IN gco2D Engine,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gco2D_SetGenericSource(
++ IN gco2D Engine,
++ IN gctUINT32_PTR Addresses,
++ IN gctUINT32 AddressNum,
++ IN gctUINT32_PTR Strides,
++ IN gctUINT32 StrideNum,
++ IN gceTILING Tiling,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight
++);
++
++gceSTATUS
++gco2D_SetGenericTarget(
++ IN gco2D Engine,
++ IN gctUINT32_PTR Addresses,
++ IN gctUINT32 AddressNum,
++ IN gctUINT32_PTR Strides,
++ IN gctUINT32 StrideNum,
++ IN gceTILING Tiling,
++ IN gceSURF_FORMAT Format,
++ IN gceSURF_ROTATION Rotation,
++ IN gctUINT32 SurfaceWidth,
++ IN gctUINT32 SurfaceHeight
++);
++
++gceSTATUS
++gco2D_SetCurrentSourceIndex(
++ IN gco2D Engine,
++ IN gctUINT32 SrcIndex
++ );
++
++gceSTATUS
++gco2D_MultiSourceBlit(
++ IN gco2D Engine,
++ IN gctUINT32 SourceMask,
++ IN gcsRECT_PTR DestRect,
++ IN gctUINT32 RectCount
++ );
++
++gceSTATUS
++gco2D_SetROP(
++ IN gco2D Engine,
++ IN gctUINT8 FgRop,
++ IN gctUINT8 BgRop
++ );
++
++gceSTATUS
++gco2D_SetGdiStretchMode(
++ IN gco2D Engine,
++ IN gctBOOL Enable
++ );
++
++gceSTATUS
++gco2D_SetSourceTileStatus(
++ IN gco2D Engine,
++ IN gce2D_TILE_STATUS_CONFIG TSControl,
++ IN gceSURF_FORMAT CompressedFormat,
++ IN gctUINT32 ClearValue,
++ IN gctUINT32 GpuAddress
++ );
++
++gceSTATUS
++gco2D_SetTargetTileStatus(
++ IN gco2D Engine,
++ IN gce2D_TILE_STATUS_CONFIG TileStatusConfig,
++ IN gceSURF_FORMAT CompressedFormat,
++ IN gctUINT32 ClearValue,
++ IN gctUINT32 GpuAddress
++ );
++
++gceSTATUS
++gco2D_QueryU32(
++ IN gco2D Engine,
++ IN gce2D_QUERY Item,
++ OUT gctUINT32_PTR Value
++ );
++
++gceSTATUS
++gco2D_SetStateU32(
++ IN gco2D Engine,
++ IN gce2D_STATE State,
++ IN gctUINT32 Value
++ );
++
++gceSTATUS
++gco2D_SetStateArrayI32(
++ IN gco2D Engine,
++ IN gce2D_STATE State,
++ IN gctINT32_PTR Array,
++ IN gctINT32 ArraySize
++ );
++
++gceSTATUS
++gco2D_SetStateArrayU32(
++ IN gco2D Engine,
++ IN gce2D_STATE State,
++ IN gctUINT32_PTR Array,
++ IN gctINT32 ArraySize
++ );
++
++gceSTATUS
++gco2D_SetTargetRect(
++ IN gco2D Engine,
++ IN gcsRECT_PTR Rect
++ );
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_raster_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_rename.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_rename.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_rename.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_rename.h 2015-07-27 23:13:06.218794344 +0200
+@@ -0,0 +1,248 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_rename_h_
++#define __gc_hal_rename_h_
++
++
++#if defined(_HAL2D_APPENDIX)
++
++#define _HAL2D_RENAME_2(api, appendix) api ## appendix
++#define _HAL2D_RENAME_1(api, appendix) _HAL2D_RENAME_2(api, appendix)
++#define gcmHAL2D(api) _HAL2D_RENAME_1(api, _HAL2D_APPENDIX)
++
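++/*
++ * Editor's note (worked expansion with an assumed appendix value): if a
++ * build defines _HAL2D_APPENDIX as 2D, the two-step paste above expands
++ *
++ *     gcmHAL2D(gckOS_Construct)
++ *         -> _HAL2D_RENAME_1(gckOS_Construct, 2D)
++ *         -> _HAL2D_RENAME_2(gckOS_Construct, 2D)
++ *         -> gckOS_Construct2D
++ *
++ * so the 2D-only kernel library gets distinct symbol names and can coexist
++ * with the 3D driver. The value "2D" is only an example.
++ */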
++
++#define gckOS_Construct gcmHAL2D(gckOS_Construct)
++#define gckOS_Destroy gcmHAL2D(gckOS_Destroy)
++#define gckOS_QueryVideoMemory gcmHAL2D(gckOS_QueryVideoMemory)
++#define gckOS_Allocate gcmHAL2D(gckOS_Allocate)
++#define gckOS_Free gcmHAL2D(gckOS_Free)
++#define gckOS_AllocateMemory gcmHAL2D(gckOS_AllocateMemory)
++#define gckOS_FreeMemory gcmHAL2D(gckOS_FreeMemory)
++#define gckOS_AllocatePagedMemory gcmHAL2D(gckOS_AllocatePagedMemory)
++#define gckOS_AllocatePagedMemoryEx gcmHAL2D(gckOS_AllocatePagedMemoryEx)
++#define gckOS_LockPages gcmHAL2D(gckOS_LockPages)
++#define gckOS_MapPages gcmHAL2D(gckOS_MapPages)
++#define gckOS_UnlockPages gcmHAL2D(gckOS_UnlockPages)
++#define gckOS_FreePagedMemory gcmHAL2D(gckOS_FreePagedMemory)
++#define gckOS_AllocateNonPagedMemory gcmHAL2D(gckOS_AllocateNonPagedMemory)
++#define gckOS_FreeNonPagedMemory gcmHAL2D(gckOS_FreeNonPagedMemory)
++#define gckOS_AllocateContiguous gcmHAL2D(gckOS_AllocateContiguous)
++#define gckOS_FreeContiguous gcmHAL2D(gckOS_FreeContiguous)
++#define gckOS_GetPageSize gcmHAL2D(gckOS_GetPageSize)
++#define gckOS_GetPhysicalAddress gcmHAL2D(gckOS_GetPhysicalAddress)
++#define gckOS_GetPhysicalAddressProcess gcmHAL2D(gckOS_GetPhysicalAddressProcess)
++#define gckOS_MapPhysical gcmHAL2D(gckOS_MapPhysical)
++#define gckOS_UnmapPhysical gcmHAL2D(gckOS_UnmapPhysical)
++#define gckOS_ReadRegister gcmHAL2D(gckOS_ReadRegister)
++#define gckOS_WriteRegister gcmHAL2D(gckOS_WriteRegister)
++#define gckOS_WriteMemory gcmHAL2D(gckOS_WriteMemory)
++#define gckOS_MapMemory gcmHAL2D(gckOS_MapMemory)
++#define gckOS_UnmapMemory gcmHAL2D(gckOS_UnmapMemory)
++#define gckOS_UnmapMemoryEx gcmHAL2D(gckOS_UnmapMemoryEx)
++#define gckOS_CreateMutex gcmHAL2D(gckOS_CreateMutex)
++#define gckOS_DeleteMutex gcmHAL2D(gckOS_DeleteMutex)
++#define gckOS_AcquireMutex gcmHAL2D(gckOS_AcquireMutex)
++#define gckOS_ReleaseMutex gcmHAL2D(gckOS_ReleaseMutex)
++#define gckOS_AtomicExchange gcmHAL2D(gckOS_AtomicExchange)
++#define gckOS_AtomicExchangePtr gcmHAL2D(gckOS_AtomicExchangePtr)
++#define gckOS_AtomConstruct gcmHAL2D(gckOS_AtomConstruct)
++#define gckOS_AtomDestroy gcmHAL2D(gckOS_AtomDestroy)
++#define gckOS_AtomGet gcmHAL2D(gckOS_AtomGet)
++#define gckOS_AtomIncrement gcmHAL2D(gckOS_AtomIncrement)
++#define gckOS_AtomDecrement gcmHAL2D(gckOS_AtomDecrement)
++#define gckOS_Delay gcmHAL2D(gckOS_Delay)
++#define gckOS_GetTime gcmHAL2D(gckOS_GetTime)
++#define gckOS_MemoryBarrier gcmHAL2D(gckOS_MemoryBarrier)
++#define gckOS_MapUserPointer gcmHAL2D(gckOS_MapUserPointer)
++#define gckOS_UnmapUserPointer gcmHAL2D(gckOS_UnmapUserPointer)
++#define gckOS_QueryNeedCopy gcmHAL2D(gckOS_QueryNeedCopy)
++#define gckOS_CopyFromUserData gcmHAL2D(gckOS_CopyFromUserData)
++#define gckOS_CopyToUserData gcmHAL2D(gckOS_CopyToUserData)
++#define gckOS_MapUserPhysical gcmHAL2D(gckOS_MapUserPhysical)
++#define gckOS_SuspendInterrupt gcmHAL2D(gckOS_SuspendInterrupt)
++#define gckOS_ResumeInterrupt gcmHAL2D(gckOS_ResumeInterrupt)
++#define gckOS_GetBaseAddress gcmHAL2D(gckOS_GetBaseAddress)
++#define gckOS_MemCopy gcmHAL2D(gckOS_MemCopy)
++#define gckOS_ZeroMemory gcmHAL2D(gckOS_ZeroMemory)
++#define gckOS_DeviceControl gcmHAL2D(gckOS_DeviceControl)
++#define gckOS_GetProcessID gcmHAL2D(gckOS_GetProcessID)
++#define gckOS_GetThreadID gcmHAL2D(gckOS_GetThreadID)
++#define gckOS_CreateSignal gcmHAL2D(gckOS_CreateSignal)
++#define gckOS_DestroySignal gcmHAL2D(gckOS_DestroySignal)
++#define gckOS_Signal gcmHAL2D(gckOS_Signal)
++#define gckOS_WaitSignal gcmHAL2D(gckOS_WaitSignal)
++#define gckOS_MapSignal gcmHAL2D(gckOS_MapSignal)
++#define gckOS_MapUserMemory gcmHAL2D(gckOS_MapUserMemory)
++#define gckOS_UnmapUserMemory gcmHAL2D(gckOS_UnmapUserMemory)
++#define gckOS_CreateUserSignal gcmHAL2D(gckOS_CreateUserSignal)
++#define gckOS_DestroyUserSignal gcmHAL2D(gckOS_DestroyUserSignal)
++#define gckOS_WaitUserSignal gcmHAL2D(gckOS_WaitUserSignal)
++#define gckOS_SignalUserSignal gcmHAL2D(gckOS_SignalUserSignal)
++#define gckOS_UserSignal gcmHAL2D(gckOS_UserSignal)
++#define gckOS_CacheClean gcmHAL2D(gckOS_CacheClean)
++#define gckOS_CacheFlush gcmHAL2D(gckOS_CacheFlush)
++#define gckOS_SetDebugLevel gcmHAL2D(gckOS_SetDebugLevel)
++#define gckOS_SetDebugZone gcmHAL2D(gckOS_SetDebugZone)
++#define gckOS_SetDebugLevelZone gcmHAL2D(gckOS_SetDebugLevelZone)
++#define gckOS_SetDebugZones gcmHAL2D(gckOS_SetDebugZones)
++#define gckOS_SetDebugFile gcmHAL2D(gckOS_SetDebugFile)
++#define gckOS_Broadcast gcmHAL2D(gckOS_Broadcast)
++#define gckOS_SetGPUPower gcmHAL2D(gckOS_SetGPUPower)
++#define gckOS_CreateSemaphore gcmHAL2D(gckOS_CreateSemaphore)
++#define gckOS_DestroySemaphore gcmHAL2D(gckOS_DestroySemaphore)
++#define gckOS_AcquireSemaphore gcmHAL2D(gckOS_AcquireSemaphore)
++#define gckOS_ReleaseSemaphore gcmHAL2D(gckOS_ReleaseSemaphore)
++#define gckHEAP_Construct gcmHAL2D(gckHEAP_Construct)
++#define gckHEAP_Destroy gcmHAL2D(gckHEAP_Destroy)
++#define gckHEAP_Allocate gcmHAL2D(gckHEAP_Allocate)
++#define gckHEAP_Free gcmHAL2D(gckHEAP_Free)
++#define gckHEAP_ProfileStart gcmHAL2D(gckHEAP_ProfileStart)
++#define gckHEAP_ProfileEnd gcmHAL2D(gckHEAP_ProfileEnd)
++#define gckHEAP_Test gcmHAL2D(gckHEAP_Test)
++#define gckVIDMEM_Construct gcmHAL2D(gckVIDMEM_Construct)
++#define gckVIDMEM_Destroy gcmHAL2D(gckVIDMEM_Destroy)
++#define gckVIDMEM_Allocate gcmHAL2D(gckVIDMEM_Allocate)
++#define gckVIDMEM_AllocateLinear gcmHAL2D(gckVIDMEM_AllocateLinear)
++#define gckVIDMEM_Free gcmHAL2D(gckVIDMEM_Free)
++#define gckVIDMEM_Lock gcmHAL2D(gckVIDMEM_Lock)
++#define gckVIDMEM_Unlock gcmHAL2D(gckVIDMEM_Unlock)
++#define gckVIDMEM_ConstructVirtual gcmHAL2D(gckVIDMEM_ConstructVirtual)
++#define gckVIDMEM_DestroyVirtual gcmHAL2D(gckVIDMEM_DestroyVirtual)
++#define gckKERNEL_Construct gcmHAL2D(gckKERNEL_Construct)
++#define gckKERNEL_Destroy gcmHAL2D(gckKERNEL_Destroy)
++#define gckKERNEL_Dispatch gcmHAL2D(gckKERNEL_Dispatch)
++#define gckKERNEL_QueryVideoMemory gcmHAL2D(gckKERNEL_QueryVideoMemory)
++#define gckKERNEL_GetVideoMemoryPool gcmHAL2D(gckKERNEL_GetVideoMemoryPool)
++#define gckKERNEL_MapVideoMemory gcmHAL2D(gckKERNEL_MapVideoMemory)
++#define gckKERNEL_UnmapVideoMemory gcmHAL2D(gckKERNEL_UnmapVideoMemory)
++#define gckKERNEL_MapMemory gcmHAL2D(gckKERNEL_MapMemory)
++#define gckKERNEL_UnmapMemory gcmHAL2D(gckKERNEL_UnmapMemory)
++#define gckKERNEL_Notify gcmHAL2D(gckKERNEL_Notify)
++#define gckKERNEL_QuerySettings gcmHAL2D(gckKERNEL_QuerySettings)
++#define gckKERNEL_Recovery gcmHAL2D(gckKERNEL_Recovery)
++#define gckKERNEL_OpenUserData gcmHAL2D(gckKERNEL_OpenUserData)
++#define gckKERNEL_CloseUserData gcmHAL2D(gckKERNEL_CloseUserData)
++#define gckHARDWARE_Construct gcmHAL2D(gckHARDWARE_Construct)
++#define gckHARDWARE_Destroy gcmHAL2D(gckHARDWARE_Destroy)
++#define gckHARDWARE_QuerySystemMemory gcmHAL2D(gckHARDWARE_QuerySystemMemory)
++#define gckHARDWARE_BuildVirtualAddress gcmHAL2D(gckHARDWARE_BuildVirtualAddress)
++#define gckHARDWARE_QueryCommandBuffer gcmHAL2D(gckHARDWARE_QueryCommandBuffer)
++#define gckHARDWARE_WaitLink gcmHAL2D(gckHARDWARE_WaitLink)
++#define gckHARDWARE_Execute gcmHAL2D(gckHARDWARE_Execute)
++#define gckHARDWARE_End gcmHAL2D(gckHARDWARE_End)
++#define gckHARDWARE_Nop gcmHAL2D(gckHARDWARE_Nop)
++#define gckHARDWARE_Wait gcmHAL2D(gckHARDWARE_Wait)
++#define gckHARDWARE_PipeSelect gcmHAL2D(gckHARDWARE_PipeSelect)
++#define gckHARDWARE_Link gcmHAL2D(gckHARDWARE_Link)
++#define gckHARDWARE_Event gcmHAL2D(gckHARDWARE_Event)
++#define gckHARDWARE_QueryMemory gcmHAL2D(gckHARDWARE_QueryMemory)
++#define gckHARDWARE_QueryChipIdentity gcmHAL2D(gckHARDWARE_QueryChipIdentity)
++#define gckHARDWARE_QueryChipSpecs gcmHAL2D(gckHARDWARE_QueryChipSpecs)
++#define gckHARDWARE_QueryShaderCaps gcmHAL2D(gckHARDWARE_QueryShaderCaps)
++#define gckHARDWARE_ConvertFormat gcmHAL2D(gckHARDWARE_ConvertFormat)
++#define gckHARDWARE_SplitMemory gcmHAL2D(gckHARDWARE_SplitMemory)
++#define gckHARDWARE_AlignToTile gcmHAL2D(gckHARDWARE_AlignToTile)
++#define gckHARDWARE_UpdateQueueTail gcmHAL2D(gckHARDWARE_UpdateQueueTail)
++#define gckHARDWARE_ConvertLogical gcmHAL2D(gckHARDWARE_ConvertLogical)
++#define gckHARDWARE_ConvertPhysical gcmHAL2D(gckHARDWARE_ConvertPhysical)
++#define gckHARDWARE_Interrupt gcmHAL2D(gckHARDWARE_Interrupt)
++#define gckHARDWARE_SetMMU gcmHAL2D(gckHARDWARE_SetMMU)
++#define gckHARDWARE_FlushMMU gcmHAL2D(gckHARDWARE_FlushMMU)
++#define gckHARDWARE_GetIdle gcmHAL2D(gckHARDWARE_GetIdle)
++#define gckHARDWARE_Flush gcmHAL2D(gckHARDWARE_Flush)
++#define gckHARDWARE_SetFastClear gcmHAL2D(gckHARDWARE_SetFastClear)
++#define gckHARDWARE_ReadInterrupt gcmHAL2D(gckHARDWARE_ReadInterrupt)
++#define gckHARDWARE_SetPowerManagementState gcmHAL2D(gckHARDWARE_SetPowerManagementState)
++#define gckHARDWARE_QueryPowerManagementState gcmHAL2D(gckHARDWARE_QueryPowerManagementState)
++#define gckHARDWARE_ProfileEngine2D gcmHAL2D(gckHARDWARE_ProfileEngine2D)
++#define gckHARDWARE_InitializeHardware gcmHAL2D(gckHARDWARE_InitializeHardware)
++#define gckHARDWARE_Reset gcmHAL2D(gckHARDWARE_Reset)
++#define gckINTERRUPT_Construct gcmHAL2D(gckINTERRUPT_Construct)
++#define gckINTERRUPT_Destroy gcmHAL2D(gckINTERRUPT_Destroy)
++#define gckINTERRUPT_SetHandler gcmHAL2D(gckINTERRUPT_SetHandler)
++#define gckINTERRUPT_Notify gcmHAL2D(gckINTERRUPT_Notify)
++#define gckEVENT_Construct gcmHAL2D(gckEVENT_Construct)
++#define gckEVENT_Destroy gcmHAL2D(gckEVENT_Destroy)
++#define gckEVENT_AddList gcmHAL2D(gckEVENT_AddList)
++#define gckEVENT_FreeNonPagedMemory gcmHAL2D(gckEVENT_FreeNonPagedMemory)
++#define gckEVENT_FreeContiguousMemory gcmHAL2D(gckEVENT_FreeContiguousMemory)
++#define gckEVENT_FreeVideoMemory gcmHAL2D(gckEVENT_FreeVideoMemory)
++#define gckEVENT_Signal gcmHAL2D(gckEVENT_Signal)
++#define gckEVENT_Unlock gcmHAL2D(gckEVENT_Unlock)
++#define gckEVENT_Submit gcmHAL2D(gckEVENT_Submit)
++#define gckEVENT_Commit gcmHAL2D(gckEVENT_Commit)
++#define gckEVENT_Notify gcmHAL2D(gckEVENT_Notify)
++#define gckEVENT_Interrupt gcmHAL2D(gckEVENT_Interrupt)
++#define gckCOMMAND_Construct gcmHAL2D(gckCOMMAND_Construct)
++#define gckCOMMAND_Destroy gcmHAL2D(gckCOMMAND_Destroy)
++#define gckCOMMAND_EnterCommit gcmHAL2D(gckCOMMAND_EnterCommit)
++#define gckCOMMAND_ExitCommit gcmHAL2D(gckCOMMAND_ExitCommit)
++#define gckCOMMAND_Start gcmHAL2D(gckCOMMAND_Start)
++#define gckCOMMAND_Stop gcmHAL2D(gckCOMMAND_Stop)
++#define gckCOMMAND_Commit gcmHAL2D(gckCOMMAND_Commit)
++#define gckCOMMAND_Reserve gcmHAL2D(gckCOMMAND_Reserve)
++#define gckCOMMAND_Execute gcmHAL2D(gckCOMMAND_Execute)
++#define gckCOMMAND_Stall gcmHAL2D(gckCOMMAND_Stall)
++#define gckCOMMAND_Attach gcmHAL2D(gckCOMMAND_Attach)
++#define gckCOMMAND_Detach gcmHAL2D(gckCOMMAND_Detach)
++#define gckMMU_Construct gcmHAL2D(gckMMU_Construct)
++#define gckMMU_Destroy gcmHAL2D(gckMMU_Destroy)
++#define gckMMU_AllocatePages gcmHAL2D(gckMMU_AllocatePages)
++#define gckMMU_FreePages gcmHAL2D(gckMMU_FreePages)
++#define gckMMU_InsertNode gcmHAL2D(gckMMU_InsertNode)
++#define gckMMU_RemoveNode gcmHAL2D(gckMMU_RemoveNode)
++#define gckMMU_FreeHandleMemory gcmHAL2D(gckMMU_FreeHandleMemory)
++#define gckMMU_Test gcmHAL2D(gckMMU_Test)
++#define gckHARDWARE_QueryProfileRegisters gcmHAL2D(gckHARDWARE_QueryProfileRegisters)
++
++
++#define FindMdlMap gcmHAL2D(FindMdlMap)
++#define OnProcessExit gcmHAL2D(OnProcessExit)
++
++#define gckGALDEVICE_Destroy gcmHAL2D(gckGALDEVICE_Destroy)
++#define gckOS_Print gcmHAL2D(gckOS_Print)
++#define gckGALDEVICE_FreeMemory gcmHAL2D(gckGALDEVICE_FreeMemory)
++#define gckGALDEVICE_AllocateMemory gcmHAL2D(gckGALDEVICE_AllocateMemory)
++#define gckOS_DebugBreak gcmHAL2D(gckOS_DebugBreak)
++#define gckGALDEVICE_Release_ISR gcmHAL2D(gckGALDEVICE_Release_ISR)
++#define gckOS_Verify gcmHAL2D(gckOS_Verify)
++#define gckCOMMAND_Release gcmHAL2D(gckCOMMAND_Release)
++#define gckGALDEVICE_Stop gcmHAL2D(gckGALDEVICE_Stop)
++#define gckGALDEVICE_Construct gcmHAL2D(gckGALDEVICE_Construct)
++#define gckOS_DebugFatal gcmHAL2D(gckOS_DebugFatal)
++#define gckOS_DebugTrace gcmHAL2D(gckOS_DebugTrace)
++#define gckHARDWARE_GetBaseAddress gcmHAL2D(gckHARDWARE_GetBaseAddress)
++#define gckGALDEVICE_Setup_ISR gcmHAL2D(gckGALDEVICE_Setup_ISR)
++#define gckKERNEL_AttachProcess gcmHAL2D(gckKERNEL_AttachProcess)
++#define gckKERNEL_AttachProcessEx gcmHAL2D(gckKERNEL_AttachProcessEx)
++#define gckGALDEVICE_Start_Thread gcmHAL2D(gckGALDEVICE_Start_Thread)
++#define gckHARDWARE_QueryIdle gcmHAL2D(gckHARDWARE_QueryIdle)
++#define gckGALDEVICE_Start gcmHAL2D(gckGALDEVICE_Start)
++#define gckOS_GetKernelLogical gcmHAL2D(gckOS_GetKernelLogical)
++#define gckOS_DebugTraceZone gcmHAL2D(gckOS_DebugTraceZone)
++#define gckGALDEVICE_Stop_Thread gcmHAL2D(gckGALDEVICE_Stop_Thread)
++#define gckHARDWARE_NeedBaseAddress gcmHAL2D(gckHARDWARE_NeedBaseAddress)
++
++#endif
++
++#endif /* __gc_hal_rename_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_security_interface.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_security_interface.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_security_interface.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_security_interface.h 2015-07-27 23:13:06.218794344 +0200
+@@ -0,0 +1,137 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef _GC_HAL_SECURITY_INTERFACE_H_
++#define _GC_HAL_SECURITY_INTERFACE_H_
++/*!
++ @brief Command codes between kernel module and TrustZone
++ @discussion
++ Critical services must be done in TrustZone to avoid leaking sensitive content. Most of the kernel module is kept in the
++ non-secure OS to minimize the amount of code in TrustZone.
++ */
++typedef enum kernel_packet_command {
++ KERNEL_START_COMMAND,
++ KERNEL_SUBMIT,
++ KERNEL_MAP_MEMORY, /* */
++ KERNEL_UNMAP_MEMORY,
++ KERNEL_ALLOCATE_SECRUE_MEMORY, /*! Security memory management. */
++ KERNEL_FREE_SECURE_MEMORY,
++ KERNEL_EXECUTE, /* Execute a command buffer. */
++} kernel_packet_command_t;
++
++/*!
++ @brief gckCOMMAND Object requests TrustZone to start FE.
++ @discussion
++ The DMA enable register can only be written in TrustZone, to prevent the GPU from jumping to hacked code.
++ The kernel module must use this command to ask TrustZone to start the command parser.
++ */
++struct kernel_start_command {
++ kernel_packet_command_t command; /*! The command (always needs to be the first entry in a structure). */
++ gctUINT8 gpu; /*! Which GPU. */
++};
++
++/*!
++ @brief gckCOMMAND Object requests TrustZone to submit a command buffer.
++ @discussion
++ Code in TrustZone checks the content of the command buffer after copying it into TrustZone.
++ */
++struct kernel_submit {
++ kernel_packet_command_t command; /*! The command (always needs to be the first entry in a structure). */
++ gctUINT8 gpu; /*! Which GPU. */
++ gctUINT8 kernel_command; /*! Whether it is a kernel command. */
++ gctUINT32 command_buffer_handle; /*! Handle to command buffer. */
++ gctUINT32 offset; /* Offset in command buffer. */
++ gctUINT32 * command_buffer; /*! Content of the command buffer to be submitted. */
++ gctUINT32 command_buffer_length; /*! Length of command buffer. */
++};
++
++
++/*!
++ @brief gckVIDMEM Object requests TrustZone to allocate security memory.
++ @discussion
++ Allocate a buffer from secure GPU memory.
++ */
++struct kernel_allocate_security_memory {
++ kernel_packet_command_t command; /*! The command (always needs to be the first entry in a structure). */
++ gctUINT32 bytes; /*! Requested bytes. */
++ gctUINT32 memory_handle; /*! Handle of allocated memory. */
++};
++
++/*!
++ @brief gckVIDMEM Object requests TrustZone to free security memory.
++ @discussion
++ Free a video memory buffer from secure GPU memory.
++ */
++struct kernel_free_security_memory {
++ kernel_packet_command_t command; /*! The command (always needs to be the first entry in a structure). */
++ gctUINT32 memory_handle; /*! Handle of allocated memory. */
++};
++
++struct kernel_execute {
++ kernel_packet_command_t command; /*! The command (always needs to be the first entry in a structure). */
++ gctUINT8 gpu; /*! Which GPU. */
++ gctUINT8 kernel_command; /*! Whether it is a kernel command. */
++ gctUINT32 * command_buffer; /*! Content of the command buffer to be submitted. */
++ gctUINT32 command_buffer_length; /*! Length of command buffer. */
++};
++
++typedef struct kernel_map_scatter_gather {
++ gctUINT32 bytes;
++ gctUINT32 physical;
++ struct kernel_map_scatter_gather *next;
++}
++kernel_map_scatter_gather_t;
++
++struct kernel_map_memory {
++ kernel_packet_command_t command;
++ kernel_map_scatter_gather_t *scatter;
++ gctUINT32 *physicals;
++ gctUINT32 pageCount;
++ gctUINT32 gpuAddress;
++};
++
++struct kernel_unmap_memory {
++ gctUINT32 gpuAddress;
++ gctUINT32 pageCount;
++};
++
++typedef struct _gcsTA_INTERFACE {
++ kernel_packet_command_t command;
++ union {
++ struct kernel_submit Submit;
++ struct kernel_start_command StartCommand;
++ struct kernel_allocate_security_memory AllocateSecurityMemory;
++ struct kernel_execute Execute;
++ struct kernel_map_memory MapMemory;
++ struct kernel_unmap_memory UnmapMemory;
++ } u;
++ gceSTATUS result;
++} gcsTA_INTERFACE;
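++
++/*
++ * Editor's sketch (hypothetical usage, not part of the original header):
++ * filling a gcsTA_INTERFACE packet on the non-secure side before passing it
++ * to the gcvTA_COMMAND_DISPATCH entry point. All values are examples.
++ */
++#if 0
++static void
++exampleFillSubmit(
++    gcsTA_INTERFACE * Interface,
++    gctUINT32 * Buffer,
++    gctUINT32 Bytes
++    )
++{
++    Interface->command                        = KERNEL_SUBMIT;
++    Interface->u.Submit.command               = KERNEL_SUBMIT;
++    Interface->u.Submit.gpu                   = 0; /* First GPU core. */
++    Interface->u.Submit.kernel_command        = 0; /* User command buffer. */
++    Interface->u.Submit.command_buffer_handle = 0; /* Hypothetical handle. */
++    Interface->u.Submit.offset                = 0;
++    Interface->u.Submit.command_buffer        = Buffer;
++    Interface->u.Submit.command_buffer_length = Bytes;
++}
++#endif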
++
++enum {
++ gcvTA_COMMAND_INIT,
++ gcvTA_COMMAND_DISPATCH,
++
++ gcvTA_CALLBACK_ALLOC_SECURE_MEM,
++ gcvTA_CALLBACK_FREE_SECURE_MEM,
++};
++
++#endif
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_statistics.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_statistics.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_statistics.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_statistics.h 2015-07-27 23:13:06.218794344 +0200
+@@ -0,0 +1,115 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_statistics_h_
++#define __gc_hal_statistics_h_
++
++
++#define VIV_STAT_ENABLE_STATISTICS 0
++
++/* Total number of frames for which the frame time is accounted. We keep
++ storage for the frame times of this many most recent frames.
++*/
++#define VIV_STAT_FRAME_BUFFER_SIZE 30
++
++/*
++ Total number of frames sampled for a mode. This means
++
++ # of frames for HZ Current : VIV_STAT_EARLY_Z_SAMPLE_FRAMES
++ # of frames for HZ Switched : VIV_STAT_EARLY_Z_SAMPLE_FRAMES
++ +
++ --------------------------------------------------------
++ : (2 * VIV_STAT_EARLY_Z_SAMPLE_FRAMES) frames needed
++
++ IMPORTANT: This total must be smaller than VIV_STAT_FRAME_BUFFER_SIZE
++*/
++#define VIV_STAT_EARLY_Z_SAMPLE_FRAMES 7
++#define VIV_STAT_EARLY_Z_LATENCY_FRAMES 2
++
++/* Multiplication factor for previous HZ off mode. Make it more than 1.0 to advertise HZ on. */
++#define VIV_STAT_EARLY_Z_FACTOR (1.05f)
++
++/* Defines the statistical data keys monitored by the statistics module */
++typedef enum _gceSTATISTICS
++{
++ gcvFRAME_FPS = 1,
++}
++gceSTATISTICS;
++
++/* HAL statistics information. */
++typedef struct _gcsSTATISTICS_EARLYZ
++{
++ gctUINT switchBackCount;
++ gctUINT nextCheckPoint;
++ gctBOOL disabled;
++}
++gcsSTATISTICS_EARLYZ;
++
++
++/* Defines the statistical data keys monitored by the statistics module */
++typedef enum _gceSTATISTICS_Call
++{
++ gcvSTAT_ES11_GLDRAWELEMENTS = 1,
++}
++gceSTATISTICS_Call;
++
++
++/* HAL statistics information. */
++typedef struct _gcsSTATISTICS
++{
++ gctUINT64 frameTime[VIV_STAT_FRAME_BUFFER_SIZE];
++ gctUINT64 previousFrameTime;
++ gctUINT frame;
++ gcsSTATISTICS_EARLYZ earlyZ;
++ gctUINT ES11_drawElementsCount;
++ gctBOOL applyRTestVAFix;
++}
++gcsSTATISTICS;
++
++
++/* Add frame-based data into the current statistics. */
++void
++gcfSTATISTICS_AddData(
++ IN gceSTATISTICS Key,
++ IN gctUINT Value
++ );
++
++/* Marks the frame end and triggers statistical calculations and decisions. */
++void
++gcfSTATISTICS_MarkFrameEnd (
++ void
++ );
++
++/* Sets whether the dynamic HZ is disabled or not. */
++void
++gcfSTATISTICS_DisableDynamicEarlyZ (
++ IN gctBOOL Disabled
++ );
++
++/* Checks whether or not the glDrawArray function call will be discarded. */
++gctBOOL
++gcfSTATISTICS_DiscardCall(
++ gceSTATISTICS_Call Function
++ );
++
++
++#endif /*__gc_hal_statistics_h_ */
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_types.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_types.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_types.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_types.h 2015-07-27 23:13:06.218794344 +0200
+@@ -0,0 +1,1088 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++#ifndef __gc_hal_types_h_
++#define __gc_hal_types_h_
++
++#include "gc_hal_version.h"
++#include "gc_hal_options.h"
++
++#ifdef _WIN32
++#pragma warning(disable:4127) /* Conditional expression is constant (do { }
++ ** while(0)). */
++#pragma warning(disable:4100) /* Unreferenced formal parameter. */
++#pragma warning(disable:4204) /* Non-constant aggregate initializer (C99). */
++#pragma warning(disable:4131) /* Uses old-style declarator (for Bison and
++ ** Flex generated files). */
++#pragma warning(disable:4206) /* Translation unit is empty. */
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++** Platform macros.
++*/
++
++#if defined(__GNUC__)
++# define gcdHAS_ELLIPSES 1 /* GCC always has it. */
++#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
++# define gcdHAS_ELLIPSES 1 /* C99 has it. */
++#elif defined(_MSC_VER) && (_MSC_VER >= 1500)
++# define gcdHAS_ELLIPSES 1 /* MSVC 2007+ has it. */
++#elif defined(UNDER_CE)
++#if UNDER_CE >= 600
++# define gcdHAS_ELLIPSES 1
++# else
++# define gcdHAS_ELLIPSES 0
++# endif
++#else
++# error "gcdHAS_ELLIPSES: Platform could not be determined"
++#endif
++
++/******************************************************************************\
++************************************ Keyword ***********************************
++\******************************************************************************/
++
++#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))
++# define gcmINLINE inline /* C99 keyword. */
++#elif defined(__GNUC__)
++# define gcmINLINE __inline__ /* GNU keyword. */
++#elif defined(_MSC_VER) || defined(UNDER_CE)
++# define gcmINLINE __inline /* Internal keyword. */
++#else
++# error "gcmINLINE: Platform could not be determined"
++#endif
++
++/* Possible debug flags. */
++#define gcdDEBUG_NONE 0
++#define gcdDEBUG_ALL (1 << 0)
++#define gcdDEBUG_FATAL (1 << 1)
++#define gcdDEBUG_TRACE (1 << 2)
++#define gcdDEBUG_BREAK (1 << 3)
++#define gcdDEBUG_ASSERT (1 << 4)
++#define gcdDEBUG_CODE (1 << 5)
++#define gcdDEBUG_STACK (1 << 6)
++
++#define gcmIS_DEBUG(flag) ( gcdDEBUG & (flag | gcdDEBUG_ALL) )
++
++#ifndef gcdDEBUG
++#if (defined(DBG) && DBG) || defined(DEBUG) || defined(_DEBUG)
++# define gcdDEBUG gcdDEBUG_ALL
++# else
++# define gcdDEBUG gcdDEBUG_NONE
++# endif
++#endif
++
++#ifdef _USRDLL
++#ifdef _MSC_VER
++#ifdef HAL_EXPORTS
++# define HALAPI __declspec(dllexport)
++# else
++# define HALAPI __declspec(dllimport)
++# endif
++# define HALDECL __cdecl
++# else
++#ifdef HAL_EXPORTS
++# define HALAPI
++# else
++# define HALAPI extern
++# endif
++# endif
++#else
++# define HALAPI
++# define HALDECL
++#endif
++
++/******************************************************************************\
++********************************** Common Types ********************************
++\******************************************************************************/
++
++#define gcvFALSE 0
++#define gcvTRUE 1
++
++#define gcvINFINITE ((gctUINT32) ~0U)
++
++#define gcvINVALID_HANDLE ((gctHANDLE) ~0U)
++
++typedef int gctBOOL;
++typedef gctBOOL * gctBOOL_PTR;
++
++typedef int gctINT;
++typedef long gctLONG;
++typedef signed char gctINT8;
++typedef signed short gctINT16;
++typedef signed int gctINT32;
++typedef signed long long gctINT64;
++
++typedef gctINT * gctINT_PTR;
++typedef gctINT8 * gctINT8_PTR;
++typedef gctINT16 * gctINT16_PTR;
++typedef gctINT32 * gctINT32_PTR;
++typedef gctINT64 * gctINT64_PTR;
++
++typedef unsigned int gctUINT;
++typedef unsigned char gctUINT8;
++typedef unsigned short gctUINT16;
++typedef unsigned int gctUINT32;
++typedef unsigned long long gctUINT64;
++typedef unsigned long gctUINTPTR_T;
++
++typedef gctUINT * gctUINT_PTR;
++typedef gctUINT8 * gctUINT8_PTR;
++typedef gctUINT16 * gctUINT16_PTR;
++typedef gctUINT32 * gctUINT32_PTR;
++typedef gctUINT64 * gctUINT64_PTR;
++
++typedef unsigned long gctSIZE_T;
++typedef gctSIZE_T * gctSIZE_T_PTR;
++
++#ifdef __cplusplus
++# define gcvNULL 0
++#else
++# define gcvNULL ((void *) 0)
++#endif
++
++typedef float gctFLOAT;
++typedef signed int gctFIXED_POINT;
++typedef float * gctFLOAT_PTR;
++
++typedef void * gctPHYS_ADDR;
++typedef void * gctHANDLE;
++typedef void * gctFILE;
++typedef void * gctSIGNAL;
++typedef void * gctWINDOW;
++typedef void * gctIMAGE;
++typedef void * gctSYNC_POINT;
++
++typedef void * gctSEMAPHORE;
++
++typedef void * gctPOINTER;
++typedef const void * gctCONST_POINTER;
++
++typedef char gctCHAR;
++typedef char * gctSTRING;
++typedef const char * gctCONST_STRING;
++
++typedef struct _gcsCOUNT_STRING
++{
++ gctSIZE_T Length;
++ gctCONST_STRING String;
++}
++gcsCOUNT_STRING;
++
++typedef union _gcuFLOAT_UINT32
++{
++ gctFLOAT f;
++ gctUINT32 u;
++}
++gcuFLOAT_UINT32;
++
++/* Fixed point constants. */
++#define gcvZERO_X ((gctFIXED_POINT) 0x00000000)
++#define gcvHALF_X ((gctFIXED_POINT) 0x00008000)
++#define gcvONE_X ((gctFIXED_POINT) 0x00010000)
++#define gcvNEGONE_X ((gctFIXED_POINT) 0xFFFF0000)
++#define gcvTWO_X ((gctFIXED_POINT) 0x00020000)
++
++/* Stringizing macro. */
++#define gcmSTRING(Value) #Value
++
++/******************************************************************************\
++******************************* Fixed Point Math *******************************
++\******************************************************************************/
++
++#define gcmXMultiply(x1, x2) gcoMATH_MultiplyFixed(x1, x2)
++#define gcmXDivide(x1, x2) gcoMATH_DivideFixed(x1, x2)
++#define gcmXMultiplyDivide(x1, x2, x3) gcoMATH_MultiplyDivideFixed(x1, x2, x3)
++
++/* 2D Engine profile. */
++typedef struct _gcs2D_PROFILE
++{
++ /* Cycle count.
++ 32bit counter incremented every 2D clock cycle.
++ Wraps back to 0 when the counter overflows.
++ */
++ gctUINT32 cycleCount;
++
++ /* Pixels rendered by the 2D engine.
++ Resets to 0 every time it is read. */
++ gctUINT32 pixelsRendered;
++}
++gcs2D_PROFILE;
++
++/* Macro to combine four characters into a Character Code. */
++#define gcmCC(c1, c2, c3, c4) \
++( \
++ (char) (c1) \
++ | \
++ ((char) (c2) << 8) \
++ | \
++ ((char) (c3) << 16) \
++ | \
++ ((char) (c4) << 24) \
++)
++
++#define gcmPRINTABLE(c) ((((c) >= ' ') && ((c) <= '}')) ? ((c) != '%' ? (c) : ' ') : ' ')
++
++#define gcmCC_PRINT(cc) \
++ gcmPRINTABLE((char) ( (cc) & 0xFF)), \
++ gcmPRINTABLE((char) (((cc) >> 8) & 0xFF)), \
++ gcmPRINTABLE((char) (((cc) >> 16) & 0xFF)), \
++ gcmPRINTABLE((char) (((cc) >> 24) & 0xFF))
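++
++/* A minimal usage sketch (not part of the original header; the values
++ are worked out from the definitions above). gcmCC packs the first
++ character into the low byte, so gcmCC('A', 'B', 'C', 'D') yields
++ 0x44434241, and gcmCC_PRINT expands to four printable characters for
++ a "%c%c%c%c" format string:
++
++ gctUINT32 cc = gcmCC('A', 'B', 'C', 'D');
++ printf("%c%c%c%c\n", gcmCC_PRINT(cc)); // prints "ABCD"
++*/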
++
++/******************************************************************************\
++****************************** Function Parameters *****************************
++\******************************************************************************/
++
++#define IN
++#define OUT
++#define OPTIONAL
++
++/******************************************************************************\
++********************************* Status Codes *********************************
++\******************************************************************************/
++
++typedef enum _gceSTATUS
++{
++ gcvSTATUS_OK = 0,
++ gcvSTATUS_FALSE = 0,
++ gcvSTATUS_TRUE = 1,
++ gcvSTATUS_NO_MORE_DATA = 2,
++ gcvSTATUS_CACHED = 3,
++ gcvSTATUS_MIPMAP_TOO_LARGE = 4,
++ gcvSTATUS_NAME_NOT_FOUND = 5,
++ gcvSTATUS_NOT_OUR_INTERRUPT = 6,
++ gcvSTATUS_MISMATCH = 7,
++ gcvSTATUS_MIPMAP_TOO_SMALL = 8,
++ gcvSTATUS_LARGER = 9,
++ gcvSTATUS_SMALLER = 10,
++ gcvSTATUS_CHIP_NOT_READY = 11,
++ gcvSTATUS_NEED_CONVERSION = 12,
++ gcvSTATUS_SKIP = 13,
++ gcvSTATUS_DATA_TOO_LARGE = 14,
++ gcvSTATUS_INVALID_CONFIG = 15,
++ gcvSTATUS_CHANGED = 16,
++ gcvSTATUS_NOT_SUPPORT_DITHER = 17,
++ gcvSTATUS_EXECUTED = 18,
++ gcvSTATUS_TERMINATE = 19,
++
++ gcvSTATUS_CONVERT_TO_SINGLE_STREAM = 20,
++
++ gcvSTATUS_INVALID_ARGUMENT = -1,
++ gcvSTATUS_INVALID_OBJECT = -2,
++ gcvSTATUS_OUT_OF_MEMORY = -3,
++ gcvSTATUS_MEMORY_LOCKED = -4,
++ gcvSTATUS_MEMORY_UNLOCKED = -5,
++ gcvSTATUS_HEAP_CORRUPTED = -6,
++ gcvSTATUS_GENERIC_IO = -7,
++ gcvSTATUS_INVALID_ADDRESS = -8,
++ gcvSTATUS_CONTEXT_LOSSED = -9,
++ gcvSTATUS_TOO_COMPLEX = -10,
++ gcvSTATUS_BUFFER_TOO_SMALL = -11,
++ gcvSTATUS_INTERFACE_ERROR = -12,
++ gcvSTATUS_NOT_SUPPORTED = -13,
++ gcvSTATUS_MORE_DATA = -14,
++ gcvSTATUS_TIMEOUT = -15,
++ gcvSTATUS_OUT_OF_RESOURCES = -16,
++ gcvSTATUS_INVALID_DATA = -17,
++ gcvSTATUS_INVALID_MIPMAP = -18,
++ gcvSTATUS_NOT_FOUND = -19,
++ gcvSTATUS_NOT_ALIGNED = -20,
++ gcvSTATUS_INVALID_REQUEST = -21,
++ gcvSTATUS_GPU_NOT_RESPONDING = -22,
++ gcvSTATUS_TIMER_OVERFLOW = -23,
++ gcvSTATUS_VERSION_MISMATCH = -24,
++ gcvSTATUS_LOCKED = -25,
++ gcvSTATUS_INTERRUPTED = -26,
++ gcvSTATUS_DEVICE = -27,
++ gcvSTATUS_NOT_MULTI_PIPE_ALIGNED = -28,
++
++ /* Linker errors. */
++ gcvSTATUS_GLOBAL_TYPE_MISMATCH = -1000,
++ gcvSTATUS_TOO_MANY_ATTRIBUTES = -1001,
++ gcvSTATUS_TOO_MANY_UNIFORMS = -1002,
++ gcvSTATUS_TOO_MANY_VARYINGS = -1003,
++ gcvSTATUS_UNDECLARED_VARYING = -1004,
++ gcvSTATUS_VARYING_TYPE_MISMATCH = -1005,
++ gcvSTATUS_MISSING_MAIN = -1006,
++ gcvSTATUS_NAME_MISMATCH = -1007,
++ gcvSTATUS_INVALID_INDEX = -1008,
++ gcvSTATUS_UNIFORM_TYPE_MISMATCH = -1009,
++
++ /* Compiler errors. */
++ gcvSTATUS_COMPILER_FE_PREPROCESSOR_ERROR = -2000,
++ gcvSTATUS_COMPILER_FE_PARSER_ERROR = -2001,
++}
++gceSTATUS;
++
++/******************************************************************************\
++********************************* Status Macros ********************************
++\******************************************************************************/
++
++#define gcmIS_ERROR(status) (status < 0)
++#define gcmNO_ERROR(status) (status >= 0)
++#define gcmIS_SUCCESS(status) (status == gcvSTATUS_OK)
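++
++/* A minimal sketch of the intended pattern: negative codes are errors,
++ non-negative codes are success or informational results.
++
++ gceSTATUS status = gcvSTATUS_OUT_OF_MEMORY; // -3
++ gcmIS_ERROR(status) // true: status < 0
++ gcmIS_SUCCESS(status) // false: only gcvSTATUS_OK (0) qualifies
++ gcmNO_ERROR(gcvSTATUS_NO_MORE_DATA) // true: 2 >= 0
++*/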
++
++/******************************************************************************\
++********************************* Field Macros *********************************
++\******************************************************************************/
++
++#define __gcmSTART(reg_field) \
++ (0 ? reg_field)
++
++#define __gcmEND(reg_field) \
++ (1 ? reg_field)
++
++#define __gcmGETSIZE(reg_field) \
++ (__gcmEND(reg_field) - __gcmSTART(reg_field) + 1)
++
++#define __gcmALIGN(data, reg_field) \
++ (((gctUINT32) (data)) << __gcmSTART(reg_field))
++
++#define __gcmMASK(reg_field) \
++ ((gctUINT32) ((__gcmGETSIZE(reg_field) == 32) \
++ ? ~0 \
++ : (~(~0 << __gcmGETSIZE(reg_field)))))
++
++/*******************************************************************************
++**
++** gcmFIELDMASK
++**
++** Get aligned field mask.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++*/
++#define gcmFIELDMASK(reg, field) \
++( \
++ __gcmALIGN(__gcmMASK(reg##_##field), reg##_##field) \
++)
++
++/*******************************************************************************
++**
++** gcmGETFIELD
++**
++** Extract the value of a field from specified data.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++*/
++#define gcmGETFIELD(data, reg, field) \
++( \
++ ((((gctUINT32) (data)) >> __gcmSTART(reg##_##field)) \
++ & __gcmMASK(reg##_##field)) \
++)
++
++/*******************************************************************************
++**
++** gcmSETFIELD
++**
++** Set the value of a field within specified data.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++** value Value for field.
++*/
++#define gcmSETFIELD(data, reg, field, value) \
++( \
++ (((gctUINT32) (data)) \
++ & ~__gcmALIGN(__gcmMASK(reg##_##field), reg##_##field)) \
++ | __gcmALIGN((gctUINT32) (value) \
++ & __gcmMASK(reg##_##field), reg##_##field) \
++)
++
++/*******************************************************************************
++**
++** gcmSETFIELDVALUE
++**
++** Set the value of a field within specified data with a
++** predefined value.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++** value Name of the value within the field.
++*/
++#define gcmSETFIELDVALUE(data, reg, field, value) \
++( \
++ (((gctUINT32) (data)) \
++ & ~__gcmALIGN(__gcmMASK(reg##_##field), reg##_##field)) \
++ | __gcmALIGN(reg##_##field##_##value \
++ & __gcmMASK(reg##_##field), reg##_##field) \
++)
++
++/*******************************************************************************
++**
++** gcmGETMASKEDFIELDMASK
++**
++** Determine field mask of a masked field.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++*/
++#define gcmGETMASKEDFIELDMASK(reg, field) \
++( \
++ gcmSETFIELD(0, reg, field, ~0) | \
++ gcmSETFIELD(0, reg, MASK_ ## field, ~0) \
++)
++
++/*******************************************************************************
++**
++** gcmSETMASKEDFIELD
++**
++** Set the value of a masked field with specified data.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++** value Value for field.
++*/
++#define gcmSETMASKEDFIELD(reg, field, value) \
++( \
++ gcmSETFIELD (~0, reg, field, value) & \
++ gcmSETFIELDVALUE(~0, reg, MASK_ ## field, ENABLED) \
++)
++
++/*******************************************************************************
++**
++** gcmSETMASKEDFIELDVALUE
++**
++** Set the value of a masked field with specified data.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++** value Value for field.
++*/
++#define gcmSETMASKEDFIELDVALUE(reg, field, value) \
++( \
++ gcmSETFIELDVALUE(~0, reg, field, value) & \
++ gcmSETFIELDVALUE(~0, reg, MASK_ ## field, ENABLED) \
++)
++
++/*******************************************************************************
++**
++** gcmVERIFYFIELDVALUE
++**
++** Verify if the value of a field within specified data equals a
++** predefined value.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++** value Name of the value within the field.
++*/
++#define gcmVERIFYFIELDVALUE(data, reg, field, value) \
++( \
++ (((gctUINT32) (data)) >> __gcmSTART(reg##_##field) & \
++ __gcmMASK(reg##_##field)) \
++ == \
++ (reg##_##field##_##value & __gcmMASK(reg##_##field)) \
++)
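++
++/* A minimal usage sketch. Register fields appear to be defined as
++ "MSB : LSB" pairs, so that __gcmSTART ((0 ? msb : lsb)) selects the
++ LSB and __gcmEND ((1 ? msb : lsb)) selects the MSB. AQ_FOO and its
++ BAR field below are hypothetical names used only for illustration:
++
++ #define AQ_FOO_BAR 7 : 4 // bits 7..4 of register AQ_FOO
++
++ gctUINT32 data = 0;
++ data = gcmSETFIELD(data, AQ_FOO, BAR, 5); // data == 0x00000050
++ gcmGETFIELD(data, AQ_FOO, BAR) // yields 5
++ gcmFIELDMASK(AQ_FOO, BAR) // yields 0x000000F0
++*/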
++
++/*******************************************************************************
++** Bit field macros.
++*/
++
++#define __gcmSTARTBIT(Field) \
++ ( 1 ? Field )
++
++#define __gcmBITSIZE(Field) \
++ ( 0 ? Field )
++
++#define __gcmBITMASK(Field) \
++( \
++ (1 << __gcmBITSIZE(Field)) - 1 \
++)
++
++#define gcmGETBITS(Value, Type, Field) \
++( \
++ ( ((Type) (Value)) >> __gcmSTARTBIT(Field) ) \
++ & \
++ __gcmBITMASK(Field) \
++)
++
++#define gcmSETBITS(Value, Type, Field, NewValue) \
++( \
++ ( ((Type) (Value)) \
++ & ~(__gcmBITMASK(Field) << __gcmSTARTBIT(Field)) \
++ ) \
++ | \
++ ( ( ((Type) (NewValue)) \
++ & __gcmBITMASK(Field) \
++ ) << __gcmSTARTBIT(Field) \
++ ) \
++)
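++
++/* A minimal sketch. Unlike the register field macros above, these
++ appear to expect fields defined as "START : SIZE" pairs, since
++ __gcmSTARTBIT takes the first branch of the ternary and __gcmBITSIZE
++ the second. FOO_FIELD below is hypothetical:
++
++ #define FOO_FIELD 8 : 4 // 4 bits starting at bit 8
++
++ gcmSETBITS(0, gctUINT32, FOO_FIELD, 0x5) // yields 0x00000500
++ gcmGETBITS(0x500, gctUINT32, FOO_FIELD) // yields 0x5
++*/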
++
++/*******************************************************************************
++**
++** gcmISINREGRANGE
++**
++** Verify whether the specified address is in the register range.
++**
++** ARGUMENTS:
++**
++** Address Address to be verified.
++** Name Name of a register.
++*/
++
++#define gcmISINREGRANGE(Address, Name) \
++( \
++ ((Address & (~0U << Name ## _LSB)) == (Name ## _Address >> 2)) \
++)
++
++/*******************************************************************************
++**
++** A set of macros to aid state loading.
++**
++** ARGUMENTS:
++**
++** CommandBuffer Pointer to a gcoCMDBUF object.
++** StateDelta Pointer to a gcsSTATE_DELTA state delta structure.
++** Memory Destination memory pointer of gctUINT32_PTR type.
++** PartOfContext Whether or not the state is a part of the context.
++** FixedPoint Whether or not the state is of the fixed point format.
++** Count Number of consecutive states to be loaded.
++** Address State address.
++** Data Data to be set to the state.
++*/
++
++/*----------------------------------------------------------------------------*/
++
++#if gcmIS_DEBUG(gcdDEBUG_CODE)
++
++# define gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count) \
++ CommandBuffer->lastLoadStatePtr = gcmPTR_TO_UINT64(Memory); \
++ CommandBuffer->lastLoadStateAddress = Address; \
++ CommandBuffer->lastLoadStateCount = Count
++
++# define gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address) \
++ gcmASSERT( \
++ (gctUINT) (Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastLoadStatePtr, gctUINT32_PTR) - 1) \
++ == \
++ (gctUINT) (Address - CommandBuffer->lastLoadStateAddress) \
++ ); \
++ \
++ gcmASSERT(CommandBuffer->lastLoadStateCount > 0); \
++ \
++ CommandBuffer->lastLoadStateCount -= 1
++
++# define gcmVERIFYLOADSTATEDONE(CommandBuffer) \
++ gcmASSERT(CommandBuffer->lastLoadStateCount == 0)
++
++#else
++
++# define gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count)
++# define gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address)
++# define gcmVERIFYLOADSTATEDONE(CommandBuffer)
++
++#endif
++
++#if gcdSECURE_USER
++
++# define gcmDEFINESECUREUSER() \
++ gctUINT __secure_user_offset__; \
++ gctUINT32_PTR __secure_user_hintArray__;
++
++# define gcmBEGINSECUREUSER() \
++ __secure_user_offset__ = reserve->lastOffset; \
++ \
++ __secure_user_hintArray__ = gcmUINT64_TO_PTR(reserve->hintArrayTail)
++
++# define gcmENDSECUREUSER() \
++ reserve->hintArrayTail = gcmPTR_TO_UINT64(__secure_user_hintArray__)
++
++# define gcmSKIPSECUREUSER() \
++ __secure_user_offset__ += gcmSIZEOF(gctUINT32)
++
++# define gcmUPDATESECUREUSER() \
++ *__secure_user_hintArray__ = __secure_user_offset__; \
++ \
++ __secure_user_offset__ += gcmSIZEOF(gctUINT32); \
++ __secure_user_hintArray__ += 1
++
++#else
++
++# define gcmDEFINESECUREUSER()
++# define gcmBEGINSECUREUSER()
++# define gcmENDSECUREUSER()
++# define gcmSKIPSECUREUSER()
++# define gcmUPDATESECUREUSER()
++
++#endif
++
++/*----------------------------------------------------------------------------*/
++
++#if gcdDUMP
++# define gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, Data) \
++ if (FixedPoint) \
++ { \
++ gcmDUMP(gcvNULL, "@[state.x 0x%04X 0x%08X]", \
++ Address, Data \
++ ); \
++ } \
++ else \
++ { \
++ gcmDUMP(gcvNULL, "@[state 0x%04X 0x%08X]", \
++ Address, Data \
++ ); \
++ }
++#else
++# define gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, Data)
++#endif
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmDEFINESTATEBUFFER(CommandBuffer, StateDelta, Memory, ReserveSize) \
++ gcmDEFINESECUREUSER() \
++ gctSIZE_T ReserveSize; \
++ gcoCMDBUF CommandBuffer; \
++ gctUINT32_PTR Memory; \
++ gcsSTATE_DELTA_PTR StateDelta
++
++#define gcmBEGINSTATEBUFFER(Hardware, CommandBuffer, StateDelta, Memory, ReserveSize) \
++{ \
++ gcmONERROR(gcoBUFFER_Reserve( \
++ Hardware->buffer, ReserveSize, gcvTRUE, &CommandBuffer \
++ )); \
++ \
++ Memory = gcmUINT64_TO_PTR(CommandBuffer->lastReserve); \
++ \
++ StateDelta = Hardware->delta; \
++ \
++ gcmBEGINSECUREUSER(); \
++}
++
++#define gcmENDSTATEBUFFER(CommandBuffer, Memory, ReserveSize) \
++{ \
++ gcmENDSECUREUSER(); \
++ \
++ gcmASSERT( \
++ gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT8_PTR) + ReserveSize \
++ == \
++ (gctUINT8_PTR) Memory \
++ ); \
++}
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, Count) \
++{ \
++ gcmASSERT(((Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT32_PTR)) & 1) == 0); \
++ gcmASSERT((gctUINT32)Count <= 1024); \
++ \
++ gcmVERIFYLOADSTATEDONE(CommandBuffer); \
++ \
++ gcmSTORELOADSTATE(CommandBuffer, Memory, Address, Count); \
++ \
++ *Memory++ \
++ = gcmSETFIELDVALUE(0, AQ_COMMAND_LOAD_STATE_COMMAND, OPCODE, LOAD_STATE) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, FLOAT, FixedPoint) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, COUNT, Count) \
++ | gcmSETFIELD (0, AQ_COMMAND_LOAD_STATE_COMMAND, ADDRESS, Address); \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++#define gcmENDSTATEBATCH(CommandBuffer, Memory) \
++{ \
++ gcmVERIFYLOADSTATEDONE(CommandBuffer); \
++ \
++ gcmASSERT(((Memory - gcmUINT64_TO_TYPE(CommandBuffer->lastReserve, gctUINT32_PTR)) & 1) == 0); \
++}
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmSETSTATEDATA(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcoHARDWARE_UpdateDelta( \
++ StateDelta, FixedPoint, Address, 0, __temp_data32__ \
++ ); \
++ \
++ gcmDUMPSTATEDATA(StateDelta, FixedPoint, Address, __temp_data32__); \
++ \
++ gcmUPDATESECUREUSER(); \
++}
++
++#define gcmSETCTRLSTATE(StateDelta, CommandBuffer, Memory, Address, Data) \
++{ \
++ gctUINT32 __temp_data32__; \
++ \
++ gcmVERIFYLOADSTATE(CommandBuffer, Memory, Address); \
++ \
++ __temp_data32__ = Data; \
++ \
++ *Memory++ = __temp_data32__; \
++ \
++ gcmDUMPSTATEDATA(StateDelta, gcvFALSE, Address, __temp_data32__); \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++#define gcmSETFILLER(CommandBuffer, Memory) \
++{ \
++ gcmVERIFYLOADSTATEDONE(CommandBuffer); \
++ \
++ Memory += 1; \
++ \
++ gcmSKIPSECUREUSER(); \
++}
++
++/*----------------------------------------------------------------------------*/
++
++#define gcmSETSINGLESTATE(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETSTATEDATA(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data); \
++ gcmENDSTATEBATCH(CommandBuffer, Memory); \
++}
++
++#define gcmSETSINGLECTRLSTATE(StateDelta, CommandBuffer, Memory, FixedPoint, \
++ Address, Data) \
++{ \
++ gcmBEGINSTATEBATCH(CommandBuffer, Memory, FixedPoint, Address, 1); \
++ gcmSETCTRLSTATE(StateDelta, CommandBuffer, Memory, Address, Data); \
++ gcmENDSTATEBATCH(CommandBuffer, Memory); \
++}
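++
++/* A minimal sketch of how the macros above compose, inside a function
++ that provides the OnError label required by gcmONERROR; the state
++ address 0x0E00 and 'value' are hypothetical:
++
++ gcmDEFINESTATEBUFFER(cmdBuf, delta, memory, reserveSize);
++
++ reserveSize = 2 * gcmSIZEOF(gctUINT32); // batch header + one state
++ gcmBEGINSTATEBUFFER(hardware, cmdBuf, delta, memory, reserveSize);
++ gcmSETSINGLESTATE(delta, cmdBuf, memory, gcvFALSE, 0x0E00, value);
++ gcmENDSTATEBUFFER(cmdBuf, memory, reserveSize);
++*/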
++
++
++/*******************************************************************************
++**
++** gcmSETSTARTDECOMMAND
++**
++** Form a START_DE command.
++**
++** ARGUMENTS:
++**
++** Memory Destination memory pointer of gctUINT32_PTR type.
++** Count Number of the rectangles.
++*/
++
++#define gcmSETSTARTDECOMMAND(Memory, Count) \
++{ \
++ *Memory++ \
++ = gcmSETFIELDVALUE(0, AQ_COMMAND_START_DE_COMMAND, OPCODE, START_DE) \
++ | gcmSETFIELD (0, AQ_COMMAND_START_DE_COMMAND, COUNT, Count) \
++ | gcmSETFIELD (0, AQ_COMMAND_START_DE_COMMAND, DATA_COUNT, 0); \
++ \
++ *Memory++ = 0xDEADDEED; \
++}
++
++/******************************************************************************\
++******************************** Ceiling Macro ********************************
++\******************************************************************************/
++#define gcmCEIL(x) ((x - (gctUINT32)x) == 0 ? (gctUINT32)x : (gctUINT32)x + 1)
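++
++/* A minimal sketch: gcmCEIL rounds a fractional value up. It appears
++ intended for non-negative inputs only, since a negative x converted
++ to gctUINT32 would not compare meaningfully:
++
++ gcmCEIL(2.0f) // yields 2
++ gcmCEIL(2.3f) // yields 3
++*/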
++
++/******************************************************************************\
++******************************** Min/Max Macros ********************************
++\******************************************************************************/
++
++#define gcmMIN(x, y) (((x) <= (y)) ? (x) : (y))
++#define gcmMAX(x, y) (((x) >= (y)) ? (x) : (y))
++#define gcmCLAMP(x, min, max) (((x) < (min)) ? (min) : \
++ ((x) > (max)) ? (max) : (x))
++#define gcmABS(x) (((x) < 0) ? -(x) : (x))
++#define gcmNEG(x) (((x) < 0) ? (x) : -(x))
++
++/*******************************************************************************
++**
++** gcmPTR2INT
++**
++** Convert a pointer to an integer value.
++**
++** ARGUMENTS:
++**
++** p Pointer value.
++*/
++#if defined(_WIN32) || (defined(__LP64__) && __LP64__)
++# define gcmPTR2INT(p) \
++ ( \
++ (gctUINT32) (gctUINT64) (p) \
++ )
++#else
++# define gcmPTR2INT(p) \
++ ( \
++ (gctUINT32) (p) \
++ )
++#endif
++
++/*******************************************************************************
++**
++** gcmINT2PTR
++**
++** Convert an integer value into a pointer.
++**
++** ARGUMENTS:
++**
++** v Integer value.
++*/
++#ifdef __LP64__
++# define gcmINT2PTR(i) \
++ ( \
++ (gctPOINTER) (gctINT64) (i) \
++ )
++#else
++# define gcmINT2PTR(i) \
++ ( \
++ (gctPOINTER) (i) \
++ )
++#endif
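++
++/* A minimal sketch of the intended round trip. Note that on LP64
++ platforms gcmPTR2INT truncates the pointer to 32 bits, so the pair
++ only round-trips for addresses that fit in a gctUINT32:
++
++ gctUINT32 handle = gcmPTR2INT(pointer);
++ gctPOINTER back = gcmINT2PTR(handle); // equals 'pointer' only if
++ // the address fits in 32 bits
++*/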
++
++/*******************************************************************************
++**
++** gcmOFFSETOF
++**
++** Compute the byte offset of a field inside a structure.
++**
++** ARGUMENTS:
++**
++** s Structure name.
++** field Field name.
++*/
++#define gcmOFFSETOF(s, field) \
++( \
++ gcmPTR2INT(& (((struct s *) 0)->field)) \
++)
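++
++/* A minimal sketch, equivalent in spirit to the standard offsetof();
++ the structure below is hypothetical:
++
++ struct example { gctUINT32 a; gctUINT64 b; };
++ gcmOFFSETOF(example, b) // byte offset of 'b' within struct example
++*/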
++
++#define gcmSWAB32(x) ((gctUINT32)( \
++ (((gctUINT32)(x) & (gctUINT32)0x000000FFUL) << 24) | \
++ (((gctUINT32)(x) & (gctUINT32)0x0000FF00UL) << 8) | \
++ (((gctUINT32)(x) & (gctUINT32)0x00FF0000UL) >> 8) | \
++ (((gctUINT32)(x) & (gctUINT32)0xFF000000UL) >> 24)))
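++
++/* A minimal sketch: gcmSWAB32 reverses the byte order of a 32-bit
++ value, e.g. for endian conversion:
++
++ gcmSWAB32(0x11223344) // yields 0x44332211
++*/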
++
++/*******************************************************************************
++***** Database ****************************************************************/
++
++typedef struct _gcsDATABASE_COUNTERS
++{
++ /* Number of currently allocated bytes. */
++ gctUINT64 bytes;
++
++ /* Maximum number of bytes allocated (memory footprint). */
++ gctUINT64 maxBytes;
++
++ /* Total number of bytes allocated. */
++ gctUINT64 totalBytes;
++}
++gcsDATABASE_COUNTERS;
++
++typedef struct _gcuDATABASE_INFO
++{
++ /* Counters. */
++ gcsDATABASE_COUNTERS counters;
++
++ /* Time value. */
++ gctUINT64 time;
++}
++gcuDATABASE_INFO;
++
++/*******************************************************************************
++***** Frame database **********************************************************/
++
++/* gcsHAL_FRAME_INFO */
++typedef struct _gcsHAL_FRAME_INFO
++{
++ /* Current timer tick. */
++ OUT gctUINT64 ticks;
++
++ /* Bandwidth counters. */
++ OUT gctUINT readBytes8[8];
++ OUT gctUINT writeBytes8[8];
++
++ /* Counters. */
++ OUT gctUINT cycles[8];
++ OUT gctUINT idleCycles[8];
++ OUT gctUINT mcCycles[8];
++ OUT gctUINT readRequests[8];
++ OUT gctUINT writeRequests[8];
++
++ /* FE counters. */
++ OUT gctUINT drawCount;
++ OUT gctUINT vertexOutCount;
++ OUT gctUINT vertexMissCount;
++
++ /* 3D counters. */
++ OUT gctUINT vertexCount;
++ OUT gctUINT primitiveCount;
++ OUT gctUINT rejectedPrimitives;
++ OUT gctUINT culledPrimitives;
++ OUT gctUINT clippedPrimitives;
++ OUT gctUINT droppedPrimitives;
++ OUT gctUINT frustumClippedPrimitives;
++ OUT gctUINT outPrimitives;
++ OUT gctUINT inPrimitives;
++ OUT gctUINT culledQuadCount;
++ OUT gctUINT totalQuadCount;
++ OUT gctUINT quadCount;
++ OUT gctUINT totalPixelCount;
++
++ /* PE counters. */
++ OUT gctUINT colorKilled[8];
++ OUT gctUINT colorDrawn[8];
++ OUT gctUINT depthKilled[8];
++ OUT gctUINT depthDrawn[8];
++
++ /* Shader counters. */
++ OUT gctUINT shaderCycles;
++ OUT gctUINT vsInstructionCount;
++ OUT gctUINT vsTextureCount;
++ OUT gctUINT vsBranchCount;
++ OUT gctUINT vsVertices;
++ OUT gctUINT psInstructionCount;
++ OUT gctUINT psTextureCount;
++ OUT gctUINT psBranchCount;
++ OUT gctUINT psPixels;
++
++ /* Texture counters. */
++ OUT gctUINT bilinearRequests;
++ OUT gctUINT trilinearRequests;
++ OUT gctUINT txBytes8[2];
++ OUT gctUINT txHitCount;
++ OUT gctUINT txMissCount;
++}
++gcsHAL_FRAME_INFO;
++
++typedef enum _gcePATCH_ID
++{
++ gcePATCH_UNKNOWN = 0xFFFFFFFF,
++
++ /* Benchmark list*/
++ gcePATCH_GLB11 = 0x0,
++ gcePATCH_GLB21,
++ gcePATCH_GLB25,
++ gcePATCH_GLB27,
++
++ gcePATCH_BM21,
++ gcePATCH_MM,
++ gcePATCH_MM06,
++ gcePATCH_MM07,
++ gcePATCH_QUADRANT,
++ gcePATCH_ANTUTU,
++ gcePATCH_SMARTBENCH,
++ gcePATCH_JPCT,
++ gcePATCH_NENAMARK,
++ gcePATCH_NENAMARK2,
++ gcePATCH_NEOCORE,
++ gcePATCH_GLB,
++ gcePATCH_GB,
++ gcePATCH_RTESTVA,
++ gcePATCH_BMX,
++ gcePATCH_BMGUI,
++
++ /* Game list */
++ gcePATCH_NBA2013,
++ gcePATCH_BARDTALE,
++ gcePATCH_BUSPARKING3D,
++ gcePATCH_FISHBOODLE,
++ gcePATCH_SUBWAYSURFER,
++ gcePATCH_HIGHWAYDRIVER,
++ gcePATCH_PREMIUM,
++ gcePATCH_RACEILLEGAL,
++ gcePATCH_BLABLA,
++ gcePATCH_MEGARUN,
++ gcePATCH_GALAXYONFIRE2,
++ gcePATCH_GLOFTR3HM,
++ gcePATCH_GLOFTSXHM,
++ gcePATCH_GLOFTF3HM,
++ gcePATCH_GLOFTGANG,
++ gcePATCH_XRUNNER,
++ gcePATCH_WP,
++ gcePATCH_DEVIL,
++ gcePATCH_HOLYARCH,
++ gcePATCH_MUSE,
++ gcePATCH_SG,
++ gcePATCH_SIEGECRAFT,
++ gcePATCH_CARCHALLENGE,
++ gcePATCH_HEROESCALL,
++ gcePATCH_MONOPOLY,
++ gcePATCH_CTGL20,
++ gcePATCH_FIREFOX,
++ gcePATCH_CHORME,
++ gcePATCH_DUOKANTV,
++ gcePATCH_TESTAPP,
++ gcePATCH_GOOGLEEARTH,
++ gcePATCH_SF4,
++ gcePATCH_SPEEDRACE,
++ gcePATCH_AIRNAVY,
++ gcePATCH_F18NEW,
++ gcePATCH_F18,
++ gcePATCH_WISTONESG,
++ gcvPATCH_VECUNIT_RED,
++ gcvPATCH_NAMESGAS,
++ gcvPATCH_AFTERBURNER,
++ gcvPATCH_UIMARK,
++ /* Count enum*/
++ gcePATCH_COUNT,
++}
++gcePATCH_ID;
++
++#if gcdLINK_QUEUE_SIZE
++typedef struct _gckLINKDATA * gckLINKDATA;
++struct _gckLINKDATA
++{
++ gctUINT32 start;
++ gctUINT32 end;
++ gctINT pid;
++};
++
++typedef struct _gckLINKQUEUE * gckLINKQUEUE;
++struct _gckLINKQUEUE
++{
++ struct _gckLINKDATA data[gcdLINK_QUEUE_SIZE];
++ gctUINT32 rear;
++ gctUINT32 front;
++ gctUINT32 count;
++};
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_types_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_version.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_version.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_version.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_version.h 2015-07-27 23:13:06.218794344 +0200
+@@ -0,0 +1,37 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_version_h_
++#define __gc_hal_version_h_
++
++#define gcvVERSION_MAJOR 4
++
++#define gcvVERSION_MINOR 6
++
++#define gcvVERSION_PATCH 9
++
++#define gcvVERSION_BUILD 9754
++
++#define gcvVERSION_DATE __DATE__
++
++#define gcvVERSION_TIME __TIME__
++
++#endif /* __gc_hal_version_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_vg.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_vg.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_vg.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/kernel/inc/gc_hal_vg.h 2015-07-27 23:13:06.218794344 +0200
+@@ -0,0 +1,913 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_vg_h_
++#define __gc_hal_vg_h_
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++
++#include "gc_hal_rename.h"
++#include "gc_hal_types.h"
++#include "gc_hal_enum.h"
++#include "gc_hal_base.h"
++
++#if gcdENABLE_VG
++
++/* Thread routine type. */
++#if defined(LINUX)
++ typedef gctINT gctTHREADFUNCRESULT;
++ typedef gctPOINTER gctTHREADFUNCPARAMETER;
++# define gctTHREADFUNCTYPE
++#elif defined(WIN32)
++ typedef gctUINT gctTHREADFUNCRESULT;
++ typedef gctPOINTER gctTHREADFUNCPARAMETER;
++# define gctTHREADFUNCTYPE __stdcall
++#elif defined(__QNXNTO__)
++ typedef void * gctTHREADFUNCRESULT;
++ typedef gctPOINTER gctTHREADFUNCPARAMETER;
++# define gctTHREADFUNCTYPE
++#endif
++
++typedef gctTHREADFUNCRESULT (gctTHREADFUNCTYPE * gctTHREADFUNC) (
++ gctTHREADFUNCPARAMETER ThreadParameter
++ );
++
++
++#if defined(gcvDEBUG)
++# undef gcvDEBUG
++#endif
++
++#define gcdFORCE_DEBUG 0
++#define gcdFORCE_MESSAGES 0
++
++
++#if DBG || defined(DEBUG) || defined(_DEBUG) || gcdFORCE_DEBUG
++# define gcvDEBUG 1
++#else
++# define gcvDEBUG 0
++#endif
++
++#define _gcmERROR_RETURN(prefix, func) \
++ status = func; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ prefix##PRINT_VERSION(); \
++ prefix##TRACE(gcvLEVEL_ERROR, \
++ #prefix "ERR_RETURN: status=%d(%s) @ %s(%d)", \
++ status, gcoOS_DebugStatus2Name(status), __FUNCTION__, __LINE__); \
++ return status; \
++ } \
++ do { } while (gcvFALSE)
++
++#define gcmERROR_RETURN(func) _gcmERROR_RETURN(gcm, func)
++
++#define gcmLOG_LOCATION()
++
++#define gcmkIS_ERROR(status) (status < 0)
++
++#define gcmALIGNDOWN(n, align) \
++( \
++ (n) & ~((align) - 1) \
++)
++
++#define gcmIS_VALID_INDEX(Index, Array) \
++ (((gctUINT) (Index)) < gcmCOUNTOF(Array))
++
++
++#define gcmIS_NAN(x) \
++( \
++ ((* (gctUINT32_PTR) &(x)) & 0x7FFFFFFF) == 0x7FFFFFFF \
++)
++
++#define gcmLERP(v1, v2, w) \
++ ((v1) * (w) + (v2) * (1.0f - (w)))
++
++#define gcmINTERSECT(Start1, Start2, Length) \
++ (gcmABS((Start1) - (Start2)) < (Length))
++
++/*******************************************************************************
++**
++** gcmERR_GOTO
++**
++** Prints a message and terminates the current loop on error.
++**
++** ASSUMPTIONS:
++**
++** 'status' variable of gceSTATUS type must be defined.
++**
++** ARGUMENTS:
++**
++** Function
++** Function to evaluate.
++*/
++
++#define gcmERR_GOTO(Function) \
++ status = Function; \
++ if (gcmIS_ERROR(status)) \
++ { \
++ gcmTRACE( \
++ gcvLEVEL_ERROR, \
++ "gcmERR_GOTO: status=%d @ line=%d in function %s.\n", \
++ status, __LINE__, __FUNCTION__ \
++ ); \
++ goto ErrorHandler; \
++ }
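++
++/* A minimal usage sketch, per the ASSUMPTIONS above: 'status' of type
++ gceSTATUS must be defined and an ErrorHandler label must exist.
++ DoSomething, DoSomethingElse and Cleanup are hypothetical:
++
++ gceSTATUS status;
++
++ gcmERR_GOTO(DoSomething());
++ gcmERR_GOTO(DoSomethingElse());
++ return gcvSTATUS_OK;
++
++ ErrorHandler:
++ Cleanup();
++ return status;
++*/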
++
++#if gcvDEBUG || gcdFORCE_MESSAGES
++# define gcmVERIFY_BOOLEAN(Expression) \
++ gcmASSERT( \
++ ( (Expression) == gcvFALSE ) || \
++ ( (Expression) == gcvTRUE ) \
++ )
++#else
++# define gcmVERIFY_BOOLEAN(Expression)
++#endif
++
++/*******************************************************************************
++**
++** gcmVERIFYFIELDFIT
++**
++** Verify whether the value fits in the field.
++**
++** ARGUMENTS:
++**
++** data Data value.
++** reg Name of register.
++** field Name of field within register.
++** value Value for field.
++*/
++#define gcmVERIFYFIELDFIT(reg, field, value) \
++ gcmASSERT( \
++ (value) <= gcmFIELDMAX(reg, field) \
++ )
++/*******************************************************************************
++**
++** gcmFIELDMAX
++**
++** Get field maximum value.
++**
++** ARGUMENTS:
++**
++** reg Name of register.
++** field Name of field within register.
++*/
++#define gcmFIELDMAX(reg, field) \
++( \
++ (gctUINT32) \
++ ( \
++ (__gcmGETSIZE(reg##_##field) == 32) \
++ ? ~0 \
++ : (~(~0 << __gcmGETSIZE(reg##_##field))) \
++ ) \
++)
++
++
++/* ANSI C does not have the 'f' functions; define replacements here. */
++#define gcmSINF(x) ((gctFLOAT) sin(x))
++#define gcmCOSF(x) ((gctFLOAT) cos(x))
++#define gcmASINF(x) ((gctFLOAT) asin(x))
++#define gcmACOSF(x) ((gctFLOAT) acos(x))
++#define gcmSQRTF(x) ((gctFLOAT) sqrt(x))
++#define gcmFABSF(x) ((gctFLOAT) fabs(x))
++#define gcmFMODF(x, y) ((gctFLOAT) fmod((x), (y)))
++#define gcmCEILF(x) ((gctFLOAT) ceil(x))
++#define gcmFLOORF(x) ((gctFLOAT) floor(x))
++
++
++
++/* Fixed point constants. */
++#define gcvZERO_X ((gctFIXED_POINT) 0x00000000)
++#define gcvHALF_X ((gctFIXED_POINT) 0x00008000)
++#define gcvONE_X ((gctFIXED_POINT) 0x00010000)
++#define gcvNEGONE_X ((gctFIXED_POINT) 0xFFFF0000)
++#define gcvTWO_X ((gctFIXED_POINT) 0x00020000)
++
++/* Integer constants. */
++#define gcvMAX_POS_INT ((gctINT) 0x7FFFFFFF)
++#define gcvMAX_NEG_INT ((gctINT) 0x80000000)
++
++/* Float constants. */
++#define gcvMAX_POS_FLOAT ((gctFLOAT) 3.4028235e+038)
++#define gcvMAX_NEG_FLOAT ((gctFLOAT) -3.4028235e+038)
++
++/******************************************************************************\
++***************************** Miscellaneous Macro ******************************
++\******************************************************************************/
++
++#define gcmKB2BYTES(Kilobyte) \
++( \
++ (Kilobyte) << 10 \
++)
++
++#define gcmMB2BYTES(Megabyte) \
++( \
++ (Megabyte) << 20 \
++)
++
++#define gcmMAT(Matrix, Row, Column) \
++( \
++ (Matrix) [(Row) * 3 + (Column)] \
++)
++
++#define gcmMAKE2CHAR(Char1, Char2) \
++( \
++ ((gctUINT16) (gctUINT8) (Char1) << 0) | \
++ ((gctUINT16) (gctUINT8) (Char2) << 8) \
++)
++
++#define gcmMAKE4CHAR(Char1, Char2, Char3, Char4) \
++( \
++ ((gctUINT32)(gctUINT8) (Char1) << 0) | \
++ ((gctUINT32)(gctUINT8) (Char2) << 8) | \
++ ((gctUINT32)(gctUINT8) (Char3) << 16) | \
++ ((gctUINT32)(gctUINT8) (Char4) << 24) \
++)
++
++/* Some platforms need to fix the physical address for HW to access. */
++#define gcmFIXADDRESS(address) \
++(\
++ (address)\
++)
++
++#define gcmkFIXADDRESS(address) \
++(\
++ (address)\
++)
++
++/******************************************************************************\
++****************************** Kernel Debug Macro ******************************
++\******************************************************************************/
++
++/* Set signal to signaled state for specified process. */
++gceSTATUS
++gckOS_SetSignal(
++ IN gckOS Os,
++ IN gctHANDLE Process,
++ IN gctSIGNAL Signal
++ );
++
++/* Return the kernel logical pointer for the given physical one. */
++gceSTATUS
++gckOS_GetKernelLogical(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ );
++
++/* Return the kernel logical pointer for the given physical one. */
++gceSTATUS
++gckOS_GetKernelLogicalEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ );
++
++/*----------------------------------------------------------------------------*/
++/*----------------------------- Semaphore Object -----------------------------*/
++
++/* Increment the value of a semaphore. */
++gceSTATUS
++gckOS_IncrementSemaphore(
++ IN gckOS Os,
++ IN gctSEMAPHORE Semaphore
++ );
++
++/* Decrement the value of a semaphore (waiting might occur). */
++gceSTATUS
++gckOS_DecrementSemaphore(
++ IN gckOS Os,
++ IN gctSEMAPHORE Semaphore
++ );
++
++
++/*----------------------------------------------------------------------------*/
++/*------------------------------- Thread Object ------------------------------*/
++
++/* Start a thread. */
++gceSTATUS
++gckOS_StartThread(
++ IN gckOS Os,
++ IN gctTHREADFUNC ThreadFunction,
++ IN gctPOINTER ThreadParameter,
++ OUT gctTHREAD * Thread
++ );
++
++/* Stop a thread. */
++gceSTATUS
++gckOS_StopThread(
++ IN gckOS Os,
++ IN gctTHREAD Thread
++ );
++
++/* Verify whether the thread is still running. */
++gceSTATUS
++gckOS_VerifyThread(
++ IN gckOS Os,
++ IN gctTHREAD Thread
++ );
++
++
++/* Construct a new gckVGKERNEL object. */
++gceSTATUS
++gckVGKERNEL_Construct(
++ IN gckOS Os,
++ IN gctPOINTER Context,
++ IN gckKERNEL inKernel,
++ OUT gckVGKERNEL * Kernel
++ );
++
++/* Destroy an gckVGKERNEL object. */
++gceSTATUS
++gckVGKERNEL_Destroy(
++ IN gckVGKERNEL Kernel
++ );
++
++/* Allocate linear video memory. */
++gceSTATUS
++gckKERNEL_AllocateLinearMemory(
++ IN gckKERNEL Kernel,
++ IN OUT gcePOOL * Pool,
++ IN gctSIZE_T Bytes,
++ IN gctSIZE_T Alignment,
++ IN gceSURF_TYPE Type,
++ OUT gcuVIDMEM_NODE_PTR * Node
++ );
++
++/* Unmap memory. */
++gceSTATUS
++gckKERNEL_UnmapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ );
++
++/* Dispatch a user-level command. */
++gceSTATUS
++gckVGKERNEL_Dispatch(
++ IN gckKERNEL Kernel,
++ IN gctBOOL FromUser,
++ IN OUT struct _gcsHAL_INTERFACE * Interface
++ );
++
++/* Query command buffer requirements. */
++gceSTATUS
++gckKERNEL_QueryCommandBuffer(
++ IN gckKERNEL Kernel,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ );
++
++#if gcdDYNAMIC_MAP_RESERVED_MEMORY
++gceSTATUS
++gckOS_MapReservedMemoryToKernel(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctINT Bytes,
++ IN OUT gctPOINTER *Virtual
++ );
++
++gceSTATUS
++gckOS_UnmapReservedMemoryFromKernel(
++ IN gctPOINTER Virtual
++ );
++#endif
++
++/******************************************************************************\
++******************************* gckVGHARDWARE Object ******************************
++\******************************************************************************/
++
++/* Construct a new gckVGHARDWARE object. */
++gceSTATUS
++gckVGHARDWARE_Construct(
++ IN gckOS Os,
++ OUT gckVGHARDWARE * Hardware
++ );
++
++/* Destroy an gckVGHARDWARE object. */
++gceSTATUS
++gckVGHARDWARE_Destroy(
++ IN gckVGHARDWARE Hardware
++ );
++
++/* Query system memory requirements. */
++gceSTATUS
++gckVGHARDWARE_QuerySystemMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * SystemSize,
++ OUT gctUINT32 * SystemBaseAddress
++ );
++
++/* Build virtual address. */
++gceSTATUS
++gckVGHARDWARE_BuildVirtualAddress(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Index,
++ IN gctUINT32 Offset,
++ OUT gctUINT32 * Address
++ );
++
++/* Kickstart the command processor. */
++gceSTATUS
++gckVGHARDWARE_Execute(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ IN gctSIZE_T Count
++ );
++
++/* Query the available memory. */
++gceSTATUS
++gckVGHARDWARE_QueryMemory(
++ IN gckVGHARDWARE Hardware,
++ OUT gctSIZE_T * InternalSize,
++ OUT gctUINT32 * InternalBaseAddress,
++ OUT gctUINT32 * InternalAlignment,
++ OUT gctSIZE_T * ExternalSize,
++ OUT gctUINT32 * ExternalBaseAddress,
++ OUT gctUINT32 * ExternalAlignment,
++ OUT gctUINT32 * HorizontalTileSize,
++ OUT gctUINT32 * VerticalTileSize
++ );
++
++/* Query the identity of the hardware. */
++gceSTATUS
++gckVGHARDWARE_QueryChipIdentity(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPMODEL* ChipModel,
++ OUT gctUINT32* ChipRevision,
++ OUT gctUINT32* ChipFeatures,
++ OUT gctUINT32* ChipMinorFeatures,
++ OUT gctUINT32* ChipMinorFeatures1
++ );
++
++/* Convert an API format. */
++gceSTATUS
++gckVGHARDWARE_ConvertFormat(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_FORMAT Format,
++ OUT gctUINT32 * BitsPerPixel,
++ OUT gctUINT32 * BytesPerTile
++ );
++
++/* Split a hardware-specific address into an API pool and offset. */
++gceSTATUS
++gckVGHARDWARE_SplitMemory(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Address,
++ OUT gcePOOL * Pool,
++ OUT gctUINT32 * Offset
++ );
++
++/* Align size to tile boundary. */
++gceSTATUS
++gckVGHARDWARE_AlignToTile(
++ IN gckVGHARDWARE Hardware,
++ IN gceSURF_TYPE Type,
++ IN OUT gctUINT32_PTR Width,
++ IN OUT gctUINT32_PTR Height
++ );
++
++/* Convert logical address to hardware specific address. */
++gceSTATUS
++gckVGHARDWARE_ConvertLogical(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ );
++
++/* Program MMU. */
++gceSTATUS
++gckVGHARDWARE_SetMMU(
++ IN gckVGHARDWARE Hardware,
++ IN gctPOINTER Logical
++ );
++
++/* Flush the MMU. */
++gceSTATUS
++gckVGHARDWARE_FlushMMU(
++ IN gckVGHARDWARE Hardware
++ );
++
++/* Get idle register. */
++gceSTATUS
++gckVGHARDWARE_GetIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32 * Data
++ );
++
++/* Flush the caches. */
++gceSTATUS
++gckVGHARDWARE_Flush(
++ IN gckVGHARDWARE Hardware,
++ IN gceKERNEL_FLUSH Flush,
++ IN gctPOINTER Logical,
++ IN OUT gctSIZE_T * Bytes
++ );
++
++/* Enable/disable fast clear. */
++gceSTATUS
++gckVGHARDWARE_SetFastClear(
++ IN gckVGHARDWARE Hardware,
++ IN gctINT Enable
++ );
++
++gceSTATUS
++gckVGHARDWARE_ReadInterrupt(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32_PTR IDs
++ );
++
++/* Power management. */
++gceSTATUS
++gckVGHARDWARE_SetPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ IN gceCHIPPOWERSTATE State
++ );
++
++gceSTATUS
++gckVGHARDWARE_QueryPowerManagementState(
++ IN gckVGHARDWARE Hardware,
++ OUT gceCHIPPOWERSTATE* State
++ );
++
++gceSTATUS
++gckVGHARDWARE_SetPowerManagement(
++ IN gckVGHARDWARE Hardware,
++ IN gctBOOL PowerManagement
++ );
++
++gceSTATUS
++gckVGHARDWARE_SetPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ IN gctUINT32 Timeout
++ );
++
++gceSTATUS
++gckVGHARDWARE_QueryPowerOffTimeout(
++ IN gckVGHARDWARE Hardware,
++ OUT gctUINT32* Timeout
++ );
++
++gceSTATUS
++gckVGHARDWARE_QueryIdle(
++ IN gckVGHARDWARE Hardware,
++ OUT gctBOOL_PTR IsIdle
++ );
++/******************************************************************************\
++*************************** Command Buffer Structures **************************
++\******************************************************************************/
++
++/* Vacant command buffer marker. */
++#define gcvVACANT_BUFFER ((gcsCOMPLETION_SIGNAL_PTR) (1))
++
++/* Command buffer header. */
++typedef struct _gcsCMDBUFFER * gcsCMDBUFFER_PTR;
++typedef struct _gcsCMDBUFFER
++{
++ /* Pointer to the completion signal. */
++ gcsCOMPLETION_SIGNAL_PTR completion;
++
++ /* The user sets this to the node of the container buffer within which
++ this particular command buffer resides. The kernel sets this to the
++ node of the internally allocated buffer. */
++ gctUINT64 node;
++
++ /* Command buffer hardware address. */
++ gctUINT32 address;
++
++ /* The offset of the buffer from the beginning of the header. */
++ gctUINT32 bufferOffset;
++
++ /* Size of the area allocated for the data portion of this particular
++ command buffer (headers and tail reserves are excluded). */
++ gctSIZE_T size;
++
++ /* Offset into the buffer [0..size]; reflects exactly how much data has
++ been put into the command buffer. */
++ gctUINT offset;
++
++ /* The number of command units in the buffer for the hardware to
++ execute. */
++ gctSIZE_T dataCount;
++
++ /* MANAGED BY : user HAL (gcoBUFFER object).
++ USED BY : user HAL (gcoBUFFER object).
++ Points to the immediate next allocated command buffer. */
++ gcsCMDBUFFER_PTR nextAllocated;
++
++ /* MANAGED BY : user layers (HAL and drivers).
++ USED BY : kernel HAL (gcoBUFFER object).
++       Points to the next subbuffer, if any. A family of subbuffers is
++       chained together and is meant to be executed inseparably as a unit,
++       meaning that context switching cannot occur while a chain of
++       subbuffers is being executed. */
++ gcsCMDBUFFER_PTR nextSubBuffer;
++}
++gcsCMDBUFFER;
++
++/* Command queue element. */
++typedef struct _gcsVGCMDQUEUE
++{
++ /* Pointer to the command buffer header. */
++ gcsCMDBUFFER_PTR commandBuffer;
++
++ /* Dynamic vs. static command buffer state. */
++ gctBOOL dynamic;
++}
++gcsVGCMDQUEUE;
++
++/* Context map entry. */
++typedef struct _gcsVGCONTEXT_MAP
++{
++ /* State index. */
++ gctUINT32 index;
++
++ /* New state value. */
++ gctUINT32 data;
++
++ /* Points to the next entry in the mod list. */
++ gcsVGCONTEXT_MAP_PTR next;
++}
++gcsVGCONTEXT_MAP;
++
++/* gcsVGCONTEXT structure that holds the current context. */
++typedef struct _gcsVGCONTEXT
++{
++ /* Context ID. */
++ gctUINT64 id;
++
++ /* State caching enable flag. */
++ gctBOOL stateCachingEnabled;
++
++ /* Current pipe. */
++ gctUINT32 currentPipe;
++
++ /* State map/mod buffer. */
++ gctSIZE_T mapFirst;
++ gctSIZE_T mapLast;
++#ifdef __QNXNTO__
++ gctSIZE_T mapContainerSize;
++#endif
++ gcsVGCONTEXT_MAP_PTR mapContainer;
++ gcsVGCONTEXT_MAP_PTR mapPrev;
++ gcsVGCONTEXT_MAP_PTR mapCurr;
++ gcsVGCONTEXT_MAP_PTR firstPrevMap;
++ gcsVGCONTEXT_MAP_PTR firstCurrMap;
++
++ /* Main context buffer. */
++ gcsCMDBUFFER_PTR header;
++ gctUINT32_PTR buffer;
++
++ /* Completion signal. */
++ gctHANDLE process;
++ gctSIGNAL signal;
++
++#if defined(__QNXNTO__)
++ gctINT32 coid;
++ gctINT32 rcvid;
++#endif
++}
++gcsVGCONTEXT;
++
++/* User space task header. */
++typedef struct _gcsTASK * gcsTASK_PTR;
++typedef struct _gcsTASK
++{
++ /* Pointer to the next task for the same interrupt in user space. */
++ gcsTASK_PTR next;
++
++ /* Size of the task data that immediately follows the structure. */
++ gctUINT size;
++
++ /* Task data starts here. */
++ /* ... */
++}
++gcsTASK;
++
++/* User space task master table entry. */
++typedef struct _gcsTASK_MASTER_ENTRY * gcsTASK_MASTER_ENTRY_PTR;
++typedef struct _gcsTASK_MASTER_ENTRY
++{
++ /* Pointers to the head and to the tail of the task chain. */
++ gcsTASK_PTR head;
++ gcsTASK_PTR tail;
++}
++gcsTASK_MASTER_ENTRY;
++
++/* User space task master table entry. */
++typedef struct _gcsTASK_MASTER_TABLE
++{
++ /* Table with one entry per block. */
++ gcsTASK_MASTER_ENTRY table[gcvBLOCK_COUNT];
++
++ /* The total number of tasks scheduled. */
++ gctUINT count;
++
++ /* The total size of event data in bytes. */
++ gctUINT size;
++
++#if defined(__QNXNTO__)
++ gctINT32 coid;
++ gctINT32 rcvid;
++#endif
++}
++gcsTASK_MASTER_TABLE;
++
++/******************************************************************************\
++***************************** gckVGINTERRUPT Object ******************************
++\******************************************************************************/
++
++typedef struct _gckVGINTERRUPT * gckVGINTERRUPT;
++
++typedef gceSTATUS (* gctINTERRUPT_HANDLER)(
++ IN gckVGKERNEL Kernel
++ );
++
++gceSTATUS
++gckVGINTERRUPT_Construct(
++ IN gckVGKERNEL Kernel,
++ OUT gckVGINTERRUPT * Interrupt
++ );
++
++gceSTATUS
++gckVGINTERRUPT_Destroy(
++ IN gckVGINTERRUPT Interrupt
++ );
++
++gceSTATUS
++gckVGINTERRUPT_Enable(
++ IN gckVGINTERRUPT Interrupt,
++ IN OUT gctINT32_PTR Id,
++ IN gctINTERRUPT_HANDLER Handler
++ );
++
++gceSTATUS
++gckVGINTERRUPT_Disable(
++ IN gckVGINTERRUPT Interrupt,
++ IN gctINT32 Id
++ );
++
++#ifndef __QNXNTO__
++
++gceSTATUS
++gckVGINTERRUPT_Enque(
++ IN gckVGINTERRUPT Interrupt
++ );
++
++#else
++
++gceSTATUS
++gckVGINTERRUPT_Enque(
++ IN gckVGINTERRUPT Interrupt,
++ OUT gckOS *Os,
++ OUT gctSEMAPHORE *Semaphore
++ );
++
++#endif
++
++gceSTATUS
++gckVGINTERRUPT_DumpState(
++ IN gckVGINTERRUPT Interrupt
++ );
++
++
++/******************************************************************************\
++******************************* gckVGCOMMAND Object *******************************
++\******************************************************************************/
++
++typedef struct _gckVGCOMMAND * gckVGCOMMAND;
++
++/* Construct a new gckVGCOMMAND object. */
++gceSTATUS
++gckVGCOMMAND_Construct(
++ IN gckVGKERNEL Kernel,
++ IN gctUINT TaskGranularity,
++ IN gctUINT QueueSize,
++ OUT gckVGCOMMAND * Command
++ );
++
++/* Destroy an gckVGCOMMAND object. */
++gceSTATUS
++gckVGCOMMAND_Destroy(
++ IN gckVGCOMMAND Command
++ );
++
++/* Query command buffer attributes. */
++gceSTATUS
++gckVGCOMMAND_QueryCommandBuffer(
++ IN gckVGCOMMAND Command,
++ OUT gcsCOMMAND_BUFFER_INFO_PTR Information
++ );
++
++/* Allocate a command queue. */
++gceSTATUS
++gckVGCOMMAND_Allocate(
++ IN gckVGCOMMAND Command,
++ IN gctSIZE_T Size,
++ OUT gcsCMDBUFFER_PTR * CommandBuffer,
++ OUT gctPOINTER * Data
++ );
++
++/* Release memory held by the command queue. */
++gceSTATUS
++gckVGCOMMAND_Free(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ );
++
++/* Schedule the command queue for execution. */
++gceSTATUS
++gckVGCOMMAND_Execute(
++ IN gckVGCOMMAND Command,
++ IN gcsCMDBUFFER_PTR CommandBuffer
++ );
++
++/* Commit a buffer to the command queue. */
++gceSTATUS
++gckVGCOMMAND_Commit(
++ IN gckVGCOMMAND Command,
++ IN gcsVGCONTEXT_PTR Context,
++ IN gcsVGCMDQUEUE_PTR Queue,
++ IN gctUINT EntryCount,
++ IN gcsTASK_MASTER_TABLE_PTR TaskTable
++ );
++
++/******************************************************************************\
++********************************* gckVGMMU Object ********************************
++\******************************************************************************/
++
++typedef struct _gckVGMMU * gckVGMMU;
++
++/* Construct a new gckVGMMU object. */
++gceSTATUS
++gckVGMMU_Construct(
++ IN gckVGKERNEL Kernel,
++ IN gctSIZE_T MmuSize,
++ OUT gckVGMMU * Mmu
++ );
++
++/* Destroy an gckVGMMU object. */
++gceSTATUS
++gckVGMMU_Destroy(
++ IN gckVGMMU Mmu
++ );
++
++/* Allocate pages inside the MMU. */
++gceSTATUS
++gckVGMMU_AllocatePages(
++ IN gckVGMMU Mmu,
++ IN gctSIZE_T PageCount,
++ OUT gctPOINTER * PageTable,
++ OUT gctUINT32 * Address
++ );
++
++/* Remove a page table from the MMU. */
++gceSTATUS
++gckVGMMU_FreePages(
++ IN gckVGMMU Mmu,
++ IN gctPOINTER PageTable,
++ IN gctSIZE_T PageCount
++ );
++
++/* Set the MMU page with info. */
++gceSTATUS
++gckVGMMU_SetPage(
++ IN gckVGMMU Mmu,
++ IN gctUINT32 PageAddress,
++ IN gctUINT32 *PageEntry
++ );
++
++/* Flush MMU */
++gceSTATUS
++gckVGMMU_Flush(
++ IN gckVGMMU Mmu
++ );
++
++#endif /* gcdENABLE_VG */
++
++#ifdef __cplusplus
++} /* extern "C" */
++#endif
++
++#endif /* __gc_hal_vg_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_array.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_array.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_array.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/allocator/default/gc_hal_kernel_allocator_array.h 2015-07-27 23:13:06.218794344 +0200
+@@ -0,0 +1,34 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++extern gceSTATUS
++_DefaultAlloctorInit(
++ IN gckOS Os,
++ OUT gckALLOCATOR * Allocator
++ );
++
++gcsALLOCATOR_DESC allocatorArray[] =
++{
++ /* Default allocator. */
++ gcmkDEFINE_ALLOCATOR_DESC("default", _DefaultAlloctorInit),
++};
++
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_array.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_array.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_array.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_array.h 2015-07-27 23:13:06.218794344 +0200
+@@ -0,0 +1,45 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++extern gceSTATUS
++_DefaultAlloctorInit(
++ IN gckOS Os,
++ OUT gckALLOCATOR * Allocator
++ );
++
++#if LINUX_CMA_FSL
++gceSTATUS
++_CMAFSLAlloctorInit(
++ IN gckOS Os,
++ OUT gckALLOCATOR * Allocator
++ );
++#endif
++
++gcsALLOCATOR_DESC allocatorArray[] =
++{
++#if LINUX_CMA_FSL
++ gcmkDEFINE_ALLOCATOR_DESC("cmafsl", _CMAFSLAlloctorInit),
++#endif
++ /* Default allocator. */
++ gcmkDEFINE_ALLOCATOR_DESC("default", _DefaultAlloctorInit),
++};
++
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_cma.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_cma.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_cma.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/allocator/freescale/gc_hal_kernel_allocator_cma.c 2015-07-27 23:13:06.218794344 +0200
+@@ -0,0 +1,412 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++#include "gc_hal_kernel_allocator.h"
++
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <linux/mman.h>
++#include <asm/atomic.h>
++#include <linux/dma-mapping.h>
++#include <linux/slab.h>
++#include <linux/dma-mapping.h>
++
++#define _GC_OBJ_ZONE gcvZONE_OS
++
++typedef struct _gcsCMA_PRIV * gcsCMA_PRIV_PTR;
++typedef struct _gcsCMA_PRIV {
++ gctUINT32 cmasize;
++}
++gcsCMA_PRIV;
++
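++/* Per-allocation CMA bookkeeping: the kernel virtual address and the DMA
++   handle returned by dma_alloc_writecombine(), reused later by the map,
++   physical-address and free paths. */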
++struct mdl_cma_priv {
++ gctPOINTER kvaddr;
++ dma_addr_t physical;
++};
++
++int gc_cma_usage_show(struct seq_file* m, void* data)
++{
++ gcsINFO_NODE *node = m->private;
++ gckALLOCATOR Allocator = node->device;
++ gcsCMA_PRIV_PTR priv = Allocator->privateData;
++
++ seq_printf(m, "cma: %u bytes\n", priv->cmasize);
++
++ return 0;
++}
++
++static gcsINFO InfoList[] =
++{
++ {"cmausage", gc_cma_usage_show},
++};
++
++static void
++_DefaultAllocatorDebugfsInit(
++ IN gckALLOCATOR Allocator,
++ IN gckDEBUGFS_DIR Root
++ )
++{
++ gcmkVERIFY_OK(
++ gckDEBUGFS_DIR_Init(&Allocator->debugfsDir, Root->root, "cma"));
++
++ gcmkVERIFY_OK(gckDEBUGFS_DIR_CreateFiles(
++ &Allocator->debugfsDir,
++ InfoList,
++ gcmCOUNTOF(InfoList),
++ Allocator
++ ));
++}
++
++static void
++_DefaultAllocatorDebugfsCleanup(
++ IN gckALLOCATOR Allocator
++ )
++{
++ gcmkVERIFY_OK(gckDEBUGFS_DIR_RemoveFiles(
++ &Allocator->debugfsDir,
++ InfoList,
++ gcmCOUNTOF(InfoList)
++ ));
++
++ gckDEBUGFS_DIR_Deinit(&Allocator->debugfsDir);
++}
++
++static gceSTATUS
++_CMAFSLAlloc(
++ IN gckALLOCATOR Allocator,
++ INOUT PLINUX_MDL Mdl,
++ IN gctSIZE_T NumPages,
++ IN gctUINT32 Flags
++ )
++{
++ gceSTATUS status;
++ gcsCMA_PRIV_PTR priv = (gcsCMA_PRIV_PTR)Allocator->privateData;
++
++ struct mdl_cma_priv *mdl_priv=gcvNULL;
++ gckOS os = Allocator->os;
++
++ gcmkHEADER_ARG("Mdl=%p NumPages=%d", Mdl, NumPages);
++
++ gcmkONERROR(gckOS_Allocate(os, sizeof(struct mdl_cma_priv), (gctPOINTER *)&mdl_priv));
++ mdl_priv->kvaddr = gcvNULL;
++
++ mdl_priv->kvaddr = dma_alloc_writecombine(gcvNULL,
++ NumPages * PAGE_SIZE,
++ &mdl_priv->physical,
++ GFP_KERNEL | gcdNOWARN);
++
++ if (mdl_priv->kvaddr == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ Mdl->priv = mdl_priv;
++ priv->cmasize += NumPages * PAGE_SIZE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if(mdl_priv)
++ gckOS_Free(os, mdl_priv);
++ gcmkFOOTER();
++ return status;
++}
++
++static void
++_CMAFSLFree(
++ IN gckALLOCATOR Allocator,
++ IN OUT PLINUX_MDL Mdl
++ )
++{
++ gckOS os = Allocator->os;
++ struct mdl_cma_priv *mdl_priv=(struct mdl_cma_priv *)Mdl->priv;
++ gcsCMA_PRIV_PTR priv = (gcsCMA_PRIV_PTR)Allocator->privateData;
++ dma_free_writecombine(gcvNULL,
++ Mdl->numPages * PAGE_SIZE,
++ mdl_priv->kvaddr,
++ mdl_priv->physical);
++ gckOS_Free(os, mdl_priv);
++ priv->cmasize -= Mdl->numPages * PAGE_SIZE;
++}
++
++gctINT
++_CMAFSLMapUser(
++ gckALLOCATOR Allocator,
++ PLINUX_MDL Mdl,
++ PLINUX_MDL_MAP MdlMap,
++ gctBOOL Cacheable
++ )
++{
++
++ PLINUX_MDL mdl = Mdl;
++ PLINUX_MDL_MAP mdlMap = MdlMap;
++ struct mdl_cma_priv *mdl_priv=(struct mdl_cma_priv *)Mdl->priv;
++
++ gcmkHEADER_ARG("Allocator=%p Mdl=%p MdlMap=%p gctBOOL=%d", Allocator, Mdl, MdlMap, Cacheable);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
++ mdlMap->vmaAddr = (gctSTRING)vm_mmap(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++#else
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = (gctSTRING)do_mmap_pgoff(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++
++ up_write(&current->mm->mmap_sem);
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++        "%s(%d): vmaAddr->0x%X for mdl->0x%X",
++ __FUNCTION__, __LINE__,
++ (gctUINT32)(gctUINTPTR_T)mdlMap->vmaAddr,
++ (gctUINT32)(gctUINTPTR_T)mdl
++ );
++
++ if (IS_ERR(mdlMap->vmaAddr))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): do_mmap_pgoff error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vma = find_vma(current->mm, (unsigned long)mdlMap->vmaAddr);
++
++ if (mdlMap->vma == gcvNULL)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): find_vma error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ /* Now map all the vmalloc pages to this user address. */
++ if (mdl->contiguous)
++ {
++ /* map kernel memory to user space.. */
++ if (dma_mmap_writecombine(gcvNULL,
++ mdlMap->vma,
++ mdl_priv->kvaddr,
++ mdl_priv->physical,
++ mdl->numPages * PAGE_SIZE) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++                "%s(%d): dma_mmap_writecombine error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++ }
++ else
++ {
++        gckOS_Print("incorrect mdl: contiguous=%d\n", mdl->contiguous);
++ }
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++void
++_CMAUnmapUser(
++ IN gckALLOCATOR Allocator,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Size
++ )
++{
++ if (unlikely(current->mm == gcvNULL))
++ {
++ /* Do nothing if process is exiting. */
++ return;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
++ if (vm_munmap((unsigned long)Logical, Size) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): vm_munmap failed",
++ __FUNCTION__, __LINE__
++ );
++ }
++#else
++ down_write(&current->mm->mmap_sem);
++ if (do_munmap(current->mm, (unsigned long)Logical, Size) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): do_munmap failed",
++ __FUNCTION__, __LINE__
++ );
++ }
++ up_write(&current->mm->mmap_sem);
++#endif
++}
++
++gceSTATUS
++_CMAMapKernel(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ OUT gctPOINTER *Logical
++ )
++{
++ struct mdl_cma_priv *mdl_priv=(struct mdl_cma_priv *)Mdl->priv;
++ *Logical =mdl_priv->kvaddr;
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_CMAUnmapKernel(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++extern gceSTATUS
++_DefaultLogicalToPhysical(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ OUT gctUINT32_PTR Physical
++ );
++
++extern gceSTATUS
++_DefaultCache(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical,
++ IN gctUINT32 Bytes,
++ IN gceCACHEOPERATION Operation
++ );
++
++gceSTATUS
++_CMAPhysical(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctUINT32 Offset,
++ OUT gctUINT32_PTR Physical
++ )
++{
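++    /* CMA buffers are physically contiguous; this path expects Offset 0
++       (asserted below) and returns the DMA handle as the physical base. */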
++ struct mdl_cma_priv *mdl_priv=(struct mdl_cma_priv *)Mdl->priv;
++ gcmkASSERT(!Offset);
++ *Physical = mdl_priv->physical;
++
++ return gcvSTATUS_OK;
++}
++
++
++extern void
++_DefaultAllocatorDestructor(
++ IN void* PrivateData
++ );
++
++/* CMA allocator operations. */
++gcsALLOCATOR_OPERATIONS CMAFSLAllocatorOperations = {
++ .Alloc = _CMAFSLAlloc,
++ .Free = _CMAFSLFree,
++ .MapUser = _CMAFSLMapUser,
++ .UnmapUser = _CMAUnmapUser,
++ .MapKernel = _CMAMapKernel,
++ .UnmapKernel = _CMAUnmapKernel,
++ .LogicalToPhysical = _DefaultLogicalToPhysical,
++ .Cache = _DefaultCache,
++ .Physical = _CMAPhysical,
++};
++
++/* CMA allocator entry. */
++gceSTATUS
++_CMAFSLAlloctorInit(
++ IN gckOS Os,
++ OUT gckALLOCATOR * Allocator
++ )
++{
++ gceSTATUS status;
++ gckALLOCATOR allocator;
++ gcsCMA_PRIV_PTR priv = gcvNULL;
++
++ gcmkONERROR(
++ gckALLOCATOR_Construct(Os, &CMAFSLAllocatorOperations, &allocator));
++
++ priv = kzalloc(gcmSIZEOF(gcsCMA_PRIV), GFP_KERNEL | gcdNOWARN);
++
++ if (!priv)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Register private data. */
++ allocator->privateData = priv;
++ allocator->privateDataDestructor = _DefaultAllocatorDestructor;
++
++ allocator->debugfsInit = _DefaultAllocatorDebugfsInit;
++ allocator->debugfsCleanup = _DefaultAllocatorDebugfsCleanup;
++
++ allocator->capability = gcvALLOC_FLAG_CONTIGUOUS;
++
++ *Allocator = allocator;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_allocator.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_allocator.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_allocator.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_allocator.c 2015-07-27 23:13:06.222780123 +0200
+@@ -0,0 +1,938 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++#include "gc_hal_kernel_allocator.h"
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <linux/mman.h>
++#include <asm/atomic.h>
++#include <linux/dma-mapping.h>
++#include <linux/slab.h>
++
++#include "gc_hal_kernel_allocator_array.h"
++#include "gc_hal_kernel_platform.h"
++
++#define _GC_OBJ_ZONE gcvZONE_OS
++
++typedef struct _gcsDEFAULT_PRIV * gcsDEFAULT_PRIV_PTR;
++typedef struct _gcsDEFAULT_PRIV {
++ gctUINT32 low;
++ gctUINT32 high;
++}
++gcsDEFAULT_PRIV;
++
++/******************************************************************************\
++************************** Default Allocator Debugfs ***************************
++\******************************************************************************/
++
++int gc_usage_show(struct seq_file* m, void* data)
++{
++ gcsINFO_NODE *node = m->private;
++ gckALLOCATOR Allocator = node->device;
++ gcsDEFAULT_PRIV_PTR priv = Allocator->privateData;
++
++ seq_printf(m, "low: %u bytes\n", priv->low);
++ seq_printf(m, "high: %u bytes\n", priv->high);
++
++ return 0;
++}
++
++static gcsINFO InfoList[] =
++{
++ {"lowHighUsage", gc_usage_show},
++};
++
++static void
++_DefaultAllocatorDebugfsInit(
++ IN gckALLOCATOR Allocator,
++ IN gckDEBUGFS_DIR Root
++ )
++{
++ gcmkVERIFY_OK(
++ gckDEBUGFS_DIR_Init(&Allocator->debugfsDir, Root->root, "default"));
++
++ gcmkVERIFY_OK(gckDEBUGFS_DIR_CreateFiles(
++ &Allocator->debugfsDir,
++ InfoList,
++ gcmCOUNTOF(InfoList),
++ Allocator
++ ));
++}
++
++static void
++_DefaultAllocatorDebugfsCleanup(
++ IN gckALLOCATOR Allocator
++ )
++{
++ gcmkVERIFY_OK(gckDEBUGFS_DIR_RemoveFiles(
++ &Allocator->debugfsDir,
++ InfoList,
++ gcmCOUNTOF(InfoList)
++ ));
++
++ gckDEBUGFS_DIR_Deinit(&Allocator->debugfsDir);
++}
++
++
++static void
++_NonContiguousFree(
++ IN struct page ** Pages,
++ IN gctUINT32 NumPages
++ )
++{
++ gctINT i;
++
++ gcmkHEADER_ARG("Pages=0x%X, NumPages=%d", Pages, NumPages);
++
++ gcmkASSERT(Pages != gcvNULL);
++
++ for (i = 0; i < NumPages; i++)
++ {
++ __free_page(Pages[i]);
++ }
++
++ if (is_vmalloc_addr(Pages))
++ {
++ vfree(Pages);
++ }
++ else
++ {
++ kfree(Pages);
++ }
++
++ gcmkFOOTER_NO();
++}
++
++static struct page **
++_NonContiguousAlloc(
++ IN gctUINT32 NumPages
++ )
++{
++ struct page ** pages;
++ struct page *p;
++ gctINT i, size;
++
++ gcmkHEADER_ARG("NumPages=%lu", NumPages);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
++ if (NumPages > totalram_pages)
++#else
++ if (NumPages > num_physpages)
++#endif
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++
++ size = NumPages * sizeof(struct page *);
++
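++    /* Try kmalloc() first for the page-pointer array and fall back to
++       vmalloc() for large arrays; _NonContiguousFree() checks
++       is_vmalloc_addr() to pick the matching free routine. */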
++ pages = kmalloc(size, GFP_KERNEL | gcdNOWARN);
++
++ if (!pages)
++ {
++ pages = vmalloc(size);
++
++ if (!pages)
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++ }
++
++ for (i = 0; i < NumPages; i++)
++ {
++ p = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | gcdNOWARN);
++
++ if (!p)
++ {
++ _NonContiguousFree(pages, i);
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++
++ pages[i] = p;
++ }
++
++ gcmkFOOTER_ARG("pages=0x%X", pages);
++ return pages;
++}
++
++gctSTRING
++_CreateKernelVirtualMapping(
++ IN PLINUX_MDL Mdl
++ )
++{
++ gctSTRING addr = 0;
++ gctINT numPages = Mdl->numPages;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ if (Mdl->contiguous)
++ {
++ addr = page_address(Mdl->u.contiguousPages);
++ }
++ else
++ {
++ addr = vmap(Mdl->u.nonContiguousPages,
++ numPages,
++ 0,
++ PAGE_KERNEL);
++
++ /* Trigger a page fault. */
++ memset(addr, 0, numPages * PAGE_SIZE);
++ }
++#else
++ struct page ** pages;
++ gctBOOL free = gcvFALSE;
++ gctINT i;
++
++ if (Mdl->contiguous)
++ {
++ pages = kmalloc(sizeof(struct page *) * numPages, GFP_KERNEL | gcdNOWARN);
++
++ if (!pages)
++ {
++ return gcvNULL;
++ }
++
++ for (i = 0; i < numPages; i++)
++ {
++ pages[i] = nth_page(Mdl->u.contiguousPages, i);
++ }
++
++ free = gcvTRUE;
++ }
++ else
++ {
++ pages = Mdl->u.nonContiguousPages;
++ }
++
++ /* ioremap() can't work on system memory since 2.6.38. */
++ addr = vmap(pages, numPages, 0, gcmkNONPAGED_MEMROY_PROT(PAGE_KERNEL));
++
++ if (free)
++ {
++ kfree(pages);
++ }
++
++#endif
++
++ return addr;
++}
++
++void
++_DestoryKernelVirtualMapping(
++ IN gctSTRING Addr
++ )
++{
++#if !gcdNONPAGED_MEMORY_CACHEABLE
++ vunmap(Addr);
++#endif
++}
++
++void
++_UnmapUserLogical(
++ IN gctPOINTER Logical,
++ IN gctUINT32 Size
++)
++{
++ if (unlikely(current->mm == gcvNULL))
++ {
++ /* Do nothing if process is exiting. */
++ return;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ if (vm_munmap((unsigned long)Logical, Size) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): vm_munmap failed",
++ __FUNCTION__, __LINE__
++ );
++ }
++#else
++ down_write(&current->mm->mmap_sem);
++ if (do_munmap(current->mm, (unsigned long)Logical, Size) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): do_munmap failed",
++ __FUNCTION__, __LINE__
++ );
++ }
++ up_write(&current->mm->mmap_sem);
++#endif
++}
++
++/***************************************************************************\
++************************ Default Allocator **********************************
++\***************************************************************************/
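++/* Headroom, in pages, kept free when gcvALLOC_FLAG_MEMLIMIT is set:
++   50*1024 pages is 200 MB with 4 KB pages (see the freeram check in
++   _DefaultAlloc below). */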
++#define C_MAX_PAGENUM (50*1024)
++static gceSTATUS
++_DefaultAlloc(
++ IN gckALLOCATOR Allocator,
++ INOUT PLINUX_MDL Mdl,
++ IN gctSIZE_T NumPages,
++ IN gctUINT32 Flags
++ )
++{
++ gceSTATUS status;
++ gctUINT32 order;
++ gctSIZE_T bytes;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ gctPOINTER addr = gcvNULL;
++#endif
++ gctUINT32 numPages;
++ gctUINT i = 0;
++ gctBOOL contiguous = Flags & gcvALLOC_FLAG_CONTIGUOUS;
++ struct sysinfo temsysinfo;
++ gcsDEFAULT_PRIV_PTR priv = (gcsDEFAULT_PRIV_PTR)Allocator->privateData;
++
++ gcmkHEADER_ARG("Mdl=%p NumPages=%d", Mdl, NumPages);
++
++ numPages = NumPages;
++ bytes = NumPages * PAGE_SIZE;
++ order = get_order(bytes);
++
++ si_meminfo(&temsysinfo);
++
++ if (Flags & gcvALLOC_FLAG_MEMLIMIT)
++ {
++ if ( (temsysinfo.freeram < NumPages) || ((temsysinfo.freeram-NumPages) < C_MAX_PAGENUM) )
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++ }
++
++ if (contiguous)
++ {
++ if (order >= MAX_ORDER)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
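++        /* Try a low-memory allocation first, without retrying, and fall
++           back to the highmem-allowed alloc_pages() call below on
++           failure. */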
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ addr =
++ alloc_pages_exact(bytes, GFP_KERNEL | gcdNOWARN | __GFP_NORETRY);
++
++ Mdl->u.contiguousPages = addr
++ ? virt_to_page(addr)
++ : gcvNULL;
++
++ Mdl->exact = gcvTRUE;
++#else
++ Mdl->u.contiguousPages =
++ alloc_pages(GFP_KERNEL | gcdNOWARN | __GFP_NORETRY, order);
++#endif
++
++ if (Mdl->u.contiguousPages == gcvNULL)
++ {
++ Mdl->u.contiguousPages =
++ alloc_pages(GFP_KERNEL | __GFP_HIGHMEM | gcdNOWARN, order);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ Mdl->exact = gcvFALSE;
++#endif
++ }
++ }
++ else
++ {
++ Mdl->u.nonContiguousPages = _NonContiguousAlloc(numPages);
++ }
++
++ if (Mdl->u.contiguousPages == gcvNULL && Mdl->u.nonContiguousPages == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ for (i = 0; i < numPages; i++)
++ {
++ struct page *page;
++
++ if (contiguous)
++ {
++ page = nth_page(Mdl->u.contiguousPages, i);
++ }
++ else
++ {
++ page = _NonContiguousToPage(Mdl->u.nonContiguousPages, i);
++ }
++
++ SetPageReserved(page);
++
++ if (!PageHighMem(page) && page_to_phys(page))
++ {
++ gcmkVERIFY_OK(
++ gckOS_CacheFlush(Allocator->os, _GetProcessID(), gcvNULL,
++ page_to_phys(page),
++ page_address(page),
++ PAGE_SIZE));
++
++ priv->low += PAGE_SIZE;
++ }
++ else
++ {
++ flush_dcache_page(page);
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED && defined(CONFIG_OUTER_CACHE) && gcdENABLE_OUTER_CACHE_PATCH
++ if (page_to_phys(page))
++ {
++ _HandleOuterCache(
++ Allocator->os,
++ page_to_phys(page),
++ gcvNULL,
++ PAGE_SIZE,
++ gcvCACHE_FLUSH
++ );
++ }
++#endif
++
++ priv->high += PAGE_SIZE;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++static void
++_DefaultFree(
++ IN gckALLOCATOR Allocator,
++ IN OUT PLINUX_MDL Mdl
++ )
++{
++ gctINT i;
++ struct page * page;
++ gcsDEFAULT_PRIV_PTR priv = (gcsDEFAULT_PRIV_PTR)Allocator->privateData;
++
++ for (i = 0; i < Mdl->numPages; i++)
++ {
++ if (Mdl->contiguous)
++ {
++ page = nth_page(Mdl->u.contiguousPages, i);
++ }
++ else
++ {
++ page = _NonContiguousToPage(Mdl->u.nonContiguousPages, i);
++ }
++
++ ClearPageReserved(page);
++
++ if (PageHighMem(page))
++ {
++ priv->high -= PAGE_SIZE;
++ }
++ else
++ {
++ priv->low -= PAGE_SIZE;
++ }
++ }
++
++ if (Mdl->contiguous)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ if (Mdl->exact == gcvTRUE)
++ {
++ free_pages_exact(page_address(Mdl->u.contiguousPages), Mdl->numPages * PAGE_SIZE);
++ }
++ else
++#endif
++ {
++ __free_pages(Mdl->u.contiguousPages, get_order(Mdl->numPages * PAGE_SIZE));
++ }
++ }
++ else
++ {
++ _NonContiguousFree(Mdl->u.nonContiguousPages, Mdl->numPages);
++ }
++}
++
++gctINT
++_DefaultMapUser(
++ gckALLOCATOR Allocator,
++ PLINUX_MDL Mdl,
++ PLINUX_MDL_MAP MdlMap,
++ gctBOOL Cacheable
++ )
++{
++
++ gctSTRING addr;
++ unsigned long start;
++ unsigned long pfn;
++ gctINT i;
++ gckOS os = Allocator->os;
++ gcsPLATFORM * platform = os->device->platform;
++
++ PLINUX_MDL mdl = Mdl;
++ PLINUX_MDL_MAP mdlMap = MdlMap;
++
++ gcmkHEADER_ARG("Allocator=%p Mdl=%p MdlMap=%p gctBOOL=%d", Allocator, Mdl, MdlMap, Cacheable);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
++ mdlMap->vmaAddr = (gctSTRING)vm_mmap(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++#else
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = (gctSTRING)do_mmap_pgoff(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++
++ up_write(&current->mm->mmap_sem);
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++        "%s(%d): vmaAddr->0x%X for mdl->0x%X",
++ __FUNCTION__, __LINE__,
++ (gctUINT32)(gctUINTPTR_T)mdlMap->vmaAddr,
++ (gctUINT32)(gctUINTPTR_T)mdl
++ );
++
++ if (IS_ERR(mdlMap->vmaAddr))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): do_mmap_pgoff error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vma = find_vma(current->mm, (unsigned long)mdlMap->vmaAddr);
++
++ if (mdlMap->vma == gcvNULL)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): find_vma error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ mdlMap->vma->vm_flags |= gcdVM_FLAGS;
++
++ if (Cacheable == gcvFALSE)
++ {
++ /* Make this mapping non-cached. */
++ mdlMap->vma->vm_page_prot = gcmkPAGED_MEMROY_PROT(mdlMap->vma->vm_page_prot);
++ }
++
++ if (platform && platform->ops->adjustProt)
++ {
++ platform->ops->adjustProt(mdlMap->vma);
++ }
++
++ addr = mdl->addr;
++
++ /* Now map all the vmalloc pages to this user address. */
++ if (mdl->contiguous)
++ {
++ /* map kernel memory to user space.. */
++ if (remap_pfn_range(mdlMap->vma,
++ mdlMap->vma->vm_start,
++ page_to_pfn(mdl->u.contiguousPages),
++ mdlMap->vma->vm_end - mdlMap->vma->vm_start,
++ mdlMap->vma->vm_page_prot) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): unable to mmap ret",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++ }
++ else
++ {
++ start = mdlMap->vma->vm_start;
++
++ for (i = 0; i < mdl->numPages; i++)
++ {
++ pfn = _NonContiguousToPfn(mdl->u.nonContiguousPages, i);
++
++ if (remap_pfn_range(mdlMap->vma,
++ start,
++ pfn,
++ PAGE_SIZE,
++ mdlMap->vma->vm_page_prot) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ start += PAGE_SIZE;
++ addr += PAGE_SIZE;
++ }
++ }
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++void
++_DefaultUnmapUser(
++ IN gckALLOCATOR Allocator,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Size
++ )
++{
++ _UnmapUserLogical(Logical, Size);
++}
++
++gceSTATUS
++_DefaultMapKernel(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ OUT gctPOINTER *Logical
++ )
++{
++ *Logical = _CreateKernelVirtualMapping(Mdl);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_DefaultUnmapKernel(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical
++ )
++{
++ _DestoryKernelVirtualMapping(Logical);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_DefaultLogicalToPhysical(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ OUT gctUINT32_PTR Physical
++ )
++{
++ return _ConvertLogical2Physical(
++ Allocator->os, Logical, ProcessID, Mdl, Physical);
++}
++
++gceSTATUS
++_DefaultCache(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical,
++ IN gctUINT32 Bytes,
++ IN gceCACHEOPERATION Operation
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_DefaultPhysical(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctUINT32 Offset,
++ OUT gctUINT32_PTR Physical
++ )
++{
++ gcmkASSERT(Mdl->pagedMem && !Mdl->contiguous);
++ *Physical = _NonContiguousToPhys(Mdl->u.nonContiguousPages, Offset);
++
++ return gcvSTATUS_OK;
++}
++
++void
++_DefaultAllocatorDestructor(
++ IN void* PrivateData
++ )
++{
++ kfree(PrivateData);
++}
++
++/* Default allocator operations. */
++gcsALLOCATOR_OPERATIONS DefaultAllocatorOperations = {
++ .Alloc = _DefaultAlloc,
++ .Free = _DefaultFree,
++ .MapUser = _DefaultMapUser,
++ .UnmapUser = _DefaultUnmapUser,
++ .MapKernel = _DefaultMapKernel,
++ .UnmapKernel = _DefaultUnmapKernel,
++ .LogicalToPhysical = _DefaultLogicalToPhysical,
++ .Cache = _DefaultCache,
++ .Physical = _DefaultPhysical,
++};
++
++/* Default allocator entry. */
++gceSTATUS
++_DefaultAlloctorInit(
++ IN gckOS Os,
++ OUT gckALLOCATOR * Allocator
++ )
++{
++ gceSTATUS status;
++ gckALLOCATOR allocator;
++ gcsDEFAULT_PRIV_PTR priv = gcvNULL;
++
++ gcmkONERROR(
++ gckALLOCATOR_Construct(Os, &DefaultAllocatorOperations, &allocator));
++
++ priv = kzalloc(gcmSIZEOF(gcsDEFAULT_PRIV), GFP_KERNEL | gcdNOWARN);
++
++ if (!priv)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Register private data. */
++ allocator->privateData = priv;
++ allocator->privateDataDestructor = _DefaultAllocatorDestructor;
++
++ allocator->debugfsInit = _DefaultAllocatorDebugfsInit;
++ allocator->debugfsCleanup = _DefaultAllocatorDebugfsCleanup;
++
++ *Allocator = allocator;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++/***************************************************************************\
++************************ Allocator helper ***********************************
++\***************************************************************************/
++
++gceSTATUS
++gckALLOCATOR_Construct(
++ IN gckOS Os,
++ IN gcsALLOCATOR_OPERATIONS * Operations,
++ OUT gckALLOCATOR * Allocator
++ )
++{
++ gceSTATUS status;
++ gckALLOCATOR allocator;
++
++ gcmkHEADER_ARG("Os=%p, Operations=%p, Allocator=%p",
++ Os, Operations, Allocator);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Allocator != gcvNULL);
++ gcmkVERIFY_ARGUMENT
++ ( Operations
++ && Operations->Alloc
++ && Operations->Free
++ && Operations->MapUser
++ && Operations->UnmapUser
++ && Operations->MapKernel
++ && Operations->UnmapKernel
++ && Operations->LogicalToPhysical
++ && Operations->Cache
++ && Operations->Physical
++ );
++
++ gcmkONERROR(
++ gckOS_Allocate(Os, gcmSIZEOF(gcsALLOCATOR), (gctPOINTER *)&allocator));
++
++ gckOS_ZeroMemory(allocator, gcmSIZEOF(gcsALLOCATOR));
++
++ /* Record os. */
++ allocator->os = Os;
++
++ /* Set operations. */
++ allocator->ops = Operations;
++
++ allocator->capability = gcvALLOC_FLAG_CONTIGUOUS
++ | gcvALLOC_FLAG_NON_CONTIGUOUS
++ | gcvALLOC_FLAG_CACHEABLE
++ | gcvALLOC_FLAG_MEMLIMIT;
++
++ *Allocator = allocator;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/******************************************************************************\
++******************************** Debugfs Support *******************************
++\******************************************************************************/
++
++static gceSTATUS
++_AllocatorDebugfsInit(
++ IN gckOS Os
++ )
++{
++ gceSTATUS status;
++ gckGALDEVICE device = Os->device;
++
++ gckDEBUGFS_DIR dir = &Os->allocatorDebugfsDir;
++
++ gcmkONERROR(gckDEBUGFS_DIR_Init(dir, device->debugfsDir.root, "allocators"));
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++static void
++_AllocatorDebugfsCleanup(
++ IN gckOS Os
++ )
++{
++ gckDEBUGFS_DIR dir = &Os->allocatorDebugfsDir;
++
++ gckDEBUGFS_DIR_Deinit(dir);
++}
++
++/***************************************************************************\
++************************ Allocator management *******************************
++\***************************************************************************/
++
++gceSTATUS
++gckOS_ImportAllocators(
++ gckOS Os
++ )
++{
++ gceSTATUS status;
++ gctUINT i;
++ gckALLOCATOR allocator;
++
++ _AllocatorDebugfsInit(Os);
++
++ INIT_LIST_HEAD(&Os->allocatorList);
++
++ for (i = 0; i < gcmCOUNTOF(allocatorArray); i++)
++ {
++ if (allocatorArray[i].construct)
++ {
++ /* Construct allocator. */
++ status = allocatorArray[i].construct(Os, &allocator);
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkPRINT("["DEVICE_NAME"]: Can't construct allocator(%s)",
++ allocatorArray[i].name);
++
++ continue;
++ }
++
++ allocator->name = allocatorArray[i].name;
++
++ if (allocator->debugfsInit)
++ {
++ /* Init allocator's debugfs. */
++ allocator->debugfsInit(allocator, &Os->allocatorDebugfsDir);
++ }
++
++ list_add_tail(&allocator->head, &Os->allocatorList);
++ }
++ }
++
++#if gcdDEBUG
++ list_for_each_entry(allocator, &Os->allocatorList, head)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d) Allocator: %s",
++ __FUNCTION__, __LINE__,
++ allocator->name
++ );
++ }
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_FreeAllocators(
++ gckOS Os
++ )
++{
++ gckALLOCATOR allocator;
++ gckALLOCATOR temp;
++
++ list_for_each_entry_safe(allocator, temp, &Os->allocatorList, head)
++ {
++ list_del(&allocator->head);
++
++ if (allocator->debugfsCleanup)
++ {
++ /* Clean up allocator's debugfs. */
++ allocator->debugfsCleanup(allocator);
++ }
++
++ /* Free private data. */
++ if (allocator->privateDataDestructor && allocator->privateData)
++ {
++ allocator->privateDataDestructor(allocator->privateData);
++ }
++
++ gckOS_Free(Os, allocator);
++ }
++
++ _AllocatorDebugfsCleanup(Os);
++
++ return gcvSTATUS_OK;
++}
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_allocator.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_allocator.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_allocator.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_allocator.h 2015-07-27 23:13:06.222780123 +0200
+@@ -0,0 +1,400 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_allocator_h_
++#define __gc_hal_kernel_allocator_h_
++
++#include "gc_hal_kernel_linux.h"
++
++typedef struct _gcsALLOCATOR * gckALLOCATOR;
++
++typedef struct _gcsALLOCATOR_OPERATIONS
++{
++ /**************************************************************************
++ **
++ ** Alloc
++ **
++    **  Allocate memory; the request size is page aligned.
++ **
++ ** INPUT:
++ **
++ ** gckALLOCATOR Allocator
++    **          Pointer to a gckALLOCATOR object.
++ **
++    **  PLINUX_MDL Mdl
++    **          Pointer to the Mdl which stores information
++ ** about allocated memory.
++ **
++ ** gctSIZE_T NumPages
++    **          Number of pages to allocate.
++ **
++ ** gctUINT32 Flag
++ ** Allocation option.
++ **
++ ** OUTPUT:
++ **
++ ** Nothing.
++ **
++ */
++ gceSTATUS
++ (*Alloc)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctSIZE_T NumPages,
++ IN gctUINT32 Flag
++ );
++
++ /**************************************************************************
++ **
++ ** Free
++ **
++ ** Free memory.
++ **
++ ** INPUT:
++ **
++ ** gckALLOCATOR Allocator
++    **          Pointer to a gckALLOCATOR object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Mdl which stores information.
++ **
++ ** OUTPUT:
++ **
++ ** Nothing.
++ **
++ */
++ void
++ (*Free)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl
++ );
++
++ /**************************************************************************
++ **
++ ** MapUser
++ **
++ ** Map memory to user space.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++    **          Pointer to a gckALLOCATOR object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Pointer to a Mdl.
++ **
++ ** PLINUX_MDL_MAP MdlMap
++ ** Pointer to a MdlMap, mapped address is stored
++ ** in MdlMap->vmaAddr
++ **
++ ** gctBOOL Cacheable
++ ** Whether this mapping is cacheable.
++ **
++ ** OUTPUT:
++ **
++ ** Nothing.
++ **
++ */
++ gctINT
++ (*MapUser)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN PLINUX_MDL_MAP MdlMap,
++ IN gctBOOL Cacheable
++ );
++
++ /**************************************************************************
++ **
++ ** UnmapUser
++ **
++ ** Unmap address from user address space.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++    **          Pointer to a gckALLOCATOR object.
++ **
++ ** gctPOINTER Logical
++    **          Address to be unmapped.
++ **
++ ** gctUINT32 Size
++    **          Size of the address space.
++ **
++ ** OUTPUT:
++ **
++ ** Nothing.
++ **
++ */
++ void
++ (*UnmapUser)(
++ IN gckALLOCATOR Allocator,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Size
++ );
++
++ /**************************************************************************
++ **
++ ** MapKernel
++ **
++ ** Map memory to kernel space.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++    **          Pointer to a gckALLOCATOR object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Pointer to a Mdl object.
++ **
++ ** OUTPUT:
++ ** gctPOINTER * Logical
++ ** Mapped kernel address.
++ */
++ gceSTATUS
++ (*MapKernel)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ OUT gctPOINTER *Logical
++ );
++
++ /**************************************************************************
++ **
++ ** UnmapKernel
++ **
++ ** Unmap memory from kernel space.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++    **          Pointer to a gckALLOCATOR object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Pointer to a Mdl object.
++ **
++ ** gctPOINTER Logical
++ ** Mapped kernel address.
++ **
++ ** OUTPUT:
++ **
++ ** Nothing.
++ **
++ */
++ gceSTATUS
++ (*UnmapKernel)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical
++ );
++
++ /**************************************************************************
++ **
++ ** LogicalToPhysical
++ **
++    **  Get the physical address from a logical address; the logical
++    **  address may be a user virtual address or a kernel virtual
++    **  address.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++    **          Pointer to a gckALLOCATOR object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Pointer to a Mdl object.
++ **
++ ** gctPOINTER Logical
++    **          Logical address to convert.
++ **
++ ** gctUINT32 ProcessID
++    **          PID of the current process.
++ ** OUTPUT:
++ **
++ ** gctUINT32_PTR Physical
++ ** Physical address.
++ **
++ */
++ gceSTATUS
++ (*LogicalToPhysical)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ OUT gctUINT32_PTR Physical
++ );
++
++ /**************************************************************************
++ **
++ ** Cache
++ **
++ ** Maintain cache coherency.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++    **          Pointer to a gckALLOCATOR object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Pointer to a Mdl object.
++ **
++ ** gctPOINTER Logical
++    **          Logical address; may be a user or kernel address.
++ **
++ ** gctUINT32_PTR Physical
++ ** Physical address.
++ **
++ ** gctUINT32 Bytes
++ ** Size of memory region.
++ **
++    **  gceCACHEOPERATION Operation
++ ** Cache operation.
++ **
++ ** OUTPUT:
++ **
++ ** Nothing.
++ **
++ */
++ gceSTATUS (*Cache)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Physical,
++ IN gctUINT32 Bytes,
++ IN gceCACHEOPERATION Operation
++ );
++
++ /**************************************************************************
++ **
++ ** Physical
++ **
++    **  Get the physical address from an offset in the memory region.
++ **
++ ** INPUT:
++ ** gckALLOCATOR Allocator
++    **          Pointer to a gckALLOCATOR object.
++ **
++ ** PLINUX_MDL Mdl
++ ** Pointer to a Mdl object.
++ **
++ ** gctUINT32 Offset
++ ** Offset in this memory region.
++ **
++ ** OUTPUT:
++ ** gctUINT32_PTR Physical
++ ** Physical address.
++ **
++ */
++ gceSTATUS (*Physical)(
++ IN gckALLOCATOR Allocator,
++ IN PLINUX_MDL Mdl,
++ IN gctUINT32 Offset,
++ OUT gctUINT32_PTR Physical
++ );
++}
++gcsALLOCATOR_OPERATIONS;
++
++typedef struct _gcsALLOCATOR
++{
++ /* Pointer to gckOS Object. */
++ gckOS os;
++
++ /* Name. */
++ gctSTRING name;
++
++ /* Operations. */
++ gcsALLOCATOR_OPERATIONS* ops;
++
++ /* Capability of this allocator. */
++ gctUINT32 capability;
++
++ struct list_head head;
++
++ /* Debugfs entry of this allocator. */
++ gcsDEBUGFS_DIR debugfsDir;
++
++ /* Init allocator debugfs. */
++ void (*debugfsInit)(gckALLOCATOR, gckDEBUGFS_DIR);
++
++ /* Cleanup allocator debugfs. */
++ void (*debugfsCleanup)(gckALLOCATOR);
++
++ /* Private data used by customer allocator. */
++ void * privateData;
++
++ /* Private data destructor. */
++ void (*privateDataDestructor)(void *);
++}
++gcsALLOCATOR;
++
++typedef struct _gcsALLOCATOR_DESC
++{
++    /* Name of an allocator. */
++ char * name;
++
++    /* Entry function to construct an allocator. */
++ gceSTATUS (*construct)(gckOS, gckALLOCATOR *);
++}
++gcsALLOCATOR_DESC;
++
++/*
++* Helpers
++*/
++
++/* Fill a gcsALLOCATOR_DESC structure. */
++#define gcmkDEFINE_ALLOCATOR_DESC(Name, Construct) \
++ { \
++ .name = Name, \
++ .construct = Construct, \
++ }
++
++/* Construct an allocator. */
++gceSTATUS
++gckALLOCATOR_Construct(
++ IN gckOS Os,
++ IN gcsALLOCATOR_OPERATIONS * Operations,
++ OUT gckALLOCATOR * Allocator
++ );
++
++/*
++    How to implement a customer allocator
++
++    Build in the customer allocator
++
++    It is recommended that a customer allocator be implemented in
++    independent source file(s), specified by CUSOMTER_ALLOCATOR_OBJS in
++    Kbuild.
++
++    Register gcsALLOCATOR
++
++    For each customer-specified allocator, a description entry must be
++    added to allocatorArray defined in gc_hal_kernel_allocator_array.h.
++
++    An entry in allocatorArray is a gcsALLOCATOR_DESC structure which
++    describes the name and constructor of a gckALLOCATOR object.
++
++
++    Implement gcsALLOCATOR_DESC.construct()
++
++    In gcsALLOCATOR_DESC.construct(), gckALLOCATOR_Construct should be
++    called to create a gckALLOCATOR object; customer-specified private
++    data can be put in gcsALLOCATOR.privateData.
++
++
++    Implement gcsALLOCATOR_OPERATIONS
++
++    When calling gckALLOCATOR_Construct to create a gckALLOCATOR object, a
++    gcsALLOCATOR_OPERATIONS structure must be provided with all of its
++    members implemented. A minimal skeleton following these steps is
++    sketched below.
++
++*/
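++
++/*
++    Illustrative skeleton (an assumption, not shipped code) following the
++    steps above for a hypothetical allocator named "myalloc". A real
++    operations table must implement every member, since
++    gckALLOCATOR_Construct verifies that all of them are non-NULL.
++
++        static gcsALLOCATOR_OPERATIONS myAllocatorOperations = {
++            .Alloc = _MyAlloc,
++            .Free  = _MyFree,
++            (remaining members, all required)
++        };
++
++        gceSTATUS
++        _MyAllocatorInit(IN gckOS Os, OUT gckALLOCATOR * Allocator)
++        {
++            gceSTATUS status;
++            gckALLOCATOR allocator;
++
++            gcmkONERROR(gckALLOCATOR_Construct(Os,
++                                               &myAllocatorOperations,
++                                               &allocator));
++
++            *Allocator = allocator;
++            return gcvSTATUS_OK;
++
++        OnError:
++            return status;
++        }
++
++    and, in gc_hal_kernel_allocator_array.h:
++
++        gcmkDEFINE_ALLOCATOR_DESC("myalloc", _MyAllocatorInit),
++*/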
++#endif
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debugfs.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debugfs.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debugfs.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debugfs.c 2015-07-27 23:13:06.222780123 +0200
+@@ -0,0 +1,795 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifdef MODULE
++#include <linux/module.h>
++#endif
++#include <linux/init.h>
++#include <linux/debugfs.h>
++#include <linux/slab.h>
++#ifdef MODVERSIONS
++#include <linux/modversions.h>
++#endif
++#include <linux/stddef.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/timer.h>
++#include <linux/delay.h>
++#include <linux/errno.h>
++#include <linux/mutex.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/poll.h>
++#include <asm/uaccess.h>
++#include <linux/completion.h>
++#include "gc_hal_kernel_linux.h"
++
++/*
++   Prerequisite:
++
++   1) The debugfs feature must be enabled in the kernel.
++   1.a) To enable it while building the uImage, run "make menuconfig" and
++   enable debugfs under the "Kernel hacking" part of the menu.
++
++   HOW TO USE:
++   1) Insert the driver with the logFileSize option, Ex: insmod galcore.ko ...... logFileSize=10240
++   This gives a circular buffer of 10 MB
++
++   2) Usually, after inserting the driver, the debug file system is mounted under /sys/kernel/debug/
++
++   2.a) If the debugfs is not mounted, you must do "mount -t debugfs none /sys/kernel/debug"
++
++   3) To read what is being printed in the debugfs file system:
++   Ex : cat /sys/kernel/debug/gpu/galcore_trace
++
++   4) To write into the debug file system from the user side:
++   Ex: echo "hello" > /sys/kernel/debug/gpu/galcore_trace
++
++   5) To write into debugfs from the kernel side, use the function gckDebugFileSystemPrint
++
++
++ USECASE Kernel Dump:
++
++ 1) Go to /hal/inc/gc_hal_options.h, and enable the following flags:
++ - # define gcdDUMP 1
++ - # define gcdDUMP_IN_KERNEL 1
++ - # define gcdDUMP_COMMAND 1
++
++ 2) Go to /hal/kernel/gc_hal_kernel_command.c and disable the following flag
++ -#define gcdSIMPLE_COMMAND_DUMP 0
++
++ 3) Compile the driver
++ 4) insmod it with the logFileSize option
++ 5) Run an application
++ 6) You can get the dump by cat /sys/kernel/debug/gpu/galcore_trace
++
++ */
++
++/**/
++typedef va_list gctDBGARGS ;
++#define gcmkARGS_START(argument, pointer) va_start(argument, pointer)
++#define gcmkARGS_END(argument) va_end(argument)
++
++#define gcmkDBGFSPRINT(ArgumentSize, Message) \
++ { \
++ gctDBGARGS __arguments__; \
++ gcmkARGS_START(__arguments__, Message); \
++ _DebugFSPrint(ArgumentSize, Message, __arguments__);\
++ gcmkARGS_END(__arguments__); \
++ }
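++
++/*
++    Illustrative sketch (an assumption, not driver code): gcmkDBGFSPRINT is
++    meant to be expanded inside a varargs function whose last fixed
++    parameter is the format string, for example a wrapper such as:
++
++        void gckDebugFileSystemPrint(gctCONST_STRING Message, ...)
++        {
++            gcmkDBGFSPRINT(_GetArgumentSize(Message), Message);
++        }
++*/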
++
++/*Debug File System Node Struct*/
++struct _gcsDebugFileSystemNode
++{
++ /*wait queues for read and write operations*/
++#if defined(DECLARE_WAIT_QUEUE_HEAD)
++ wait_queue_head_t read_q , write_q ;
++#else
++ struct wait_queue *read_q , *write_q ;
++#endif
++ struct dentry *parent ; /*parent directory*/
++ struct dentry *filen ; /*filename*/
++ struct semaphore sem ; /* mutual exclusion semaphore */
++ char *data ; /* The circular buffer data */
++ int size ; /* Size of the buffer pointed to by 'data' */
++ int refcount ; /* Files that have this buffer open */
++ int read_point ; /* Offset in circ. buffer of oldest data */
++ int write_point ; /* Offset in circ. buffer of newest data */
++ int offset ; /* Byte number of read_point in the stream */
++ struct _gcsDebugFileSystemNode *next ;
++} ;
++
++/* amount of data in the queue */
++#define gcmkNODE_QLEN(node) ( (node)->write_point >= (node)->read_point ? \
++ (node)->write_point - (node)->read_point : \
++ (node)->size - (node)->read_point + (node)->write_point)
++
++/* byte number of the last byte in the queue */
++#define gcmkNODE_FIRST_EMPTY_BYTE(node) ((node)->offset + gcmkNODE_QLEN(node))
++
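++/* Worked example: with size = 8, read_point = 6 and write_point = 2 the
++   buffer has wrapped, so gcmkNODE_QLEN = 8 - 6 + 2 = 4 bytes are queued
++   and gcmkNODE_FIRST_EMPTY_BYTE = offset + 4. */
++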
++/*Synchronization primitives*/
++#define gcmkNODE_READQ(node) (&((node)->read_q))
++#define gcmkNODE_WRITEQ(node) (&((node)->write_q))
++#define gcmkNODE_SEM(node) (&((node)->sem))
++
++/*Utilities*/
++#define gcmkMIN(x, y) ((x) < (y) ? (x) : y)
++
++/*Debug File System Struct*/
++typedef struct _gcsDebugFileSystem
++{
++ gcsDebugFileSystemNode* linkedlist ;
++ gcsDebugFileSystemNode* currentNode ;
++ int isInited ;
++} gcsDebugFileSystem ;
++
++
++/*debug file system*/
++static gcsDebugFileSystem gc_dbgfs ;
++
++
++
++/*******************************************************************************
++ **
++ ** READ & WRITE FUNCTIONS (START)
++ **
++ *******************************************************************************/
++
++/*******************************************************************************
++ **
++ ** _ReadFromNode
++ **
++ ** 1) Reads bytes out of a circular buffer with wraparound.
++ ** 2) Returns a caddr_t pointer to the data read, which the caller must free.
++ ** 3) Length is (a pointer to) the number of bytes to be read, which this
++ ** function sets to the number of bytes actually returned.
++ **
++ *******************************************************************************/
++static caddr_t
++_ReadFromNode (
++ gcsDebugFileSystemNode* Node ,
++ size_t *Length ,
++ loff_t *Offset
++ )
++{
++ caddr_t retval ;
++ int bytes_copied = 0 , n , start_point , remaining ;
++
++ /* is the user trying to read data that has already scrolled off? */
++ if ( *Offset < Node->offset )
++ {
++ *Offset = Node->offset ;
++ }
++
++ /* is the user trying to read past EOF? */
++ if ( *Offset >= gcmkNODE_FIRST_EMPTY_BYTE ( Node ) )
++ {
++ return NULL ;
++ }
++
++ /* find the smaller of the total bytes we have available and what
++ * the user is asking for */
++
++ *Length = gcmkMIN ( *Length , gcmkNODE_FIRST_EMPTY_BYTE ( Node ) - *Offset ) ;
++
++ remaining = * Length ;
++
++ /* figure out where to start based on user's Offset */
++ start_point = Node->read_point + ( *Offset - Node->offset ) ;
++
++ start_point = start_point % Node->size ;
++
++ /* allocate memory to return */
++ if ( ( retval = kmalloc ( sizeof (char ) * remaining , GFP_KERNEL ) ) == NULL )
++ return NULL ;
++
++ /* copy the (possibly noncontiguous) data to our buffer */
++ while ( remaining )
++ {
++ n = gcmkMIN ( remaining , Node->size - start_point ) ;
++ memcpy ( retval + bytes_copied , Node->data + start_point , n ) ;
++ bytes_copied += n ;
++ remaining -= n ;
++ start_point = ( start_point + n ) % Node->size ;
++ }
++
++ /* advance user's file pointer */
++ *Offset += * Length ;
++
++ return retval ;
++}
++
++/*******************************************************************************
++ **
++ ** _WriteToNode
++ **
++ ** 1) Writes to a circular buffer with wraparound.
++ ** 2) In case of an overflow, it overwrites the oldest unread data.
++ **
++ *********************************************************************************/
++static void
++_WriteToNode (
++ gcsDebugFileSystemNode* Node ,
++ caddr_t Buf ,
++ int Length
++ )
++{
++ int bytes_copied = 0 ;
++ int overflow = 0 ;
++ int n ;
++
++ if ( Length + gcmkNODE_QLEN ( Node ) >= ( Node->size - 1 ) )
++ {
++ overflow = 1 ;
++
++ /* in case of overflow, figure out where the new buffer will
++ * begin. we start by figuring out where the current buffer ENDS:
++ * node->parent->offset + gcmkNODE_QLEN. we then advance the end-offset
++ * by the Length of the current write, and work backwards to
++ * figure out what the oldest unoverwritten data will be (i.e.,
++ * size of the buffer). */
++ Node->offset = Node->offset + gcmkNODE_QLEN ( Node ) + Length
++ - Node->size + 1 ;
++ }
++
++ while ( Length )
++ {
++ /* how many contiguous bytes are available from the write point to
++ * the end of the circular buffer? */
++ n = gcmkMIN ( Length , Node->size - Node->write_point ) ;
++ memcpy ( Node->data + Node->write_point , Buf + bytes_copied , n ) ;
++ bytes_copied += n ;
++ Length -= n ;
++ Node->write_point = ( Node->write_point + n ) % Node->size ;
++ }
++
++ /* if there is an overflow, reset the read point to read whatever is
++ * the oldest data that we have, that has not yet been
++ * overwritten. */
++ if ( overflow )
++ {
++ Node->read_point = ( Node->write_point + 1 ) % Node->size ;
++ }
++}
++
++
++/*******************************************************************************
++ **
++ ** PRINTING UTILITY (START)
++ **
++ *******************************************************************************/
++
++/*******************************************************************************
++ **
++ ** _GetArgumentSize
++ **
++ **
++ *******************************************************************************/
++static gctINT
++_GetArgumentSize (
++ IN gctCONST_STRING Message
++ )
++{
++ gctINT i , count ;
++
++ for ( i = 0 , count = 0 ; Message[i] ; i += 1 )
++ {
++ if ( Message[i] == '%' )
++ {
++ count += 1 ;
++ }
++ }
++ return count * sizeof (unsigned int ) ;
++}
++
++/*******************************************************************************
++ **
++ ** _AppendString
++ **
++ **
++ *******************************************************************************/
++static ssize_t
++_AppendString (
++ IN gcsDebugFileSystemNode* Node ,
++ IN gctCONST_STRING String ,
++ IN int Length
++ )
++{
++ caddr_t message = NULL ;
++ int n ;
++
++ /* if the message is longer than the buffer, just take the beginning
++ * of it, in hopes that the reader (if any) will have time to read
++ * before we wrap around and obliterate it */
++ n = gcmkMIN ( Length , Node->size - 1 ) ;
++
++ /* make sure we have the memory for it */
++ if ( ( message = kmalloc ( n , GFP_KERNEL ) ) == NULL )
++ return - ENOMEM ;
++
++ /* copy into our temp buffer */
++ memcpy ( message , String , n ) ;
++
++ /* now copy it into the circular buffer and free our temp copy */
++ _WriteToNode ( Node , message , n ) ;
++ kfree ( message ) ;
++ return n ;
++}
++
++/*******************************************************************************
++ **
++ ** _DebugFSPrint
++ **
++ **
++ *******************************************************************************/
++static void
++_DebugFSPrint (
++ IN unsigned int ArgumentSize ,
++ IN const char* Message ,
++ IN gctDBGARGS Arguments
++
++ )
++{
++ char buffer[MAX_LINE_SIZE] ;
++ int len ;
++ down ( gcmkNODE_SEM ( gc_dbgfs.currentNode ) ) ;
++    len = vsnprintf ( buffer , sizeof (buffer ) , Message , *( va_list * ) & Arguments ) ;
++
++    /* vsnprintf returns the would-be length on truncation; clamp it so the
++     * terminator and the end-of-line added below stay inside the buffer. */
++    if ( len > (int) sizeof (buffer ) - 2 )
++    {
++        len = sizeof (buffer ) - 2 ;
++    }
++    buffer[len] = '\0' ;
++
++    /* Add end-of-line if missing. */
++    if ( len == 0 || buffer[len - 1] != '\n' )
++ {
++ buffer[len ++] = '\n' ;
++ buffer[len] = '\0' ;
++ }
++ _AppendString ( gc_dbgfs.currentNode , buffer , len ) ;
++ up ( gcmkNODE_SEM ( gc_dbgfs.currentNode ) ) ;
++ wake_up_interruptible ( gcmkNODE_READQ ( gc_dbgfs.currentNode ) ) ; /* blocked in read*/
++}
++
++/*******************************************************************************
++ **
++ ** LINUX SYSTEM FUNCTIONS (START)
++ **
++ *******************************************************************************/
++
++/*******************************************************************************
++ **
++ ** find the vivlog structure associated with an inode.
++ ** returns a pointer to the structure if found, NULL if not found
++ **
++ *******************************************************************************/
++static gcsDebugFileSystemNode*
++_GetNodeInfo (
++ IN struct inode *Inode
++ )
++{
++ gcsDebugFileSystemNode* node ;
++
++ if ( Inode == NULL )
++ return NULL ;
++
++ for ( node = gc_dbgfs.linkedlist ; node != NULL ; node = node->next )
++ if ( node->filen->d_inode->i_ino == Inode->i_ino )
++ return node ;
++
++ return NULL ;
++}
++
++/*******************************************************************************
++ **
++ ** _DebugFSRead
++ **
++ *******************************************************************************/
++static ssize_t
++_DebugFSRead (
++ struct file *file ,
++ char __user * buffer ,
++ size_t length ,
++ loff_t * offset
++ )
++{
++ int retval ;
++ caddr_t data_to_return ;
++ gcsDebugFileSystemNode* node ;
++ /* get the metadata about this emlog */
++ if ( ( node = _GetNodeInfo ( file->f_path.dentry->d_inode ) ) == NULL )
++ {
++ printk ( "debugfs_read: record not found\n" ) ;
++ return - EIO ;
++ }
++
++ if ( down_interruptible ( gcmkNODE_SEM ( node ) ) )
++ {
++ return - ERESTARTSYS ;
++ }
++
++ /* wait until there's data available (unless we do nonblocking reads) */
++ while ( *offset >= gcmkNODE_FIRST_EMPTY_BYTE ( node ) )
++ {
++ up ( gcmkNODE_SEM ( node ) ) ;
++ if ( file->f_flags & O_NONBLOCK )
++ {
++ return - EAGAIN ;
++ }
++ if ( wait_event_interruptible ( ( *( gcmkNODE_READQ ( node ) ) ) , ( *offset < gcmkNODE_FIRST_EMPTY_BYTE ( node ) ) ) )
++ {
++ return - ERESTARTSYS ; /* signal: tell the fs layer to handle it */
++ }
++ /* otherwise loop, but first reacquire the lock */
++ if ( down_interruptible ( gcmkNODE_SEM ( node ) ) )
++ {
++ return - ERESTARTSYS ;
++ }
++ }
++ data_to_return = _ReadFromNode ( node , &length , offset ) ;
++ if ( data_to_return == NULL )
++ {
++ retval = 0 ;
++ goto unlock ;
++ }
++ if ( copy_to_user ( buffer , data_to_return , length ) > 0 )
++ {
++ retval = - EFAULT ;
++ }
++ else
++ {
++ retval = length ;
++ }
++ kfree ( data_to_return ) ;
++unlock:
++ up ( gcmkNODE_SEM ( node ) ) ;
++ wake_up_interruptible ( gcmkNODE_WRITEQ ( node ) ) ;
++ return retval ;
++}
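++
++/* Illustrative userspace sketch (not part of this driver; headers and
++ * error handling omitted): with the default names used below by
++ * gc_hal_kernel_device.c ("gpu"/"galcore_trace") and debugfs mounted at
++ * /sys/kernel/debug, a blocking reader of this node could look like:
++ *
++ *     int fd = open("/sys/kernel/debug/gpu/galcore_trace", O_RDONLY);
++ *     char buf[4096];
++ *     ssize_t n;
++ *     while ((n = read(fd, buf, sizeof(buf))) > 0)
++ *         fwrite(buf, 1, n, stdout);
++ *
++ * read() blocks until the driver appends data, or fails with -EAGAIN when
++ * the file is opened with O_NONBLOCK, matching the logic above. */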
++
++/*******************************************************************************
++ **
++ ** _DebugFSWrite
++ **
++ *******************************************************************************/
++static ssize_t
++_DebugFSWrite (
++ struct file *file ,
++ const char __user * buffer ,
++ size_t length ,
++ loff_t * offset
++ )
++{
++ caddr_t message = NULL ;
++ int n ;
++    gcsDebugFileSystemNode *node ;
++
++ /* get the metadata about this log */
++ if ( ( node = _GetNodeInfo ( file->f_path.dentry->d_inode ) ) == NULL )
++ {
++ return - EIO ;
++ }
++
++ if ( down_interruptible ( gcmkNODE_SEM ( node ) ) )
++ {
++ return - ERESTARTSYS ;
++ }
++
++ /* if the message is longer than the buffer, just take the beginning
++ * of it, in hopes that the reader (if any) will have time to read
++ * before we wrap around and obliterate it */
++ n = gcmkMIN ( length , node->size - 1 ) ;
++
++ /* make sure we have the memory for it */
++ if ( ( message = kmalloc ( n , GFP_KERNEL ) ) == NULL )
++ {
++ up ( gcmkNODE_SEM ( node ) ) ;
++ return - ENOMEM ;
++ }
++
++ /* copy into our temp buffer */
++ if ( copy_from_user ( message , buffer , n ) > 0 )
++ {
++ up ( gcmkNODE_SEM ( node ) ) ;
++ kfree ( message ) ;
++ return - EFAULT ;
++ }
++
++ /* now copy it into the circular buffer and free our temp copy */
++ _WriteToNode ( node , message , n ) ;
++
++ kfree ( message ) ;
++ up ( gcmkNODE_SEM ( node ) ) ;
++
++ /* wake up any readers that might be waiting for the data. we call
++ * schedule in the vague hope that a reader will run before the
++ * writer's next write, to avoid losing data. */
++ wake_up_interruptible ( gcmkNODE_READQ ( node ) ) ;
++
++ return n ;
++}
++
++/*******************************************************************************
++ **
++ ** File Operations Table
++ **
++ *******************************************************************************/
++static const struct file_operations debugfs_operations = {
++ .owner = THIS_MODULE ,
++ .read = _DebugFSRead ,
++ .write = _DebugFSWrite ,
++} ;
++
++/*******************************************************************************
++ **
++ ** INTERFACE FUNCTIONS (START)
++ **
++ *******************************************************************************/
++
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemIsEnabled
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++
++
++gctINT
++gckDebugFileSystemIsEnabled ( void )
++{
++ return gc_dbgfs.isInited ;
++}
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemInitialize
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++
++gctINT
++gckDebugFileSystemInitialize ( void )
++{
++ if ( ! gc_dbgfs.isInited )
++ {
++ gc_dbgfs.linkedlist = gcvNULL ;
++ gc_dbgfs.currentNode = gcvNULL ;
++ gc_dbgfs.isInited = 1 ;
++ }
++ return gc_dbgfs.isInited ;
++}
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemTerminate
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++
++gctINT
++gckDebugFileSystemTerminate ( void )
++{
++ gcsDebugFileSystemNode * next = gcvNULL ;
++ gcsDebugFileSystemNode * temp = gcvNULL ;
++ if ( gc_dbgfs.isInited )
++ {
++ temp = gc_dbgfs.linkedlist ;
++ while ( temp != gcvNULL )
++ {
++ next = temp->next ;
++ gckDebugFileSystemFreeNode ( temp ) ;
++ kfree ( temp ) ;
++ temp = next ;
++ }
++ gc_dbgfs.isInited = 0 ;
++ }
++ return 0 ;
++}
++
++
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemCreateNode
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ **    gcsDebugFileSystemNode ** Node
++ **        Pointer to a variable receiving the gcsDebugFileSystemNode object pointer on
++ **        success.
++ *********************************************************************************/
++
++gctINT
++gckDebugFileSystemCreateNode (
++ IN gctINT SizeInKB ,
++ IN gctCONST_STRING ParentName ,
++ IN gctCONST_STRING NodeName ,
++ OUT gcsDebugFileSystemNode **Node
++ )
++{
++    gcsDebugFileSystemNode *node ;
++ /* allocate space for our metadata and initialize it */
++ if ( ( node = kmalloc ( sizeof (gcsDebugFileSystemNode ) , GFP_KERNEL ) ) == NULL )
++ goto struct_malloc_failed ;
++
++ /*Zero it out*/
++ memset ( node , 0 , sizeof (gcsDebugFileSystemNode ) ) ;
++
++ /*Init the sync primitives*/
++#if defined(DECLARE_WAIT_QUEUE_HEAD)
++    init_waitqueue_head ( gcmkNODE_READQ ( node ) ) ;
++    init_waitqueue_head ( gcmkNODE_WRITEQ ( node ) ) ;
++#else
++    init_waitqueue ( gcmkNODE_READQ ( node ) ) ;
++    init_waitqueue ( gcmkNODE_WRITEQ ( node ) ) ;
++#endif
++ sema_init ( gcmkNODE_SEM ( node ) , 1 ) ;
++ /*End the sync primitives*/
++
++
++ /* figure out how much of a buffer this should be and allocate the buffer */
++ node->size = 1024 * SizeInKB ;
++ if ( ( node->data = ( char * ) vmalloc ( sizeof (char ) * node->size ) ) == NULL )
++ goto data_malloc_failed ;
++
++    /* create the debug file system directory; bail out on failure */
++    node->parent = debugfs_create_dir ( ParentName , NULL ) ;
++    if ( node->parent == NULL )
++        goto dir_create_failed ;
++
++    /* create the file inside it */
++    node->filen = debugfs_create_file ( NodeName , S_IRUGO | S_IWUSR , node->parent , NULL ,
++            &debugfs_operations ) ;
++    if ( node->filen == NULL )
++        goto file_create_failed ;
++
++    /* add it to our linked list */
++    node->next = gc_dbgfs.linkedlist ;
++    gc_dbgfs.linkedlist = node ;
++
++    /* pass the struct back */
++    *Node = node ;
++    return 0 ;
++
++file_create_failed:
++    debugfs_remove ( node->parent ) ;
++dir_create_failed:
++    vfree ( node->data ) ;
++data_malloc_failed:
++ kfree ( node ) ;
++struct_malloc_failed:
++ return - ENOMEM ;
++}
++
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemFreeNode
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++void
++gckDebugFileSystemFreeNode (
++ IN gcsDebugFileSystemNode * Node
++ )
++{
++
++ gcsDebugFileSystemNode **ptr ;
++
++ if ( Node == NULL )
++ {
++ printk ( "null passed to free_vinfo\n" ) ;
++ return ;
++ }
++
++ down ( gcmkNODE_SEM ( Node ) ) ;
++ /*free data*/
++ vfree ( Node->data ) ;
++
++ /*Close Debug fs*/
++ if ( Node->filen )
++ {
++ debugfs_remove ( Node->filen ) ;
++ }
++ if ( Node->parent )
++ {
++ debugfs_remove ( Node->parent ) ;
++ }
++
++ /* now delete the node from the linked list */
++    ptr = & ( gc_dbgfs.linkedlist ) ;
++    while ( *ptr != Node )
++    {
++        if ( ! *ptr )
++        {
++            /* Node is not on the list; bail out without touching it. */
++            printk ( "corrupt info list!\n" ) ;
++            up ( gcmkNODE_SEM ( Node ) ) ;
++            return ;
++        }
++        ptr = & ( ( **ptr ).next ) ;
++    }
++    *ptr = Node->next ;
++ up ( gcmkNODE_SEM ( Node ) ) ;
++}
++
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemSetCurrentNode
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++void
++gckDebugFileSystemSetCurrentNode (
++ IN gcsDebugFileSystemNode * Node
++ )
++{
++ gc_dbgfs.currentNode = Node ;
++}
++
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemGetCurrentNode
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++void
++gckDebugFileSystemGetCurrentNode (
++ OUT gcsDebugFileSystemNode ** Node
++ )
++{
++ *Node = gc_dbgfs.currentNode ;
++}
++
++/*******************************************************************************
++ **
++ ** gckDebugFileSystemPrint
++ **
++ **
++ ** INPUT:
++ **
++ ** OUTPUT:
++ **
++ *******************************************************************************/
++void
++gckDebugFileSystemPrint (
++ IN gctCONST_STRING Message ,
++ ...
++ )
++{
++ gcmkDBGFSPRINT ( _GetArgumentSize ( Message ) , Message ) ;
++}
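++
++/* A minimal lifecycle sketch for this API, mirroring how the driver uses
++ * it in gckGALDEVICE_Construct/Destroy (the 64 KB size and the message
++ * below are illustrative values only):
++ *
++ *     gcsDebugFileSystemNode *node = gcvNULL;
++ *     gckDebugFileSystemInitialize();
++ *     if (gckDebugFileSystemCreateNode(64, "gpu", "galcore_trace", &node) == 0)
++ *         gckDebugFileSystemSetCurrentNode(node);
++ *     ...
++ *     gckDebugFileSystemPrint("core %d reset\n", core);
++ *     ...
++ *     gckDebugFileSystemFreeNode(node);
++ *     kfree(node);
++ *     gckDebugFileSystemTerminate();
++ */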
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debugfs.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debugfs.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debugfs.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debugfs.h 2015-07-27 23:13:06.222780123 +0200
+@@ -0,0 +1,84 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include <stdarg.h>
++
++#ifndef __gc_hal_kernel_debugfs_h_
++#define __gc_hal_kernel_debugfs_h_
++
++ #define MAX_LINE_SIZE 768 /* Max bytes for a line of debug info */
++
++
++ typedef struct _gcsDebugFileSystemNode gcsDebugFileSystemNode ;
++
++
++/*******************************************************************************
++ **
++ ** System Related
++ **
++ *******************************************************************************/
++
++gctINT gckDebugFileSystemIsEnabled(void);
++
++gctINT gckDebugFileSystemInitialize(void);
++
++gctINT gckDebugFileSystemTerminate(void);
++
++
++/*******************************************************************************
++ **
++ ** Node Related
++ **
++ *******************************************************************************/
++
++gctINT gckDebugFileSystemCreateNode(
++ IN gctINT SizeInKB,
++ IN gctCONST_STRING ParentName ,
++ IN gctCONST_STRING NodeName,
++ OUT gcsDebugFileSystemNode **Node
++ );
++
++
++void gckDebugFileSystemFreeNode(
++ IN gcsDebugFileSystemNode * Node
++ );
++
++
++
++void gckDebugFileSystemSetCurrentNode(
++ IN gcsDebugFileSystemNode * Node
++ );
++
++
++
++void gckDebugFileSystemGetCurrentNode(
++ OUT gcsDebugFileSystemNode ** Node
++ );
++
++
++void gckDebugFileSystemPrint(
++ IN gctCONST_STRING Message,
++ ...
++ );
++
++#endif
++
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debug.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debug.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debug.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_debug.h 2015-07-27 23:13:06.222780123 +0200
+@@ -0,0 +1,102 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_debug_h_
++#define __gc_hal_kernel_debug_h_
++
++#include <gc_hal_kernel_linux.h>
++#include <linux/spinlock.h>
++#include <linux/time.h>
++#include <stdarg.h>
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++/******************************************************************************\
++****************************** OS-dependent Macros *****************************
++\******************************************************************************/
++
++typedef va_list gctARGUMENTS;
++
++#define gcmkARGUMENTS_START(Arguments, Pointer) \
++ va_start(Arguments, Pointer)
++
++#define gcmkARGUMENTS_END(Arguments) \
++ va_end(Arguments)
++
++#define gcmkDECLARE_LOCK(__spinLock__) \
++ static DEFINE_SPINLOCK(__spinLock__);
++
++#define gcmkLOCKSECTION(__spinLock__) \
++ spin_lock(&__spinLock__)
++
++#define gcmkUNLOCKSECTION(__spinLock__) \
++ spin_unlock(&__spinLock__)
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++# define gcmkGETPROCESSID() \
++ task_tgid_vnr(current)
++#else
++# define gcmkGETPROCESSID() \
++ current->tgid
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++# define gcmkGETTHREADID() \
++ task_pid_vnr(current)
++#else
++# define gcmkGETTHREADID() \
++ current->pid
++#endif
++
++#define gcmkOUTPUT_STRING(String) \
++    do { \
++        if (gckDebugFileSystemIsEnabled()) \
++            gckDebugFileSystemPrint(String); \
++        else \
++            printk(String); \
++        touch_softlockup_watchdog(); \
++    } while (0)
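++
++/* Illustrative only: the do { ... } while (0) wrapper keeps the macro safe
++ * as a single statement, so a caller such as
++ *
++ *     if (error)
++ *         gcmkOUTPUT_STRING("commit failed\n");
++ *     else
++ *         gcmkOUTPUT_STRING("commit done\n");
++ *
++ * parses as intended; a bare if/else expansion with a trailing call would
++ * not. */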
++
++
++#define gcmkSPRINTF(Destination, Size, Message, Value) \
++ snprintf(Destination, Size, Message, Value)
++
++#define gcmkSPRINTF2(Destination, Size, Message, Value1, Value2) \
++ snprintf(Destination, Size, Message, Value1, Value2)
++
++#define gcmkSPRINTF3(Destination, Size, Message, Value1, Value2, Value3) \
++ snprintf(Destination, Size, Message, Value1, Value2, Value3)
++
++#define gcmkVSPRINTF(Destination, Size, Message, Arguments) \
++ vsnprintf(Destination, Size, Message, *(va_list *) &Arguments)
++
++#define gcmkSTRCAT(Destination, Size, String) \
++ strncat(Destination, String, Size)
++
++/* If not zero, forces data alignment in the variable argument list
++ by its individual size. */
++#define gcdALIGNBYSIZE 1
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __gc_hal_kernel_debug_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_device.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_device.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_device.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_device.c 2015-07-27 23:13:06.222780123 +0200
+@@ -0,0 +1,1676 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/slab.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
++#include <mach/hardware.h>
++#endif
++#include <linux/pm_runtime.h>
++
++#define _GC_OBJ_ZONE gcvZONE_DEVICE
++
++#define DEBUG_FILE "galcore_trace"
++#define PARENT_FILE "gpu"
++
++
++#ifdef FLAREON
++ static struct dove_gpio_irq_handler gc500_handle;
++#endif
++
++#define gcmIS_CORE_PRESENT(Device, Core) (Device->irqLines[Core] > 0)
++
++/******************************************************************************\
++*************************** Memory Allocation Wrappers *************************
++\******************************************************************************/
++
++static gceSTATUS
++_AllocateMemory(
++ IN gckGALDEVICE Device,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER *Logical,
++ OUT gctPHYS_ADDR *Physical,
++ OUT gctUINT32 *PhysAddr
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x Bytes=%lu", Device, Bytes);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++ gcmkVERIFY_ARGUMENT(Logical != NULL);
++ gcmkVERIFY_ARGUMENT(Physical != NULL);
++ gcmkVERIFY_ARGUMENT(PhysAddr != NULL);
++
++ gcmkONERROR(gckOS_AllocateContiguous(
++ Device->os, gcvFALSE, &Bytes, Physical, Logical
++ ));
++
++ *PhysAddr = ((PLINUX_MDL)*Physical)->dmaHandle - Device->baseAddress;
++
++ /* Success. */
++ gcmkFOOTER_ARG(
++ "*Logical=0x%x *Physical=0x%x *PhysAddr=0x%08x",
++ *Logical, *Physical, *PhysAddr
++ );
++
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++_FreeMemory(
++ IN gckGALDEVICE Device,
++ IN gctPOINTER Logical,
++ IN gctPHYS_ADDR Physical)
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x Logical=0x%x Physical=0x%x",
++ Device, Logical, Physical);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ status = gckOS_FreeContiguous(
++ Device->os, Physical, Logical,
++ ((PLINUX_MDL) Physical)->numPages * PAGE_SIZE
++ );
++
++ gcmkFOOTER();
++ return status;
++}
++
++
++
++/******************************************************************************\
++******************************* Interrupt Handler ******************************
++\******************************************************************************/
++static irqreturn_t isrRoutine(int irq, void *ctxt)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Call kernel interrupt notification. */
++ status = gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR], gcvNOTIFY_INTERRUPT, gcvTRUE);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ device->dataReadys[gcvCORE_MAJOR] = gcvTRUE;
++
++ up(&device->semas[gcvCORE_MAJOR]);
++
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int threadRoutine(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++        int down;
++
++        down = down_interruptible(&device->semas[gcvCORE_MAJOR]);
++        (void) down; /* only to keep gcc 4.6 happy about the unused result */
++ device->dataReadys[gcvCORE_MAJOR] = gcvFALSE;
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++
++ gckKERNEL_Notify(device->kernels[gcvCORE_MAJOR], gcvNOTIFY_INTERRUPT, gcvFALSE);
++ }
++}
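++
++/* The ISR/thread pairs in this file follow the usual split-handler
++ * pattern: the hard IRQ handler (isrRoutine above) only notifies the
++ * kernel object and ups a semaphore, while the heavier event processing
++ * runs in this dedicated kthread, where sleeping is allowed. */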
++
++static irqreturn_t isrRoutine2D(int irq, void *ctxt)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Call kernel interrupt notification. */
++ status = gckKERNEL_Notify(device->kernels[gcvCORE_2D], gcvNOTIFY_INTERRUPT, gcvTRUE);
++
++ if (gcmIS_SUCCESS(status))
++ {
++ device->dataReadys[gcvCORE_2D] = gcvTRUE;
++
++ up(&device->semas[gcvCORE_2D]);
++
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int threadRoutine2D(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++        int down;
++
++        down = down_interruptible(&device->semas[gcvCORE_2D]);
++        (void) down; /* only to keep gcc 4.6 happy about the unused result */
++ device->dataReadys[gcvCORE_2D] = gcvFALSE;
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++
++ gckKERNEL_Notify(device->kernels[gcvCORE_2D], gcvNOTIFY_INTERRUPT, gcvFALSE);
++ }
++}
++
++static irqreturn_t isrRoutineVG(int irq, void *ctxt)
++{
++#if gcdENABLE_VG
++ gceSTATUS status;
++ gckGALDEVICE device;
++
++ device = (gckGALDEVICE) ctxt;
++
++ /* Serve the interrupt. */
++ status = gckVGINTERRUPT_Enque(device->kernels[gcvCORE_VG]->vg->interrupt);
++
++ /* Determine the return value. */
++ return (status == gcvSTATUS_NOT_OUR_INTERRUPT)
++ ? IRQ_RETVAL(0)
++ : IRQ_RETVAL(1);
++#else
++ return IRQ_NONE;
++#endif
++}
++
++static int threadRoutineVG(void *ctxt)
++{
++ gckGALDEVICE device = (gckGALDEVICE) ctxt;
++
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Starting isr Thread with extension=%p",
++ device);
++
++ for (;;)
++ {
++        int down;
++
++        down = down_interruptible(&device->semas[gcvCORE_VG]);
++        (void) down; /* only to keep gcc 4.6 happy about the unused result */
++ device->dataReadys[gcvCORE_VG] = gcvFALSE;
++
++ if (device->killThread == gcvTRUE)
++ {
++ /* The daemon exits. */
++ while (!kthread_should_stop())
++ {
++ gckOS_Delay(device->os, 1);
++ }
++
++ return 0;
++ }
++
++ gckKERNEL_Notify(device->kernels[gcvCORE_VG], gcvNOTIFY_INTERRUPT, gcvFALSE);
++ }
++}
++
++/******************************************************************************\
++******************************* gckGALDEVICE Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Construct
++**
++** Constructor.
++**
++** INPUT:
++**
++** OUTPUT:
++**
++** gckGALDEVICE * Device
++** Pointer to a variable receiving the gckGALDEVICE object pointer on
++** success.
++*/
++gceSTATUS
++gckGALDEVICE_Construct(
++ IN gctINT IrqLine,
++ IN gctUINT32 RegisterMemBase,
++ IN gctSIZE_T RegisterMemSize,
++ IN gctINT IrqLine2D,
++ IN gctUINT32 RegisterMemBase2D,
++ IN gctSIZE_T RegisterMemSize2D,
++ IN gctINT IrqLineVG,
++ IN gctUINT32 RegisterMemBaseVG,
++ IN gctSIZE_T RegisterMemSizeVG,
++ IN gctUINT32 ContiguousBase,
++ IN gctSIZE_T ContiguousSize,
++ IN gctSIZE_T BankSize,
++ IN gctINT FastClear,
++ IN gctINT Compression,
++ IN gctUINT32 PhysBaseAddr,
++ IN gctUINT32 PhysSize,
++ IN gctINT Signal,
++ IN gctUINT LogFileSize,
++ IN struct device *pdev,
++ IN gctINT PowerManagement,
++ IN gctINT GpuProfiler,
++ OUT gckGALDEVICE *Device
++ )
++{
++ gctUINT32 internalBaseAddress = 0, internalAlignment = 0;
++ gctUINT32 externalBaseAddress = 0, externalAlignment = 0;
++ gctUINT32 horizontalTileSize, verticalTileSize;
++ struct resource* mem_region;
++ gctUINT32 physAddr;
++ gctUINT32 physical;
++ gckGALDEVICE device;
++ gceSTATUS status;
++ gctINT32 i;
++ gceHARDWARE_TYPE type;
++ gckDB sharedDB = gcvNULL;
++ gckKERNEL kernel = gcvNULL;
++
++ gcmkHEADER_ARG("IrqLine=%d RegisterMemBase=0x%08x RegisterMemSize=%u "
++ "IrqLine2D=%d RegisterMemBase2D=0x%08x RegisterMemSize2D=%u "
++ "IrqLineVG=%d RegisterMemBaseVG=0x%08x RegisterMemSizeVG=%u "
++ "ContiguousBase=0x%08x ContiguousSize=%lu BankSize=%lu "
++ "FastClear=%d Compression=%d PhysBaseAddr=0x%x PhysSize=%d Signal=%d",
++ IrqLine, RegisterMemBase, RegisterMemSize,
++ IrqLine2D, RegisterMemBase2D, RegisterMemSize2D,
++ IrqLineVG, RegisterMemBaseVG, RegisterMemSizeVG,
++ ContiguousBase, ContiguousSize, BankSize, FastClear, Compression,
++ PhysBaseAddr, PhysSize, Signal);
++
++ /* Allocate device structure. */
++ device = kmalloc(sizeof(struct _gckGALDEVICE), GFP_KERNEL);
++
++ if (!device)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ memset(device, 0, sizeof(struct _gckGALDEVICE));
++
++ device->dbgnode = gcvNULL;
++ if(LogFileSize != 0)
++ {
++ if(gckDebugFileSystemCreateNode(LogFileSize,PARENT_FILE,DEBUG_FILE,&(device->dbgnode)) != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to create the debug file system %s/%s \n",
++ __FUNCTION__, __LINE__,
++ PARENT_FILE, DEBUG_FILE
++ );
++ }
++ else
++ {
++ /*Everything is OK*/
++ gckDebugFileSystemSetCurrentNode(device->dbgnode);
++ }
++ }
++#ifdef CONFIG_PM
++ /*Init runtime pm for gpu*/
++ pm_runtime_enable(pdev);
++ device->pmdev = pdev;
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ /*get gpu regulator*/
++ device->gpu_regulator = regulator_get(pdev, "cpu_vddgpu");
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ device->gpu_regulator = devm_regulator_get(pdev, "pu");
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ if (IS_ERR(device->gpu_regulator)) {
++ gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to get gpu regulator %s/%s \n",
++ __FUNCTION__, __LINE__,
++ PARENT_FILE, DEBUG_FILE);
++ gcmkONERROR(gcvSTATUS_NOT_FOUND);
++ }
++#endif
++ /*Initialize the clock structure*/
++ if (IrqLine != -1) {
++ device->clk_3d_core = clk_get(pdev, "gpu3d_clk");
++ if (!IS_ERR(device->clk_3d_core)) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ if (cpu_is_mx6q()) {
++ device->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk");
++ if (IS_ERR(device->clk_3d_shader)) {
++ IrqLine = -1;
++ clk_put(device->clk_3d_core);
++ device->clk_3d_core = NULL;
++ device->clk_3d_shader = NULL;
++ gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n");
++ }
++ }
++#else
++ device->clk_3d_axi = clk_get(pdev, "gpu3d_axi_clk");
++ device->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk");
++ if (IS_ERR(device->clk_3d_shader)) {
++ IrqLine = -1;
++ clk_put(device->clk_3d_core);
++ device->clk_3d_core = NULL;
++ device->clk_3d_shader = NULL;
++ gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n");
++ }
++#endif
++ } else {
++ IrqLine = -1;
++ device->clk_3d_core = NULL;
++ gckOS_Print("galcore: clk_get gpu3d_clk failed, disable 3d!\n");
++ }
++ }
++ if ((IrqLine2D != -1) || (IrqLineVG != -1)) {
++ device->clk_2d_core = clk_get(pdev, "gpu2d_clk");
++ if (IS_ERR(device->clk_2d_core)) {
++ IrqLine2D = -1;
++ IrqLineVG = -1;
++ device->clk_2d_core = NULL;
++ gckOS_Print("galcore: clk_get 2d core clock failed, disable 2d/vg!\n");
++ } else {
++ if (IrqLine2D != -1) {
++ device->clk_2d_axi = clk_get(pdev, "gpu2d_axi_clk");
++ if (IS_ERR(device->clk_2d_axi)) {
++ device->clk_2d_axi = NULL;
++ IrqLine2D = -1;
++ gckOS_Print("galcore: clk_get 2d axi clock failed, disable 2d\n");
++ }
++ }
++ if (IrqLineVG != -1) {
++ device->clk_vg_axi = clk_get(pdev, "openvg_axi_clk");
++ if (IS_ERR(device->clk_vg_axi)) {
++ IrqLineVG = -1;
++ device->clk_vg_axi = NULL;
++ gckOS_Print("galcore: clk_get vg clock failed, disable vg!\n");
++ }
++ }
++ }
++ }
++
++ if (IrqLine != -1)
++ {
++ device->requestedRegisterMemBases[gcvCORE_MAJOR] = RegisterMemBase;
++ device->requestedRegisterMemSizes[gcvCORE_MAJOR] = RegisterMemSize;
++ }
++
++ if (IrqLine2D != -1)
++ {
++ device->requestedRegisterMemBases[gcvCORE_2D] = RegisterMemBase2D;
++ device->requestedRegisterMemSizes[gcvCORE_2D] = RegisterMemSize2D;
++ }
++
++ if (IrqLineVG != -1)
++ {
++ device->requestedRegisterMemBases[gcvCORE_VG] = RegisterMemBaseVG;
++ device->requestedRegisterMemSizes[gcvCORE_VG] = RegisterMemSizeVG;
++ }
++
++ device->requestedContiguousBase = 0;
++ device->requestedContiguousSize = 0;
++
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ physical = device->requestedRegisterMemBases[i];
++
++ /* Set up register memory region. */
++ if (physical != 0)
++ {
++ mem_region = request_mem_region(
++ physical, device->requestedRegisterMemSizes[i], "galcore register region"
++ );
++
++ if (mem_region == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to claim %lu bytes @ 0x%08X\n",
++ __FUNCTION__, __LINE__,
++ physical, device->requestedRegisterMemSizes[i]
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ device->registerBases[i] = (gctPOINTER) ioremap_nocache(
++ physical, device->requestedRegisterMemSizes[i]);
++
++ if (device->registerBases[i] == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Unable to map %ld bytes @ 0x%08X\n",
++ __FUNCTION__, __LINE__,
++ physical, device->requestedRegisterMemSizes[i]
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ physical += device->requestedRegisterMemSizes[i];
++ }
++ else
++ {
++ device->registerBases[i] = gcvNULL;
++ }
++ }
++
++ /* Set the base address */
++ device->baseAddress = PhysBaseAddr;
++
++ /* Construct the gckOS object. */
++ gcmkONERROR(gckOS_Construct(device, &device->os));
++
++ if (IrqLine != -1)
++ {
++ /* Construct the gckKERNEL object. */
++ gcmkONERROR(gckKERNEL_Construct(
++ device->os, gcvCORE_MAJOR, device,
++ gcvNULL, &device->kernels[gcvCORE_MAJOR]));
++
++ sharedDB = device->kernels[gcvCORE_MAJOR]->db;
++
++ /* Initialize core mapping */
++ for (i = 0; i < 8; i++)
++ {
++ device->coreMapping[i] = gcvCORE_MAJOR;
++ }
++
++ /* Setup the ISR manager. */
++ gcmkONERROR(gckHARDWARE_SetIsrManager(
++ device->kernels[gcvCORE_MAJOR]->hardware,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Enable_ISR,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Disable_ISR,
++ device
++ ));
++
++ gcmkONERROR(gckHARDWARE_SetFastClear(
++ device->kernels[gcvCORE_MAJOR]->hardware, FastClear, Compression
++ ));
++
++ gcmkONERROR(gckHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_MAJOR]->hardware, PowerManagement
++ ));
++
++ gcmkONERROR(gckHARDWARE_SetGpuProfiler(
++ device->kernels[gcvCORE_MAJOR]->hardware, GpuProfiler
++ ));
++
++#if COMMAND_PROCESSOR_VERSION == 1
++ /* Start the command queue. */
++ gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_MAJOR]->command));
++#endif
++ }
++ else
++ {
++ device->kernels[gcvCORE_MAJOR] = gcvNULL;
++ }
++
++ if (IrqLine2D != -1)
++ {
++ gcmkONERROR(gckKERNEL_Construct(
++ device->os, gcvCORE_2D, device,
++ sharedDB, &device->kernels[gcvCORE_2D]));
++
++ if (sharedDB == gcvNULL) sharedDB = device->kernels[gcvCORE_2D]->db;
++
++ /* Verify the hardware type */
++ gcmkONERROR(gckHARDWARE_GetType(device->kernels[gcvCORE_2D]->hardware, &type));
++
++ if (type != gcvHARDWARE_2D)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Unexpected hardware type: %d\n",
++ __FUNCTION__, __LINE__,
++ type
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Initialize core mapping */
++ if (device->kernels[gcvCORE_MAJOR] == gcvNULL)
++ {
++ for (i = 0; i < 8; i++)
++ {
++ device->coreMapping[i] = gcvCORE_2D;
++ }
++ }
++ else
++ {
++ device->coreMapping[gcvHARDWARE_2D] = gcvCORE_2D;
++ }
++
++ /* Setup the ISR manager. */
++ gcmkONERROR(gckHARDWARE_SetIsrManager(
++ device->kernels[gcvCORE_2D]->hardware,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Enable_ISR,
++ (gctISRMANAGERFUNC) gckGALDEVICE_Disable_ISR,
++ device
++ ));
++
++ gcmkONERROR(gckHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_2D]->hardware, PowerManagement
++ ));
++
++
++#if COMMAND_PROCESSOR_VERSION == 1
++ /* Start the command queue. */
++ gcmkONERROR(gckCOMMAND_Start(device->kernels[gcvCORE_2D]->command));
++#endif
++ }
++ else
++ {
++ device->kernels[gcvCORE_2D] = gcvNULL;
++ }
++
++ if (IrqLineVG != -1)
++ {
++#if gcdENABLE_VG
++ gcmkONERROR(gckKERNEL_Construct(
++ device->os, gcvCORE_VG, device,
++ sharedDB, &device->kernels[gcvCORE_VG]));
++ /* Initialize core mapping */
++ if (device->kernels[gcvCORE_MAJOR] == gcvNULL
++ && device->kernels[gcvCORE_2D] == gcvNULL
++ )
++ {
++ for (i = 0; i < 8; i++)
++ {
++ device->coreMapping[i] = gcvCORE_VG;
++ }
++ }
++ else
++ {
++ device->coreMapping[gcvHARDWARE_VG] = gcvCORE_VG;
++ }
++
++
++ gcmkONERROR(gckVGHARDWARE_SetPowerManagement(
++ device->kernels[gcvCORE_VG]->vg->hardware,
++ PowerManagement
++ ));
++
++#endif
++ }
++ else
++ {
++ device->kernels[gcvCORE_VG] = gcvNULL;
++ }
++
++ /* Initialize the ISR. */
++ device->irqLines[gcvCORE_MAJOR] = IrqLine;
++ device->irqLines[gcvCORE_2D] = IrqLine2D;
++ device->irqLines[gcvCORE_VG] = IrqLineVG;
++
++ /* Initialize the kernel thread semaphores. */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->irqLines[i] != -1) sema_init(&device->semas[i], 0);
++ }
++
++ device->signal = Signal;
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL) break;
++ }
++
++ if (i == gcdMAX_GPU_COUNT)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ /* Query the ceiling of the system memory. */
++ gcmkONERROR(gckVGHARDWARE_QuerySystemMemory(
++ device->kernels[i]->vg->hardware,
++ &device->systemMemorySize,
++ &device->systemMemoryBaseAddress
++ ));
++ /* query the amount of video memory */
++ gcmkONERROR(gckVGHARDWARE_QueryMemory(
++ device->kernels[i]->vg->hardware,
++ &device->internalSize, &internalBaseAddress, &internalAlignment,
++ &device->externalSize, &externalBaseAddress, &externalAlignment,
++ &horizontalTileSize, &verticalTileSize
++ ));
++ }
++ else
++#endif
++ {
++ /* Query the ceiling of the system memory. */
++ gcmkONERROR(gckHARDWARE_QuerySystemMemory(
++ device->kernels[i]->hardware,
++ &device->systemMemorySize,
++ &device->systemMemoryBaseAddress
++ ));
++
++ /* query the amount of video memory */
++ gcmkONERROR(gckHARDWARE_QueryMemory(
++ device->kernels[i]->hardware,
++ &device->internalSize, &internalBaseAddress, &internalAlignment,
++ &device->externalSize, &externalBaseAddress, &externalAlignment,
++ &horizontalTileSize, &verticalTileSize
++ ));
++ }
++
++
++    /* Grab the first available kernel */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->irqLines[i] != -1)
++ {
++ kernel = device->kernels[i];
++ break;
++ }
++ }
++
++ /* Set up the internal memory region. */
++ if (device->internalSize > 0)
++ {
++ status = gckVIDMEM_Construct(
++ device->os,
++ internalBaseAddress, device->internalSize, internalAlignment,
++ 0, &device->internalVidMem
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Error, disable internal heap. */
++ device->internalSize = 0;
++ }
++ else
++ {
++ /* Map internal memory. */
++ device->internalLogical
++ = (gctPOINTER) ioremap_nocache(physical, device->internalSize);
++
++ if (device->internalLogical == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ device->internalPhysical = (gctPHYS_ADDR)(gctUINTPTR_T) physical;
++ device->internalPhysicalName = gcmPTR_TO_NAME(device->internalPhysical);
++ physical += device->internalSize;
++ }
++ }
++
++ if (device->externalSize > 0)
++ {
++ /* create the external memory heap */
++ status = gckVIDMEM_Construct(
++ device->os,
++ externalBaseAddress, device->externalSize, externalAlignment,
++ 0, &device->externalVidMem
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Error, disable internal heap. */
++ device->externalSize = 0;
++ }
++ else
++ {
++ /* Map external memory. */
++ device->externalLogical
++ = (gctPOINTER) ioremap_nocache(physical, device->externalSize);
++
++ if (device->externalLogical == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ device->externalPhysical = (gctPHYS_ADDR)(gctUINTPTR_T) physical;
++ device->externalPhysicalName = gcmPTR_TO_NAME(device->externalPhysical);
++ physical += device->externalSize;
++ }
++ }
++
++ /* set up the contiguous memory */
++ device->contiguousSize = ContiguousSize;
++
++ if (ContiguousSize > 0)
++ {
++ if (ContiguousBase == 0)
++ {
++ while (device->contiguousSize > 0)
++ {
++ /* Allocate contiguous memory. */
++ status = _AllocateMemory(
++ device,
++ device->contiguousSize,
++ &device->contiguousBase,
++ &device->contiguousPhysical,
++ &physAddr
++ );
++
++ if (gcmIS_SUCCESS(status))
++ {
++ device->contiguousPhysicalName = gcmPTR_TO_NAME(device->contiguousPhysical);
++ status = gckVIDMEM_Construct(
++ device->os,
++ physAddr | device->systemMemoryBaseAddress,
++ device->contiguousSize,
++ 64,
++ BankSize,
++ &device->contiguousVidMem
++ );
++
++ if (gcmIS_SUCCESS(status))
++ {
++ break;
++ }
++
++ gcmkONERROR(_FreeMemory(
++ device,
++ device->contiguousBase,
++ device->contiguousPhysical
++ ));
++
++ gcmRELEASE_NAME(device->contiguousPhysicalName);
++ device->contiguousBase = gcvNULL;
++ device->contiguousPhysical = gcvNULL;
++ }
++
++ if (device->contiguousSize <= (4 << 20))
++ {
++ device->contiguousSize = 0;
++ }
++ else
++ {
++ device->contiguousSize -= (4 << 20);
++ }
++ }
++ }
++ else
++ {
++ /* Create the contiguous memory heap. */
++ status = gckVIDMEM_Construct(
++ device->os,
++ ContiguousBase | device->systemMemoryBaseAddress,
++ ContiguousSize,
++ 64, BankSize,
++ &device->contiguousVidMem
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Error, disable contiguous memory pool. */
++ device->contiguousVidMem = gcvNULL;
++ device->contiguousSize = 0;
++ }
++ else
++ {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
++ mem_region = request_mem_region(
++ ContiguousBase, ContiguousSize, "galcore managed memory"
++ );
++
++ if (mem_region == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to claim %ld bytes @ 0x%08X\n",
++ __FUNCTION__, __LINE__,
++ ContiguousSize, ContiguousBase
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++#endif
++
++ device->requestedContiguousBase = ContiguousBase;
++ device->requestedContiguousSize = ContiguousSize;
++
++#if !gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
++ if (gcmIS_CORE_PRESENT(device, gcvCORE_VG))
++ {
++ device->contiguousBase
++#if gcdPAGED_MEMORY_CACHEABLE
++ = (gctPOINTER) ioremap_cached(ContiguousBase, ContiguousSize);
++#else
++ = (gctPOINTER) ioremap_nocache(ContiguousBase, ContiguousSize);
++#endif
++ if (device->contiguousBase == gcvNULL)
++ {
++ device->contiguousVidMem = gcvNULL;
++ device->contiguousSize = 0;
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++#endif
++
++ device->contiguousPhysical = gcvNULL;
++ device->contiguousPhysicalName = 0;
++ device->contiguousSize = ContiguousSize;
++ device->contiguousMapped = gcvTRUE;
++ }
++ }
++ }
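++
++    /* Allocation note (descriptive only): when no ContiguousBase is given,
++     * the while loop above retries _AllocateMemory, shrinking the request
++     * by 4 MB (4 << 20) per round - e.g. 128 MB, 124 MB, 120 MB, ... -
++     * until an allocation succeeds or the size reaches zero. */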
++
++ /* Return pointer to the device. */
++ * Device = device;
++
++ gcmkFOOTER_ARG("*Device=0x%x", * Device);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Roll back. */
++ gcmkVERIFY_OK(gckGALDEVICE_Destroy(device));
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Destroy
++**
++** Class destructor.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** Nothing.
++*/
++gceSTATUS
++gckGALDEVICE_Destroy(
++ gckGALDEVICE Device)
++{
++ gctINT i;
++ gceSTATUS status = gcvSTATUS_OK;
++ gckKERNEL kernel = gcvNULL;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ if (Device != gcvNULL)
++ {
++        /* Grab the first available kernel */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (Device->irqLines[i] != -1)
++ {
++ kernel = Device->kernels[i];
++ break;
++ }
++ }
++ if (Device->internalPhysicalName != 0)
++ {
++ gcmRELEASE_NAME(Device->internalPhysicalName);
++ Device->internalPhysicalName = 0;
++ }
++ if (Device->externalPhysicalName != 0)
++ {
++ gcmRELEASE_NAME(Device->externalPhysicalName);
++ Device->externalPhysicalName = 0;
++ }
++ if (Device->contiguousPhysicalName != 0)
++ {
++ gcmRELEASE_NAME(Device->contiguousPhysicalName);
++ Device->contiguousPhysicalName = 0;
++ }
++
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (Device->kernels[i] != gcvNULL)
++ {
++ /* Destroy the gckKERNEL object. */
++ gcmkVERIFY_OK(gckKERNEL_Destroy(Device->kernels[i]));
++ Device->kernels[i] = gcvNULL;
++ }
++ }
++
++ {
++ if (Device->internalLogical != gcvNULL)
++ {
++ /* Unmap the internal memory. */
++ iounmap(Device->internalLogical);
++ Device->internalLogical = gcvNULL;
++ }
++
++ if (Device->internalVidMem != gcvNULL)
++ {
++ /* Destroy the internal heap. */
++ gcmkVERIFY_OK(gckVIDMEM_Destroy(Device->internalVidMem));
++ Device->internalVidMem = gcvNULL;
++ }
++ }
++
++ {
++ if (Device->externalLogical != gcvNULL)
++ {
++ /* Unmap the external memory. */
++ iounmap(Device->externalLogical);
++ Device->externalLogical = gcvNULL;
++ }
++
++ if (Device->externalVidMem != gcvNULL)
++ {
++ /* destroy the external heap */
++ gcmkVERIFY_OK(gckVIDMEM_Destroy(Device->externalVidMem));
++ Device->externalVidMem = gcvNULL;
++ }
++ }
++
++ {
++ if (Device->contiguousBase != gcvNULL)
++ {
++ if (Device->contiguousMapped)
++ {
++#if !gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
++ if (Device->contiguousBase)
++ {
++ /* Unmap the contiguous memory. */
++ iounmap(Device->contiguousBase);
++ }
++#endif
++ }
++ else
++ {
++ gcmkONERROR(_FreeMemory(
++ Device,
++ Device->contiguousBase,
++ Device->contiguousPhysical
++ ));
++ }
++
++ Device->contiguousBase = gcvNULL;
++ Device->contiguousPhysical = gcvNULL;
++ }
++
++ if (Device->requestedContiguousBase != 0)
++ {
++ release_mem_region(Device->requestedContiguousBase, Device->requestedContiguousSize);
++ Device->requestedContiguousBase = 0;
++ Device->requestedContiguousSize = 0;
++ }
++
++ if (Device->contiguousVidMem != gcvNULL)
++ {
++ /* Destroy the contiguous heap. */
++ gcmkVERIFY_OK(gckVIDMEM_Destroy(Device->contiguousVidMem));
++ Device->contiguousVidMem = gcvNULL;
++ }
++ }
++
++ {
++ if(gckDebugFileSystemIsEnabled())
++ {
++ gckDebugFileSystemFreeNode(Device->dbgnode);
++ kfree(Device->dbgnode);
++ Device->dbgnode = gcvNULL;
++ }
++ }
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (Device->registerBases[i] != gcvNULL)
++ {
++ /* Unmap register memory. */
++ iounmap(Device->registerBases[i]);
++ if (Device->requestedRegisterMemBases[i] != 0)
++ {
++ release_mem_region(Device->requestedRegisterMemBases[i], Device->requestedRegisterMemSizes[i]);
++ }
++
++ Device->registerBases[i] = gcvNULL;
++ Device->requestedRegisterMemBases[i] = 0;
++ Device->requestedRegisterMemSizes[i] = 0;
++ }
++ }
++
++ /*Disable clock*/
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ if (Device->clk_3d_axi) {
++ clk_put(Device->clk_3d_axi);
++ Device->clk_3d_axi = NULL;
++ }
++#endif
++ if (Device->clk_3d_core) {
++ clk_put(Device->clk_3d_core);
++ Device->clk_3d_core = NULL;
++ }
++ if (Device->clk_3d_shader) {
++ clk_put(Device->clk_3d_shader);
++ Device->clk_3d_shader = NULL;
++ }
++ if (Device->clk_2d_core) {
++ clk_put(Device->clk_2d_core);
++ Device->clk_2d_core = NULL;
++ }
++ if (Device->clk_2d_axi) {
++ clk_put(Device->clk_2d_axi);
++ Device->clk_2d_axi = NULL;
++ }
++ if (Device->clk_vg_axi) {
++ clk_put(Device->clk_vg_axi);
++ Device->clk_vg_axi = NULL;
++ }
++
++#ifdef CONFIG_PM
++ if(Device->pmdev)
++ pm_runtime_disable(Device->pmdev);
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ if (Device->gpu_regulator) {
++ regulator_put(Device->gpu_regulator);
++ Device->gpu_regulator = NULL;
++ }
++#endif
++
++ /* Destroy the gckOS object. */
++ if (Device->os != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_Destroy(Device->os));
++ Device->os = gcvNULL;
++ }
++
++ /* Free the device. */
++ kfree(Device);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Setup_ISR
++**
++** Start the ISR routine.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** gcvSTATUS_OK
++** Setup successfully.
++** gcvSTATUS_GENERIC_IO
++** Setup failed.
++*/
++gceSTATUS
++gckGALDEVICE_Setup_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ )
++{
++ gceSTATUS status;
++ gctINT ret = -1;
++
++ gcmkHEADER_ARG("Device=0x%x Core=%d", Device, Core);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ if (Device->irqLines[Core] < 0)
++ {
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /* Hook up the isr based on the irq line. */
++#ifdef FLAREON
++ gc500_handle.dev_name = "galcore interrupt service";
++ gc500_handle.dev_id = Device;
++ switch (Core) {
++ case gcvCORE_MAJOR:
++ gc500_handle.handler = isrRoutine;
++ break;
++ case gcvCORE_2D:
++ gc500_handle.handler = isrRoutine2D;
++ break;
++ case gcvCORE_VG:
++ gc500_handle.handler = isrRoutineVG;
++ break;
++ default:
++ break;
++ }
++ gc500_handle.intr_gen = GPIO_INTR_LEVEL_TRIGGER;
++ gc500_handle.intr_trig = GPIO_TRIG_HIGH_LEVEL;
++
++ ret = dove_gpio_request(
++ DOVE_GPIO0_7, &gc500_handle
++ );
++#else
++ switch (Core) {
++ case gcvCORE_MAJOR:
++ ret = request_irq(
++ Device->irqLines[Core], isrRoutine, 0x0/*IRQF_DISABLED*/,
++ "galcore interrupt service", Device
++ );
++ break;
++ case gcvCORE_2D:
++ ret = request_irq(
++ Device->irqLines[Core], isrRoutine2D, 0x0/*IRQF_DISABLED*/,
++ "galcore 2D interrupt service", Device
++ );
++ break;
++ case gcvCORE_VG:
++ ret = request_irq(
++ Device->irqLines[Core], isrRoutineVG, 0x0/*IRQF_DISABLED*/,
++ "galcore VG interrupt service", Device
++ );
++ break;
++ default:
++ break;
++ }
++#endif
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not register irq line %d (error=%d)\n",
++ __FUNCTION__, __LINE__,
++ Device->irqLines[Core], ret
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->isrEnabled[Core] = 1;
++
++ /* Mark ISR as initialized. */
++ Device->isrInitializeds[Core] = gcvTRUE;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckGALDEVICE_Enable_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x Core=%d", Device, Core);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ if (Device->irqLines[Core] < 0)
++ {
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++    spin_lock(&Device->kernels[Core]->irq_lock);
++    if (Device->isrEnabled[Core] == 0)
++    {
++        enable_irq(Device->irqLines[Core]);
++    }
++    /* Reference-count the enable; gckGALDEVICE_Disable_ISR drops it. */
++    Device->isrEnabled[Core]++;
++    spin_unlock(&Device->kernels[Core]->irq_lock);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Release_ISR
++**
++** Release the irq line.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** Nothing.
++*/
++gceSTATUS
++gckGALDEVICE_Release_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ )
++{
++ gcmkHEADER_ARG("Device=0x%x Core=%d", Device, Core);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ /* release the irq */
++ if (Device->isrInitializeds[Core])
++ {
++#ifdef FLAREON
++ dove_gpio_free(DOVE_GPIO0_7, "galcore interrupt service");
++#else
++ free_irq(Device->irqLines[Core], Device);
++#endif
++
++ Device->isrInitializeds[Core] = gcvFALSE;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckGALDEVICE_Disable_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ )
++{
++ gcmkHEADER_ARG("Device=0x%x Core=%d", Device, Core);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ /* disable the irq */
++ spin_lock(&Device->kernels[Core]->irq_lock);
++ if (Device->isrEnabled[Core] > 0)
++ {
++ Device->isrEnabled[Core]--;
++ if (Device->isrEnabled[Core] == 0)
++ disable_irq(Device->irqLines[Core]);
++ }
++ spin_unlock(&Device->kernels[Core]->irq_lock);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
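++
++/* Note: gckGALDEVICE_Enable_ISR and gckGALDEVICE_Disable_ISR form a
++ * reference-counted pair around enable_irq()/disable_irq(): the interrupt
++ * line is only touched on the 0 -> 1 and 1 -> 0 transitions of
++ * isrEnabled[Core], so nested enable/disable callers stay balanced. */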
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Start_Threads
++**
++** Start the daemon threads.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** gcvSTATUS_OK
++** Start successfully.
++** gcvSTATUS_GENERIC_IO
++** Start failed.
++*/
++gceSTATUS
++gckGALDEVICE_Start_Threads(
++ IN gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++ struct task_struct * task;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ if (Device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutine, Device, "galcore daemon thread");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxts[gcvCORE_MAJOR] = task;
++ Device->threadInitializeds[gcvCORE_MAJOR] = gcvTRUE;
++ }
++
++ if (Device->kernels[gcvCORE_2D] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutine2D, Device, "galcore daemon thread for 2D");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxts[gcvCORE_2D] = task;
++ Device->threadInitializeds[gcvCORE_2D] = gcvTRUE;
++ }
++ else
++ {
++ Device->threadInitializeds[gcvCORE_2D] = gcvFALSE;
++ }
++
++ if (Device->kernels[gcvCORE_VG] != gcvNULL)
++ {
++ /* Start the kernel thread. */
++ task = kthread_run(threadRoutineVG, Device, "galcore daemon thread for VG");
++
++ if (IS_ERR(task))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not start the kernel thread.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ Device->threadCtxts[gcvCORE_VG] = task;
++ Device->threadInitializeds[gcvCORE_VG] = gcvTRUE;
++ }
++ else
++ {
++ Device->threadInitializeds[gcvCORE_VG] = gcvFALSE;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Stop_Threads
++**
++** Stop the daemon threads.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** Nothing.
++*/
++gceSTATUS
++gckGALDEVICE_Stop_Threads(
++ gckGALDEVICE Device
++ )
++{
++ gctINT i;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ /* Stop the kernel threads. */
++ if (Device->threadInitializeds[i])
++ {
++ Device->killThread = gcvTRUE;
++ up(&Device->semas[i]);
++
++ kthread_stop(Device->threadCtxts[i]);
++ Device->threadCtxts[i] = gcvNULL;
++ Device->threadInitializeds[i] = gcvFALSE;
++ }
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Start
++**
++** Start the gal device, including the following actions: set up the ISR
++** routines and start the daemon threads.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** gcvSTATUS_OK
++** Start successfully.
++*/
++gceSTATUS
++gckGALDEVICE_Start(
++ IN gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ /* Start the kernel thread. */
++ gcmkONERROR(gckGALDEVICE_Start_Threads(Device));
++
++ if (Device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ {
++ /* Setup the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Setup_ISR(Device, gcvCORE_MAJOR));
++
++        /* Switch to the OFF (broadcast) power state. */
++ gcmkONERROR(gckHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_MAJOR]->hardware, gcvPOWER_OFF_BROADCAST
++ ));
++ }
++
++ if (Device->kernels[gcvCORE_2D] != gcvNULL)
++ {
++ /* Setup the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Setup_ISR(Device, gcvCORE_2D));
++
++        /* Switch to the OFF (broadcast) power state. */
++ gcmkONERROR(gckHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_2D]->hardware, gcvPOWER_OFF_BROADCAST
++ ));
++ }
++
++ if (Device->kernels[gcvCORE_VG] != gcvNULL)
++ {
++ /* Setup the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Setup_ISR(Device, gcvCORE_VG));
++
++        /* Switch to the OFF (broadcast) power state. */
++ gcmkONERROR(gckVGHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_VG]->vg->hardware, gcvPOWER_OFF_BROADCAST
++ ));
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckGALDEVICE_Stop
++**
++** Stop the gal device, including the following actions: stop the daemon
++** thread, release the irq.
++**
++** INPUT:
++**
++** gckGALDEVICE Device
++** Pointer to an gckGALDEVICE object.
++**
++** OUTPUT:
++**
++** Nothing.
++**
++** RETURNS:
++**
++** Nothing.
++*/
++gceSTATUS
++gckGALDEVICE_Stop(
++ gckGALDEVICE Device
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Device=0x%x", Device);
++
++ gcmkVERIFY_ARGUMENT(Device != NULL);
++
++ if (Device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ {
++ /* Switch to OFF power state. */
++ gcmkONERROR(gckHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_MAJOR]->hardware, gcvPOWER_OFF
++ ));
++
++ /* Remove the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Release_ISR(Device, gcvCORE_MAJOR));
++ }
++
++ if (Device->kernels[gcvCORE_2D] != gcvNULL)
++ {
++        /* Remove the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Release_ISR(Device, gcvCORE_2D));
++
++ /* Switch to OFF power state. */
++ gcmkONERROR(gckHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_2D]->hardware, gcvPOWER_OFF
++ ));
++ }
++
++ if (Device->kernels[gcvCORE_VG] != gcvNULL)
++ {
++        /* Remove the ISR routine. */
++ gcmkONERROR(gckGALDEVICE_Release_ISR(Device, gcvCORE_VG));
++
++#if gcdENABLE_VG
++ /* Switch to OFF power state. */
++ gcmkONERROR(gckVGHARDWARE_SetPowerManagementState(
++ Device->kernels[gcvCORE_VG]->vg->hardware, gcvPOWER_OFF
++ ));
++#endif
++ }
++
++ /* Stop the kernel thread. */
++ gcmkONERROR(gckGALDEVICE_Stop_Threads(Device));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
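++
++/* Lifecycle sketch for the gckGALDEVICE object (illustrative only; the
++ * real call sites live in the platform driver probe/remove code, outside
++ * this file, and irq/regBase/regSize stand in for the full argument list):
++ *
++ *     gckGALDEVICE device = gcvNULL;
++ *     gcmkONERROR(gckGALDEVICE_Construct(irq, regBase, regSize, ..., &device));
++ *     gcmkONERROR(gckGALDEVICE_Start(device));    // ISRs + daemon threads
++ *     ...
++ *     gcmkVERIFY_OK(gckGALDEVICE_Stop(device));
++ *     gcmkVERIFY_OK(gckGALDEVICE_Destroy(device));
++ */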
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_device.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_device.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_device.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_device.h 2015-07-27 23:13:06.222780123 +0200
+@@ -0,0 +1,192 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not, write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_device_h_
++#define __gc_hal_kernel_device_h_
++
++/******************************************************************************\
++******************************* gckGALDEVICE Structure *******************************
++\******************************************************************************/
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++struct contiguous_mem_pool {
++ struct dma_attrs attrs;
++ dma_addr_t phys;
++ void *virt;
++ size_t size;
++};
++#endif
++
++typedef struct _gckGALDEVICE
++{
++ /* Objects. */
++ gckOS os;
++ gckKERNEL kernels[gcdMAX_GPU_COUNT];
++
++ /* Attributes. */
++ gctSIZE_T internalSize;
++ gctPHYS_ADDR internalPhysical;
++ gctUINT32 internalPhysicalName;
++ gctPOINTER internalLogical;
++ gckVIDMEM internalVidMem;
++ gctSIZE_T externalSize;
++ gctPHYS_ADDR externalPhysical;
++ gctUINT32 externalPhysicalName;
++ gctPOINTER externalLogical;
++ gckVIDMEM externalVidMem;
++ gckVIDMEM contiguousVidMem;
++ gctPOINTER contiguousBase;
++ gctPHYS_ADDR contiguousPhysical;
++ gctUINT32 contiguousPhysicalName;
++ gctSIZE_T contiguousSize;
++ gctBOOL contiguousMapped;
++ gctPOINTER contiguousMappedUser;
++ gctSIZE_T systemMemorySize;
++ gctUINT32 systemMemoryBaseAddress;
++ gctPOINTER registerBases[gcdMAX_GPU_COUNT];
++ gctSIZE_T registerSizes[gcdMAX_GPU_COUNT];
++ gctUINT32 baseAddress;
++ gctUINT32 requestedRegisterMemBases[gcdMAX_GPU_COUNT];
++ gctSIZE_T requestedRegisterMemSizes[gcdMAX_GPU_COUNT];
++ gctUINT32 requestedContiguousBase;
++ gctSIZE_T requestedContiguousSize;
++
++ /* IRQ management. */
++ gctINT irqLines[gcdMAX_GPU_COUNT];
++ gctBOOL isrInitializeds[gcdMAX_GPU_COUNT];
++ gctINT isrEnabled[gcdMAX_GPU_COUNT];
++ gctBOOL dataReadys[gcdMAX_GPU_COUNT];
++
++ /* Thread management. */
++ struct task_struct *threadCtxts[gcdMAX_GPU_COUNT];
++ struct semaphore semas[gcdMAX_GPU_COUNT];
++ gctBOOL threadInitializeds[gcdMAX_GPU_COUNT];
++ gctBOOL killThread;
++
++ /* Signal management. */
++ gctINT signal;
++
++    /* Core mapping: hardware type (0..7) to the gceCORE that services it. */
++ gceCORE coreMapping[8];
++
++ /* States before suspend. */
++ gceCHIPPOWERSTATE statesStored[gcdMAX_GPU_COUNT];
++
++    /* Device debug file system entry in the kernel. */
++ struct _gcsDebugFileSystemNode * dbgnode;
++
++    /* Clock management. */
++ struct clk *clk_3d_core;
++ struct clk *clk_3d_shader;
++ struct clk *clk_3d_axi;
++ struct clk *clk_2d_core;
++ struct clk *clk_2d_axi;
++ struct clk *clk_vg_axi;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++    /* Power management. */
++ struct regulator *gpu_regulator;
++#endif
++    /* Runtime PM. */
++ struct device *pmdev;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct contiguous_mem_pool *pool;
++ struct reset_control *rstc[gcdMAX_GPU_COUNT];
++#endif
++}
++* gckGALDEVICE;
++
++typedef struct _gcsHAL_PRIVATE_DATA
++{
++ gckGALDEVICE device;
++ gctPOINTER mappedMemory;
++ gctPOINTER contiguousLogical;
++ /* The process opening the device may not be the same as the one that closes it. */
++ gctUINT32 pidOpen;
++}
++gcsHAL_PRIVATE_DATA, * gcsHAL_PRIVATE_DATA_PTR;
++
++gceSTATUS gckGALDEVICE_Enable_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ );
++
++gceSTATUS gckGALDEVICE_Disable_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ );
++
++gceSTATUS gckGALDEVICE_Setup_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ );
++
++gceSTATUS gckGALDEVICE_Release_ISR(
++ IN gckGALDEVICE Device,
++ IN gceCORE Core
++ );
++
++gceSTATUS gckGALDEVICE_Start_Threads(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Stop_Threads(
++ gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Start(
++ IN gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Stop(
++ gckGALDEVICE Device
++ );
++
++gceSTATUS gckGALDEVICE_Construct(
++ IN gctINT IrqLine,
++ IN gctUINT32 RegisterMemBase,
++ IN gctSIZE_T RegisterMemSize,
++ IN gctINT IrqLine2D,
++ IN gctUINT32 RegisterMemBase2D,
++ IN gctSIZE_T RegisterMemSize2D,
++ IN gctINT IrqLineVG,
++ IN gctUINT32 RegisterMemBaseVG,
++ IN gctSIZE_T RegisterMemSizeVG,
++ IN gctUINT32 ContiguousBase,
++ IN gctSIZE_T ContiguousSize,
++ IN gctSIZE_T BankSize,
++ IN gctINT FastClear,
++ IN gctINT Compression,
++ IN gctUINT32 PhysBaseAddr,
++ IN gctUINT32 PhysSize,
++ IN gctINT Signal,
++ IN gctUINT LogFileSize,
++ IN struct device *pdev,
++ IN gctINT PowerManagement,
++ IN gctINT GpuProfiler,
++ OUT gckGALDEVICE *Device
++ );
++
++gceSTATUS gckGALDEVICE_Destroy(
++ IN gckGALDEVICE Device
++ );
++
++#endif /* __gc_hal_kernel_device_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_driver.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_driver.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_driver.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_driver.c 2015-07-27 23:13:06.222780123 +0200
+@@ -0,0 +1,1479 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++* Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++#include <linux/device.h>
++#include <linux/slab.h>
++#include <linux/notifier.h>
++#include "gc_hal_kernel_linux.h"
++#include "gc_hal_driver.h"
++
++#if USE_PLATFORM_DRIVER
++# include <linux/platform_device.h>
++#endif
++
++#ifdef CONFIG_PXA_DVFM
++# include <mach/dvfm.h>
++# include <mach/pxa3xx_dvfm.h>
++#endif
++
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++# include <linux/resmem_account.h>
++# include <linux/kernel.h>
++# include <linux/mm.h>
++# include <linux/oom.h>
++# include <linux/sched.h>
++# include <linux/notifier.h>
++
++struct task_struct *lowmem_deathpending;
++
++static int
++task_notify_func(struct notifier_block *self, unsigned long val, void *data);
++
++static struct notifier_block task_nb = {
++ .notifier_call = task_notify_func,
++};
++
++static int
++task_notify_func(struct notifier_block *self, unsigned long val, void *data)
++{
++ struct task_struct *task = data;
++
++ if (task == lowmem_deathpending)
++ lowmem_deathpending = NULL;
++
++ return NOTIFY_OK;
++}
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++#include <mach/viv_gpu.h>
++#else
++#include <linux/pm_runtime.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
++#include <mach/busfreq.h>
++#else
++#include <linux/busfreq-imx6.h>
++#include <linux/reset.h>
++#endif
++#endif
++/* Zone used for header/footer. */
++#define _GC_OBJ_ZONE gcvZONE_DRIVER
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++#include <linux/device_cooling.h>
++#define REG_THERMAL_NOTIFIER(a) register_devfreq_cooling_notifier(a);
++#define UNREG_THERMAL_NOTIFIER(a) unregister_devfreq_cooling_notifier(a);
++#else
++extern int register_thermal_notifier(struct notifier_block *nb);
++extern int unregister_thermal_notifier(struct notifier_block *nb);
++#define REG_THERMAL_NOTIFIER(a) register_thermal_notifier(a);
++#define UNREG_THERMAL_NOTIFIER(a) unregister_thermal_notifier(a);
++#endif
++#endif
++
++MODULE_DESCRIPTION("Vivante Graphics Driver");
++MODULE_LICENSE("GPL");
++
++static struct class* gpuClass;
++
++static gckGALDEVICE galDevice;
++
++static uint major = 199;
++module_param(major, uint, 0644);
++
++static int irqLine = -1;
++module_param(irqLine, int, 0644);
++
++static ulong registerMemBase = 0x80000000;
++module_param(registerMemBase, ulong, 0644);
++
++static ulong registerMemSize = 2 << 10;
++module_param(registerMemSize, ulong, 0644);
++
++static int irqLine2D = -1;
++module_param(irqLine2D, int, 0644);
++
++static ulong registerMemBase2D = 0x00000000;
++module_param(registerMemBase2D, ulong, 0644);
++
++static ulong registerMemSize2D = 2 << 10;
++module_param(registerMemSize2D, ulong, 0644);
++
++static int irqLineVG = -1;
++module_param(irqLineVG, int, 0644);
++
++static ulong registerMemBaseVG = 0x00000000;
++module_param(registerMemBaseVG, ulong, 0644);
++
++static ulong registerMemSizeVG = 2 << 10;
++module_param(registerMemSizeVG, ulong, 0644);
++
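++/* Default contiguous pool size: 128 MB when frequency-scaling support is
++   built in, 4 MB otherwise; both can be overridden via the module parameter. */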
++#if gcdENABLE_FSCALE_VAL_ADJUST
++static ulong contiguousSize = 128 << 20;
++#else
++static ulong contiguousSize = 4 << 20;
++#endif
++module_param(contiguousSize, ulong, 0644);
++
++static ulong contiguousBase = 0;
++module_param(contiguousBase, ulong, 0644);
++
++static ulong bankSize = 0;
++module_param(bankSize, ulong, 0644);
++
++static int fastClear = -1;
++module_param(fastClear, int, 0644);
++
++static int compression = -1;
++module_param(compression, int, 0644);
++
++static int powerManagement = 1;
++module_param(powerManagement, int, 0644);
++
++static int gpuProfiler = 0;
++module_param(gpuProfiler, int, 0644);
++
++static int signal = 48;
++module_param(signal, int, 0644);
++
++static ulong baseAddress = 0;
++module_param(baseAddress, ulong, 0644);
++
++static ulong physSize = 0;
++module_param(physSize, ulong, 0644);
++
++static uint logFileSize = 0;
++module_param(logFileSize, uint, 0644);
++
++static int showArgs = 0;
++module_param(showArgs, int, 0644);
++
++int gpu3DMinClock = 0;
++module_param(gpu3DMinClock, int, 0644);
++
++#if ENABLE_GPU_CLOCK_BY_DRIVER
++ unsigned long coreClock = 156000000;
++ module_param(coreClock, ulong, 0644);
++#endif
++
++static int drv_open(
++ struct inode* inode,
++ struct file* filp
++ );
++
++static int drv_release(
++ struct inode* inode,
++ struct file* filp
++ );
++
++static long drv_ioctl(
++ struct file* filp,
++ unsigned int ioctlCode,
++ unsigned long arg
++ );
++
++static int drv_mmap(
++ struct file* filp,
++ struct vm_area_struct* vma
++ );
++
++static struct file_operations driver_fops =
++{
++ .owner = THIS_MODULE,
++ .open = drv_open,
++ .release = drv_release,
++ .unlocked_ioctl = drv_ioctl,
++#ifdef HAVE_COMPAT_IOCTL
++ .compat_ioctl = drv_ioctl,
++#endif
++ .mmap = drv_mmap,
++};
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++static size_t viv_gpu_resmem_query(struct task_struct *p, struct reserved_memory_account *m);
++static struct reserved_memory_account viv_gpu_resmem_handler = {
++ .name = "viv_gpu",
++ .get_page_used_by_process = viv_gpu_resmem_query,
++};
++
++size_t viv_gpu_resmem_query(struct task_struct *p, struct reserved_memory_account *m)
++{
++ gcuDATABASE_INFO info;
++ unsigned int processid = p->pid;
++ gckKERNEL gpukernel = m->data;
++
++    /* Ignore any error returned by this API. */
++ if (gckKERNEL_QueryProcessDB(gpukernel, processid, false, gcvDB_VIDEO_MEMORY, &info) != gcvSTATUS_OK)
++ return 0;
++
++    /* Return the usage in pages. */
++ if (info.counters.bytes > 0)
++ return info.counters.bytes / PAGE_SIZE;
++ return 0;
++}
++#endif
++
++int drv_open(
++ struct inode* inode,
++ struct file* filp
++ )
++{
++ gceSTATUS status;
++ gctBOOL attached = gcvFALSE;
++ gcsHAL_PRIVATE_DATA_PTR data = gcvNULL;
++ gctINT i;
++
++ gcmkHEADER_ARG("inode=0x%08X filp=0x%08X", inode, filp);
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = kmalloc(sizeof(gcsHAL_PRIVATE_DATA), GFP_KERNEL);
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ data->device = galDevice;
++ data->mappedMemory = gcvNULL;
++ data->contiguousLogical = gcvNULL;
++ gcmkONERROR(gckOS_GetProcessID(&data->pidOpen));
++
++    /* Attach the process. */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (galDevice->kernels[i] != gcvNULL)
++ {
++ gcmkONERROR(gckKERNEL_AttachProcess(galDevice->kernels[i], gcvTRUE));
++ }
++ }
++ attached = gcvTRUE;
++
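++    /* No static mapping exists, so map the contiguous pool for this open
++       and remember the logical address. */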
++ if (!galDevice->contiguousMapped)
++ {
++ gcmkONERROR(gckOS_MapMemory(
++ galDevice->os,
++ galDevice->contiguousPhysical,
++ galDevice->contiguousSize,
++ &data->contiguousLogical
++ ));
++ }
++
++ filp->private_data = data;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ if (data != gcvNULL)
++ {
++ if (data->contiguousLogical != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapMemory(
++ galDevice->os,
++ galDevice->contiguousPhysical,
++ galDevice->contiguousSize,
++ data->contiguousLogical
++ ));
++ }
++
++ kfree(data);
++ }
++
++ if (attached)
++ {
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (galDevice->kernels[i] != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckKERNEL_AttachProcess(galDevice->kernels[i], gcvFALSE));
++ }
++ }
++ }
++
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++int drv_release(
++ struct inode* inode,
++ struct file* filp
++ )
++{
++ gceSTATUS status;
++ gcsHAL_PRIVATE_DATA_PTR data;
++ gckGALDEVICE device;
++ gctINT i;
++
++ gcmkHEADER_ARG("inode=0x%08X filp=0x%08X", inode, filp);
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = filp->private_data;
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ device = data->device;
++
++ if (device == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): device is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if (!device->contiguousMapped)
++ {
++ if (data->contiguousLogical != gcvNULL)
++ {
++ gcmkONERROR(gckOS_UnmapMemoryEx(
++ galDevice->os,
++ galDevice->contiguousPhysical,
++ galDevice->contiguousSize,
++ data->contiguousLogical,
++ data->pidOpen
++ ));
++
++ data->contiguousLogical = gcvNULL;
++ }
++ }
++
++    /* Detach the process that opened the device. */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (galDevice->kernels[i] != gcvNULL)
++ {
++ gcmkONERROR(gckKERNEL_AttachProcessEx(galDevice->kernels[i], gcvFALSE, data->pidOpen));
++ }
++ }
++
++ kfree(data);
++ filp->private_data = NULL;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++long drv_ioctl(
++ struct file* filp,
++ unsigned int ioctlCode,
++ unsigned long arg
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gctUINT32 copyLen;
++ DRIVER_ARGS drvArgs;
++ gckGALDEVICE device;
++ gcsHAL_PRIVATE_DATA_PTR data;
++ gctINT32 i, count;
++
++ gcmkHEADER_ARG(
++ "filp=0x%08X ioctlCode=0x%08X arg=0x%08X",
++ filp, ioctlCode, arg
++ );
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = filp->private_data;
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ device = data->device;
++
++ if (device == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): device is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if ((ioctlCode != IOCTL_GCHAL_INTERFACE)
++ && (ioctlCode != IOCTL_GCHAL_KERNEL_INTERFACE)
++ )
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): unknown command %d\n",
++ __FUNCTION__, __LINE__,
++ ioctlCode
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Get the drvArgs. */
++ copyLen = copy_from_user(
++ &drvArgs, (void *) arg, sizeof(DRIVER_ARGS)
++ );
++
++ if (copyLen != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): error copying of the input arguments.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Now bring in the gcsHAL_INTERFACE structure. */
++ if ((drvArgs.InputBufferSize != sizeof(gcsHAL_INTERFACE))
++ || (drvArgs.OutputBufferSize != sizeof(gcsHAL_INTERFACE))
++ )
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): input or/and output structures are invalid.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ copyLen = copy_from_user(
++ &iface, gcmUINT64_TO_PTR(drvArgs.InputBuffer), sizeof(gcsHAL_INTERFACE)
++ );
++
++ if (copyLen != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): error copying of input HAL interface.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if (iface.command == gcvHAL_CHIP_INFO)
++ {
++ count = 0;
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ iface.u.ChipInfo.types[count] = gcvHARDWARE_VG;
++ }
++ else
++#endif
++ {
++ gcmkVERIFY_OK(gckHARDWARE_GetType(device->kernels[i]->hardware,
++ &iface.u.ChipInfo.types[count]));
++ }
++ count++;
++ }
++ }
++
++ iface.u.ChipInfo.count = count;
++ iface.status = status = gcvSTATUS_OK;
++ }
++ else
++ {
++ if (iface.hardwareType < 0 || iface.hardwareType > 7)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): unknown hardwareType %d\n",
++ __FUNCTION__, __LINE__,
++ iface.hardwareType
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++#if gcdENABLE_VG
++ if (device->coreMapping[iface.hardwareType] == gcvCORE_VG)
++ {
++ status = gckVGKERNEL_Dispatch(device->kernels[gcvCORE_VG],
++ (ioctlCode == IOCTL_GCHAL_INTERFACE),
++ &iface);
++ }
++ else
++#endif
++ {
++ status = gckKERNEL_Dispatch(device->kernels[device->coreMapping[iface.hardwareType]],
++ (ioctlCode == IOCTL_GCHAL_INTERFACE),
++ &iface);
++ }
++ }
++
++ /* Redo system call after pending signal is handled. */
++ if (status == gcvSTATUS_INTERRUPTED)
++ {
++ gcmkFOOTER();
++ return -ERESTARTSYS;
++ }
++
++ if (gcmIS_SUCCESS(status) && (iface.command == gcvHAL_LOCK_VIDEO_MEMORY))
++ {
++ gcuVIDMEM_NODE_PTR node = gcmUINT64_TO_PTR(iface.u.LockVideoMemory.node);
++ /* Special case for mapped memory. */
++ if ((data->mappedMemory != gcvNULL)
++ && (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ )
++ {
++ /* Compute offset into mapped memory. */
++ gctUINT32 offset
++ = (gctUINT8 *) gcmUINT64_TO_PTR(iface.u.LockVideoMemory.memory)
++ - (gctUINT8 *) device->contiguousBase;
++
++ /* Compute offset into user-mapped region. */
++ iface.u.LockVideoMemory.memory =
++ gcmPTR_TO_UINT64((gctUINT8 *) data->mappedMemory + offset);
++ }
++ }
++
++ /* Copy data back to the user. */
++ copyLen = copy_to_user(
++ gcmUINT64_TO_PTR(drvArgs.OutputBuffer), &iface, sizeof(gcsHAL_INTERFACE)
++ );
++
++ if (copyLen != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): error copying of output HAL interface.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++static int drv_mmap(
++ struct file* filp,
++ struct vm_area_struct* vma
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gcsHAL_PRIVATE_DATA_PTR data;
++ gckGALDEVICE device;
++
++ gcmkHEADER_ARG("filp=0x%08X vma=0x%08X", filp, vma);
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = filp->private_data;
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ device = data->device;
++
++ if (device == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): device is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++#if !gcdPAGED_MEMORY_CACHEABLE
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++ vma->vm_flags |= gcdVM_FLAGS;
++#endif
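++    /* Ignore any user-requested offset; the mapping always starts at the
++       base of the contiguous region. */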
++ vma->vm_pgoff = 0;
++
++ if (device->contiguousMapped)
++ {
++ unsigned long size = vma->vm_end - vma->vm_start;
++ int ret = 0;
++
++ if (size > device->contiguousSize)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Invalid mapping size.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ ret = io_remap_pfn_range(
++ vma,
++ vma->vm_start,
++ device->requestedContiguousBase >> PAGE_SHIFT,
++ size,
++ vma->vm_page_prot
++ );
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): io_remap_pfn_range failed %d\n",
++ __FUNCTION__, __LINE__,
++ ret
++ );
++
++ data->mappedMemory = gcvNULL;
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ data->mappedMemory = (gctPOINTER) vma->vm_start;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++ }
++
++OnError:
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++
++#if !USE_PLATFORM_DRIVER
++static int __init drv_init(void)
++#else
++static int drv_init(struct device *pdev)
++#endif
++{
++ int ret;
++ int result = -EINVAL;
++ gceSTATUS status;
++ gckGALDEVICE device = gcvNULL;
++ struct class* device_class = gcvNULL;
++
++ gcmkHEADER();
++
++#if ENABLE_GPU_CLOCK_BY_DRIVER && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
++ {
++# if 0
++ struct clk * clk;
++
++ clk = clk_get(NULL, "GCCLK");
++
++ if (IS_ERR(clk))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): clk get error: %d\n",
++ __FUNCTION__, __LINE__,
++ PTR_ERR(clk)
++ );
++
++ result = -ENODEV;
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ /*
++ * APMU_GC_156M, APMU_GC_312M, APMU_GC_PLL2, APMU_GC_PLL2_DIV2 currently.
++ * Use the 2X clock.
++ */
++ if (clk_set_rate(clk, coreClock * 2))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to set core clock.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ result = -EAGAIN;
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ clk_enable(clk);
++
++#if defined(CONFIG_PXA_DVFM) && (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29))
++ gc_pwr(1);
++# endif
++# endif
++ }
++#endif
++
++ printk(KERN_INFO "Galcore version %d.%d.%d.%d\n",
++ gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH, gcvVERSION_BUILD);
++    /* When the GPU profiler is enabled, GPU power management must be turned off. */
++    if (gpuProfiler)
++        powerManagement = 0;
++ if (showArgs)
++ {
++ printk("galcore options:\n");
++ printk(" irqLine = %d\n", irqLine);
++ printk(" registerMemBase = 0x%08lX\n", registerMemBase);
++ printk(" registerMemSize = 0x%08lX\n", registerMemSize);
++
++ if (irqLine2D != -1)
++ {
++ printk(" irqLine2D = %d\n", irqLine2D);
++ printk(" registerMemBase2D = 0x%08lX\n", registerMemBase2D);
++ printk(" registerMemSize2D = 0x%08lX\n", registerMemSize2D);
++ }
++
++ if (irqLineVG != -1)
++ {
++ printk(" irqLineVG = %d\n", irqLineVG);
++ printk(" registerMemBaseVG = 0x%08lX\n", registerMemBaseVG);
++ printk(" registerMemSizeVG = 0x%08lX\n", registerMemSizeVG);
++ }
++
++ printk(" contiguousSize = %ld\n", contiguousSize);
++ printk(" contiguousBase = 0x%08lX\n", contiguousBase);
++ printk(" bankSize = 0x%08lX\n", bankSize);
++ printk(" fastClear = %d\n", fastClear);
++ printk(" compression = %d\n", compression);
++ printk(" signal = %d\n", signal);
++ printk(" baseAddress = 0x%08lX\n", baseAddress);
++ printk(" physSize = 0x%08lX\n", physSize);
++ printk(" logFileSize = %d KB \n", logFileSize);
++ printk(" powerManagement = %d\n", powerManagement);
++ printk(" gpuProfiler = %d\n", gpuProfiler);
++#if ENABLE_GPU_CLOCK_BY_DRIVER
++ printk(" coreClock = %lu\n", coreClock);
++#endif
++ }
++
++    if (logFileSize != 0)
++ {
++ gckDebugFileSystemInitialize();
++ }
++
++ /* Create the GAL device. */
++ gcmkONERROR(gckGALDEVICE_Construct(
++ irqLine,
++ registerMemBase, registerMemSize,
++ irqLine2D,
++ registerMemBase2D, registerMemSize2D,
++ irqLineVG,
++ registerMemBaseVG, registerMemSizeVG,
++ contiguousBase, contiguousSize,
++ bankSize, fastClear, compression, baseAddress, physSize, signal,
++ logFileSize,
++ pdev,
++ powerManagement,
++ gpuProfiler,
++ &device
++ ));
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ device->pool = dev_get_drvdata(pdev);
++#endif
++
++ /* Start the GAL device. */
++ gcmkONERROR(gckGALDEVICE_Start(device));
++
++ if ((physSize != 0)
++ && (device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ && (device->kernels[gcvCORE_MAJOR]->hardware->mmuVersion != 0))
++ {
++ status = gckMMU_Enable(device->kernels[gcvCORE_MAJOR]->mmu, baseAddress, physSize);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Enable new MMU: status=%d\n", status);
++
++ if ((device->kernels[gcvCORE_2D] != gcvNULL)
++ && (device->kernels[gcvCORE_2D]->hardware->mmuVersion != 0))
++ {
++ status = gckMMU_Enable(device->kernels[gcvCORE_2D]->mmu, baseAddress, physSize);
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "Enable new MMU for 2D: status=%d\n", status);
++ }
++
++ /* Reset the base address */
++ device->baseAddress = 0;
++ }
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++ task_free_register(&task_nb);
++ viv_gpu_resmem_handler.data = device->kernels[gcvCORE_MAJOR];
++ register_reserved_memory_account(&viv_gpu_resmem_handler);
++#endif
++
++
++ /* Register the character device. */
++ ret = register_chrdev(major, DRV_NAME, &driver_fops);
++
++ if (ret < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not allocate major number for mmap.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
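++    /* A major of zero asks register_chrdev() for dynamic allocation;
++       keep the number it returned. */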
++ if (major == 0)
++ {
++ major = ret;
++ }
++
++ /* Create the device class. */
++ device_class = class_create(THIS_MODULE, "graphics_class");
++
++ if (IS_ERR(device_class))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to create the class.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
++ device_create(device_class, NULL, MKDEV(major, 0), NULL, "galcore");
++#else
++ device_create(device_class, NULL, MKDEV(major, 0), "galcore");
++#endif
++
++ galDevice = device;
++ gpuClass = device_class;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "%s(%d): irqLine=%d, contiguousSize=%lu, memBase=0x%lX\n",
++ __FUNCTION__, __LINE__,
++ irqLine, contiguousSize, registerMemBase
++ );
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ /* Roll back. */
++ if (device_class != gcvNULL)
++ {
++ device_destroy(device_class, MKDEV(major, 0));
++ class_destroy(device_class);
++ }
++
++ if (device != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckGALDEVICE_Stop(device));
++ gcmkVERIFY_OK(gckGALDEVICE_Destroy(device));
++ }
++
++ gcmkFOOTER();
++ return result;
++}
++
++#if !USE_PLATFORM_DRIVER
++static void __exit drv_exit(void)
++#else
++static void drv_exit(void)
++#endif
++{
++ gcmkHEADER();
++
++#ifdef CONFIG_ANDROID_RESERVED_MEMORY_ACCOUNT
++ task_free_unregister(&task_nb);
++ unregister_reserved_memory_account(&viv_gpu_resmem_handler);
++#endif
++
++ gcmkASSERT(gpuClass != gcvNULL);
++ device_destroy(gpuClass, MKDEV(major, 0));
++ class_destroy(gpuClass);
++
++ unregister_chrdev(major, DRV_NAME);
++
++ gcmkVERIFY_OK(gckGALDEVICE_Stop(galDevice));
++ gcmkVERIFY_OK(gckGALDEVICE_Destroy(galDevice));
++
++    if (gckDebugFileSystemIsEnabled())
++ {
++ gckDebugFileSystemTerminate();
++ }
++
++#if ENABLE_GPU_CLOCK_BY_DRIVER && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
++ {
++# if 0
++ struct clk * clk = NULL;
++
++#if defined(CONFIG_PXA_DVFM) && (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29))
++ gc_pwr(0);
++#endif
++ clk = clk_get(NULL, "GCCLK");
++ clk_disable(clk);
++# endif
++ }
++#endif
++
++ gcmkFOOTER_NO();
++}
++
++#if !USE_PLATFORM_DRIVER
++ module_init(drv_init);
++ module_exit(drv_exit);
++#else
++
++#ifdef CONFIG_DOVE_GPU
++# define DEVICE_NAME "dove_gpu"
++#else
++# define DEVICE_NAME "galcore"
++#endif
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++static int thermal_hot_pm_notify(struct notifier_block *nb, unsigned long event,
++ void *dummy)
++{
++ static gctUINT orgFscale, minFscale, maxFscale;
++ static gctBOOL critical;
++ gckHARDWARE hardware = galDevice->kernels[gcvCORE_MAJOR]->hardware;
++
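++    /* 'event' carries the thermal trip level: above 4 clamp to the minimum
++       frequency scale, above 1 throttle proportionally, otherwise restore
++       the saved scale. */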
++ if (event > 4) {
++ critical = gcvTRUE;
++ gckHARDWARE_GetFscaleValue(hardware,&orgFscale,&minFscale, &maxFscale);
++ gckHARDWARE_SetFscaleValue(hardware, minFscale);
++ gckOS_Print("System is too hot. GPU3D scalign to %d/64 clock.\n", minFscale);
++ } else if (event > 1) {
++ gckHARDWARE_GetFscaleValue(hardware,&orgFscale,&minFscale, &maxFscale);
++ gckHARDWARE_SetFscaleValue(hardware, maxFscale - (8 * event));
++ } else if (orgFscale) {
++ gckHARDWARE_SetFscaleValue(hardware, orgFscale);
++ if (critical) {
++ gckOS_Print("Hot alarm is canceled. GPU3D clock will return to %d/64\n", orgFscale);
++ critical = gcvFALSE;
++ }
++ }
++ return NOTIFY_OK;
++}
++
++static struct notifier_block thermal_hot_pm_notifier = {
++ .notifier_call = thermal_hot_pm_notify,
++ };
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++static int gpu_probe(struct platform_device *pdev)
++#else
++static int __devinit gpu_probe(struct platform_device *pdev)
++#endif
++{
++ int ret = -ENODEV;
++ struct resource* res;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct contiguous_mem_pool *pool;
++ struct reset_control *rstc;
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++    struct device_node *dn = pdev->dev.of_node;
++ const u32 *prop;
++#else
++ struct viv_gpu_platform_data *pdata;
++#endif
++ gcmkHEADER();
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phys_baseaddr");
++ if (res)
++ baseAddress = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_3d");
++ if (res)
++ irqLine = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_3d");
++ if (res)
++ {
++ registerMemBase = res->start;
++ registerMemSize = res->end - res->start + 1;
++ }
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_2d");
++ if (res)
++ irqLine2D = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_2d");
++ if (res)
++ {
++ registerMemBase2D = res->start;
++ registerMemSize2D = res->end - res->start + 1;
++ }
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_vg");
++ if (res)
++ irqLineVG = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_vg");
++ if (res)
++ {
++ registerMemBaseVG = res->start;
++ registerMemSizeVG = res->end - res->start + 1;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
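++    /* On 3.10+ kernels the contiguous pool comes from the DMA API with
++       write-combine attributes instead of a board-reserved region. */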
++ pool = devm_kzalloc(&pdev->dev, sizeof(*pool), GFP_KERNEL);
++ if (!pool)
++ return -ENOMEM;
++ pool->size = contiguousSize;
++ init_dma_attrs(&pool->attrs);
++ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &pool->attrs);
++ pool->virt = dma_alloc_attrs(&pdev->dev, pool->size, &pool->phys,
++ GFP_KERNEL, &pool->attrs);
++ if (!pool->virt) {
++ dev_err(&pdev->dev, "Failed to allocate contiguous memory\n");
++ return -ENOMEM;
++ }
++ contiguousBase = pool->phys;
++ dev_set_drvdata(&pdev->dev, pool);
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ prop = of_get_property(dn, "contiguousbase", NULL);
++    if (prop)
++        contiguousBase = *prop;
++    of_property_read_u32(dn, "contiguoussize", (u32 *)&contiguousSize);
++#else
++ pdata = pdev->dev.platform_data;
++ if (pdata) {
++ contiguousBase = pdata->reserved_mem_base;
++ contiguousSize = pdata->reserved_mem_size;
++ }
++#endif
++ if (contiguousSize == 0)
++ gckOS_Print("Warning: No contiguous memory is reserverd for gpu.!\n ");
++ ret = drv_init(&pdev->dev);
++
++ if (!ret)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ rstc = devm_reset_control_get(&pdev->dev, "gpu3d");
++ galDevice->rstc[gcvCORE_MAJOR] = IS_ERR(rstc) ? NULL : rstc;
++
++ rstc = devm_reset_control_get(&pdev->dev, "gpu2d");
++ galDevice->rstc[gcvCORE_2D] = IS_ERR(rstc) ? NULL : rstc;
++
++ rstc = devm_reset_control_get(&pdev->dev, "gpuvg");
++ galDevice->rstc[gcvCORE_VG] = IS_ERR(rstc) ? NULL : rstc;
++#endif
++ platform_set_drvdata(pdev, galDevice);
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ if (galDevice->kernels[gcvCORE_MAJOR])
++ REG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
++#endif
++ gcmkFOOTER_NO();
++ return ret;
++ }
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ UNREG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ dma_free_attrs(&pdev->dev, pool->size, pool->virt, pool->phys,
++ &pool->attrs);
++#endif
++ gcmkFOOTER_ARG(KERN_INFO "Failed to register gpu driver: %d\n", ret);
++ return ret;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++static int gpu_remove(struct platform_device *pdev)
++#else
++static int __devexit gpu_remove(struct platform_device *pdev)
++#endif
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ gckGALDEVICE device = platform_get_drvdata(pdev);
++ struct contiguous_mem_pool *pool = device->pool;
++#endif
++ gcmkHEADER();
++#if gcdENABLE_FSCALE_VAL_ADJUST
++    if (galDevice->kernels[gcvCORE_MAJOR])
++ UNREG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
++#endif
++ drv_exit();
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ dma_free_attrs(&pdev->dev, pool->size, pool->virt, pool->phys,
++ &pool->attrs);
++#endif
++ gcmkFOOTER_NO();
++ return 0;
++}
++
++static int gpu_suspend(struct platform_device *dev, pm_message_t state)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++ gctINT i;
++
++ device = platform_get_drvdata(dev);
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL)
++ {
++ /* Store states. */
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_QueryPowerManagementState(device->kernels[i]->vg->hardware, &device->statesStored[i]);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_QueryPowerManagementState(device->kernels[i]->hardware, &device->statesStored[i]);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(device->kernels[i]->vg->hardware, gcvPOWER_OFF);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, gcvPOWER_OFF);
++ }
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++
++ }
++ }
++
++ return 0;
++}
++
++static int gpu_resume(struct platform_device *dev)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++ gctINT i;
++ gceCHIPPOWERSTATE statesStored;
++
++ device = platform_get_drvdata(dev);
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(device->kernels[i]->vg->hardware, gcvPOWER_ON);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, gcvPOWER_ON);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++
++            /* Convert the stored global state to the corresponding broadcast state so it is restored correctly. */
++ switch(device->statesStored[i])
++ {
++ case gcvPOWER_OFF:
++ statesStored = gcvPOWER_OFF_BROADCAST;
++ break;
++ case gcvPOWER_IDLE:
++ statesStored = gcvPOWER_IDLE_BROADCAST;
++ break;
++ case gcvPOWER_SUSPEND:
++ statesStored = gcvPOWER_SUSPEND_BROADCAST;
++ break;
++ case gcvPOWER_ON:
++ statesStored = gcvPOWER_ON_AUTO;
++ break;
++ default:
++ statesStored = device->statesStored[i];
++ break;
++ }
++
++ /* Restore states. */
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(device->kernels[i]->vg->hardware, statesStored);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, statesStored);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++ }
++ }
++
++ return 0;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++static const struct of_device_id mxs_gpu_dt_ids[] = {
++ { .compatible = "fsl,imx6q-gpu", },
++ {/* sentinel */}
++};
++MODULE_DEVICE_TABLE(of, mxs_gpu_dt_ids);
++
++#ifdef CONFIG_PM
++static int gpu_runtime_suspend(struct device *dev)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 7)
++ release_bus_freq(BUS_FREQ_HIGH);
++#endif
++ return 0;
++}
++
++static int gpu_runtime_resume(struct device *dev)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 7)
++ request_bus_freq(BUS_FREQ_HIGH);
++#endif
++ return 0;
++}
++
++static int gpu_system_suspend(struct device *dev)
++{
++    pm_message_t state = {0};
++ return gpu_suspend(to_platform_device(dev), state);
++}
++
++static int gpu_system_resume(struct device *dev)
++{
++ return gpu_resume(to_platform_device(dev));
++}
++
++static const struct dev_pm_ops gpu_pm_ops = {
++ SET_RUNTIME_PM_OPS(gpu_runtime_suspend, gpu_runtime_resume, NULL)
++ SET_SYSTEM_SLEEP_PM_OPS(gpu_system_suspend, gpu_system_resume)
++};
++#endif
++#endif
++
++static struct platform_driver gpu_driver = {
++ .probe = gpu_probe,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++ .remove = gpu_remove,
++#else
++ .remove = __devexit_p(gpu_remove),
++#endif
++
++ .suspend = gpu_suspend,
++ .resume = gpu_resume,
++
++ .driver = {
++ .name = DEVICE_NAME,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ .of_match_table = mxs_gpu_dt_ids,
++#ifdef CONFIG_PM
++ .pm = &gpu_pm_ops,
++#endif
++#endif
++ }
++};
++
++#if 0 /*CONFIG_DOVE_GPU*/
++static struct resource gpu_resources[] = {
++ {
++ .name = "gpu_irq",
++ .flags = IORESOURCE_IRQ,
++ },
++ {
++ .name = "gpu_base",
++ .flags = IORESOURCE_MEM,
++ },
++ {
++ .name = "gpu_mem",
++ .flags = IORESOURCE_MEM,
++ },
++};
++
++static struct platform_device * gpu_device;
++#endif
++
++static int __init gpu_init(void)
++{
++ int ret = 0;
++
++#if 0 /*ndef CONFIG_DOVE_GPU*/
++ gpu_resources[0].start = gpu_resources[0].end = irqLine;
++
++ gpu_resources[1].start = registerMemBase;
++ gpu_resources[1].end = registerMemBase + registerMemSize - 1;
++
++ gpu_resources[2].start = contiguousBase;
++ gpu_resources[2].end = contiguousBase + contiguousSize - 1;
++
++ /* Allocate device */
++ gpu_device = platform_device_alloc(DEVICE_NAME, -1);
++ if (!gpu_device)
++ {
++ printk(KERN_ERR "galcore: platform_device_alloc failed.\n");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ /* Insert resource */
++ ret = platform_device_add_resources(gpu_device, gpu_resources, 3);
++ if (ret)
++ {
++ printk(KERN_ERR "galcore: platform_device_add_resources failed.\n");
++ goto put_dev;
++ }
++
++ /* Add device */
++ ret = platform_device_add(gpu_device);
++ if (ret)
++ {
++ printk(KERN_ERR "galcore: platform_device_add failed.\n");
++ goto put_dev;
++ }
++#endif
++
++ ret = platform_driver_register(&gpu_driver);
++ if (!ret)
++ {
++ goto out;
++ }
++
++#if 0 /*ndef CONFIG_DOVE_GPU*/
++ platform_device_del(gpu_device);
++put_dev:
++ platform_device_put(gpu_device);
++#endif
++
++out:
++ return ret;
++}
++
++static void __exit gpu_exit(void)
++{
++ platform_driver_unregister(&gpu_driver);
++#if 0 /*ndef CONFIG_DOVE_GPU*/
++ platform_device_unregister(gpu_device);
++#endif
++}
++
++module_init(gpu_init);
++module_exit(gpu_exit);
++
++#endif
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_iommu.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_iommu.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_iommu.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_iommu.c 2015-07-27 23:13:06.222780123 +0200
+@@ -0,0 +1,217 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++#include "gc_hal_kernel_device.h"
++
++#include <linux/iommu.h>
++#include <linux/platform_device.h>
++
++#define _GC_OBJ_ZONE gcvZONE_OS
++
++typedef struct _gcsIOMMU
++{
++ struct iommu_domain * domain;
++ struct device * device;
++}
++gcsIOMMU;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++static int
++_IOMMU_Fault_Handler(
++ struct iommu_domain * Domain,
++ struct device * Dev,
++ unsigned long DomainAddress,
++ int flags,
++ void * args
++ )
++#else
++static int
++_IOMMU_Fault_Handler(
++ struct iommu_domain * Domain,
++ struct device * Dev,
++ unsigned long DomainAddress,
++ int flags
++ )
++#endif
++{
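++    /* Nothing to recover here; returning zero reports the fault as handled. */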
++ return 0;
++}
++
++static gceSTATUS
++_FlatMapping(
++ IN gckIOMMU Iommu
++ )
++{
++ gceSTATUS status;
++ gctUINT32 physical;
++
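++    /* Identity-map the low 2 GB page by page so GPU-visible addresses equal
++       CPU physical addresses. */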
++ for (physical = 0; physical < 0x80000000; physical += PAGE_SIZE)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "Map %x => %x bytes = %d",
++ physical, physical, PAGE_SIZE
++ );
++
++ gcmkONERROR(gckIOMMU_Map(Iommu, physical, physical, PAGE_SIZE));
++ }
++
++ return gcvSTATUS_OK;
++
++OnError:
++ return status;
++}
++
++void
++gckIOMMU_Destory(
++ IN gckOS Os,
++ IN gckIOMMU Iommu
++ )
++{
++ gcmkHEADER();
++
++ if (Iommu->domain && Iommu->device)
++ {
++        iommu_detach_device(Iommu->domain, Iommu->device);
++ }
++
++ if (Iommu->domain)
++ {
++ iommu_domain_free(Iommu->domain);
++ }
++
++ if (Iommu)
++ {
++ gcmkOS_SAFE_FREE(Os, Iommu);
++ }
++
++ gcmkFOOTER_NO();
++}
++
++gceSTATUS
++gckIOMMU_Construct(
++ IN gckOS Os,
++ OUT gckIOMMU * Iommu
++ )
++{
++ gceSTATUS status;
++ gckIOMMU iommu = gcvNULL;
++ struct device *dev;
++ int ret;
++
++ gcmkHEADER();
++
++ dev = &Os->device->platform->device->dev;
++
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsIOMMU), (gctPOINTER *)&iommu));
++
++ gckOS_ZeroMemory(iommu, gcmSIZEOF(gcsIOMMU));
++
++ iommu->domain = iommu_domain_alloc(&platform_bus_type);
++
++ if (!iommu->domain)
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "iommu_domain_alloc() fail");
++
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ iommu_set_fault_handler(iommu->domain, _IOMMU_Fault_Handler, dev);
++#else
++ iommu_set_fault_handler(iommu->domain, _IOMMU_Fault_Handler);
++#endif
++
++ ret = iommu_attach_device(iommu->domain, dev);
++
++ if (ret)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS, "iommu_attach_device() fail %d", ret);
++
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++ }
++
++ iommu->device = dev;
++
++    gcmkONERROR(_FlatMapping(iommu));
++
++ *Iommu = iommu;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ gckIOMMU_Destory(Os, iommu);
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckIOMMU_Map(
++ IN gckIOMMU Iommu,
++ IN gctUINT32 DomainAddress,
++ IN gctUINT32 Physical,
++ IN gctUINT32 Bytes
++ )
++{
++ gceSTATUS status;
++ int ret;
++
++ gcmkHEADER_ARG("DomainAddress=%#X, Physical=%#X, Bytes=%d",
++ DomainAddress, Physical, Bytes);
++
++ ret = iommu_map(Iommu->domain, DomainAddress, Physical, Bytes, 0);
++
++ if (ret)
++ {
++ gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckIOMMU_Unmap(
++ IN gckIOMMU Iommu,
++ IN gctUINT32 DomainAddress,
++ IN gctUINT32 Bytes
++ )
++{
++ gcmkHEADER();
++
++ iommu_unmap(Iommu->domain, DomainAddress, Bytes);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_linux.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_linux.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_linux.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_linux.c 2015-07-27 23:13:06.222780123 +0200
+@@ -0,0 +1,485 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++
++#define _GC_OBJ_ZONE gcvZONE_KERNEL
++
++/******************************************************************************\
++******************************* gckKERNEL API Code ******************************
++\******************************************************************************/
++
++/*******************************************************************************
++**
++** gckKERNEL_QueryVideoMemory
++**
++** Query the amount of video memory.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** OUTPUT:
++**
++** gcsHAL_INTERFACE * Interface
++**          Pointer to a gcsHAL_INTERFACE structure that will be filled in with
++** the memory information.
++*/
++gceSTATUS
++gckKERNEL_QueryVideoMemory(
++ IN gckKERNEL Kernel,
++ OUT gcsHAL_INTERFACE * Interface
++ )
++{
++ gckGALDEVICE device;
++
++ gcmkHEADER_ARG("Kernel=%p", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Interface != NULL);
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Kernel->context;
++
++ /* Get internal memory size and physical address. */
++ Interface->u.QueryVideoMemory.internalSize = device->internalSize;
++ Interface->u.QueryVideoMemory.internalPhysical = device->internalPhysicalName;
++
++ /* Get external memory size and physical address. */
++ Interface->u.QueryVideoMemory.externalSize = device->externalSize;
++ Interface->u.QueryVideoMemory.externalPhysical = device->externalPhysicalName;
++
++ /* Get contiguous memory size and physical address. */
++ Interface->u.QueryVideoMemory.contiguousSize = device->contiguousSize;
++ Interface->u.QueryVideoMemory.contiguousPhysical = device->contiguousPhysicalName;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_GetVideoMemoryPool
++**
++** Get the gckVIDMEM object belonging to the specified pool.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** gcePOOL Pool
++** Pool to query gckVIDMEM object for.
++**
++** OUTPUT:
++**
++** gckVIDMEM * VideoMemory
++** Pointer to a variable that will hold the pointer to the gckVIDMEM
++** object belonging to the requested pool.
++*/
++gceSTATUS
++gckKERNEL_GetVideoMemoryPool(
++ IN gckKERNEL Kernel,
++ IN gcePOOL Pool,
++ OUT gckVIDMEM * VideoMemory
++ )
++{
++ gckGALDEVICE device;
++ gckVIDMEM videoMemory;
++
++ gcmkHEADER_ARG("Kernel=%p Pool=%d", Kernel, Pool);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(VideoMemory != NULL);
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Kernel->context;
++
++ /* Dispatch on pool. */
++ switch (Pool)
++ {
++ case gcvPOOL_LOCAL_INTERNAL:
++ /* Internal memory. */
++ videoMemory = device->internalVidMem;
++ break;
++
++ case gcvPOOL_LOCAL_EXTERNAL:
++ /* External memory. */
++ videoMemory = device->externalVidMem;
++ break;
++
++ case gcvPOOL_SYSTEM:
++ /* System memory. */
++ videoMemory = device->contiguousVidMem;
++ break;
++
++ default:
++ /* Unknown pool. */
++ videoMemory = NULL;
++ }
++
++ /* Return pointer to the gckVIDMEM object. */
++ *VideoMemory = videoMemory;
++
++ /* Return status. */
++ gcmkFOOTER_ARG("*VideoMemory=%p", *VideoMemory);
++ return (videoMemory == NULL) ? gcvSTATUS_OUT_OF_MEMORY : gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_MapMemory
++**
++** Map video memory into the current process space.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of video memory to map.
++**
++** gctSIZE_T Bytes
++** Number of bytes to map.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the base address of the mapped
++** memory region.
++*/
++gceSTATUS
++gckKERNEL_MapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ )
++{
++ gckKERNEL kernel = Kernel;
++ gctPHYS_ADDR physical = gcmNAME_TO_PTR(Physical);
++
++ return gckOS_MapMemory(Kernel->os, physical, Bytes, Logical);
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_UnmapMemory
++**
++** Unmap video memory from the current process space.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of video memory to map.
++**
++** gctSIZE_T Bytes
++** Number of bytes to map.
++**
++** gctPOINTER Logical
++** Base address of the mapped memory region.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_UnmapMemory(
++ IN gckKERNEL Kernel,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ gckKERNEL kernel = Kernel;
++ gctPHYS_ADDR physical = gcmNAME_TO_PTR(Physical);
++
++ return gckOS_UnmapMemory(Kernel->os, physical, Bytes, Logical);
++}
++
++/*******************************************************************************
++**
++**  gckKERNEL_MapVideoMemoryEx
++**
++** Get the logical address for a hardware specific memory address for the
++** current process.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
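++**      gceCORE Core
++**          Core whose address space contains the address.
++**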
++** gctBOOL InUserSpace
++** gcvTRUE to map the memory into the user space.
++**
++** gctUINT32 Address
++** Hardware specific memory address.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the logical address of the
++** specified memory address.
++*/
++gceSTATUS
++gckKERNEL_MapVideoMemoryEx(
++ IN gckKERNEL Kernel,
++ IN gceCORE Core,
++ IN gctBOOL InUserSpace,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * Logical
++ )
++{
++ gckGALDEVICE device;
++ PLINUX_MDL mdl;
++ PLINUX_MDL_MAP mdlMap;
++ gcePOOL pool;
++ gctUINT32 offset, base;
++ gceSTATUS status;
++ gctPOINTER logical;
++
++ gcmkHEADER_ARG("Kernel=%p InUserSpace=%d Address=%08x",
++ Kernel, InUserSpace, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Logical != NULL);
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Kernel->context;
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ /* Split the memory address into a pool type and offset. */
++ gcmkONERROR(
++ gckVGHARDWARE_SplitMemory(Kernel->vg->hardware, Address, &pool, &offset));
++ }
++ else
++#endif
++ {
++ /* Split the memory address into a pool type and offset. */
++ gcmkONERROR(
++ gckHARDWARE_SplitMemory(Kernel->hardware, Address, &pool, &offset));
++ }
++
++ /* Dispatch on pool. */
++ switch (pool)
++ {
++ case gcvPOOL_LOCAL_INTERNAL:
++ /* Internal memory. */
++ logical = device->internalLogical;
++ break;
++
++ case gcvPOOL_LOCAL_EXTERNAL:
++ /* External memory. */
++ logical = device->externalLogical;
++ break;
++
++ case gcvPOOL_SYSTEM:
++ /* System memory. */
++ if (device->contiguousMapped)
++ {
++ logical = device->contiguousBase;
++ }
++ else
++ {
++ gctINT processID;
++ gckOS_GetProcessID(&processID);
++
++ mdl = (PLINUX_MDL) device->contiguousPhysical;
++
++ mdlMap = FindMdlMap(mdl, processID);
++ gcmkASSERT(mdlMap);
++
++ logical = (gctPOINTER) mdlMap->vmaAddr;
++ }
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ gcmkVERIFY_OK(
++ gckVGHARDWARE_SplitMemory(Kernel->vg->hardware,
++ device->contiguousVidMem->baseAddress,
++ &pool,
++ &base));
++ }
++ else
++#endif
++ {
++ gctUINT32 baseAddress = 0;
++
++ if (Kernel->hardware->mmuVersion == 0)
++ {
++ gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &baseAddress));
++ }
++
++ gcmkVERIFY_OK(
++ gckHARDWARE_SplitMemory(Kernel->hardware,
++ device->contiguousVidMem->baseAddress - baseAddress,
++ &pool,
++ &base));
++ }
++ offset -= base;
++ break;
++
++ default:
++ /* Invalid memory pool. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Build logical address of specified address. */
++ *Logical = (gctPOINTER) ((gctUINT8_PTR) logical + offset);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Logical=%p", *Logical);
++ return gcvSTATUS_OK;
++
++OnError:
++    /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_MapVideoMemory
++**
++** Get the logical address for a hardware specific memory address for the
++** current process.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** gctBOOL InUserSpace
++** gcvTRUE to map the memory into the user space.
++**
++** gctUINT32 Address
++** Hardware specific memory address.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the logical address of the
++** specified memory address.
++*/
++gceSTATUS
++gckKERNEL_MapVideoMemory(
++ IN gckKERNEL Kernel,
++ IN gctBOOL InUserSpace,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * Logical
++ )
++{
++ return gckKERNEL_MapVideoMemoryEx(Kernel, gcvCORE_MAJOR, InUserSpace, Address, Logical);
++}
++
++/*******************************************************************************
++**
++** gckKERNEL_Notify
++**
++**  This function is called by clients to notify the gckKERNEL object of an event.
++**
++** INPUT:
++**
++** gckKERNEL Kernel
++**          Pointer to a gckKERNEL object.
++**
++** gceNOTIFY Notification
++** Notification event.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckKERNEL_Notify(
++ IN gckKERNEL Kernel,
++ IN gceNOTIFY Notification,
++ IN gctBOOL Data
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Kernel=%p Notification=%d Data=%d",
++ Kernel, Notification, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++
++    /* Dispatch on notification. */
++ switch (Notification)
++ {
++ case gcvNOTIFY_INTERRUPT:
++ /* Process the interrupt. */
++#if COMMAND_PROCESSOR_VERSION > 1
++ status = gckINTERRUPT_Notify(Kernel->interrupt, Data);
++#else
++ status = gckHARDWARE_Interrupt(Kernel->hardware, Data);
++#endif
++ break;
++
++ default:
++ status = gcvSTATUS_OK;
++ break;
++ }
++
++    /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckKERNEL_QuerySettings(
++ IN gckKERNEL Kernel,
++ OUT gcsKERNEL_SETTINGS * Settings
++ )
++{
++ gckGALDEVICE device;
++
++ gcmkHEADER_ARG("Kernel=%p", Kernel);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
++ gcmkVERIFY_ARGUMENT(Settings != gcvNULL);
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Kernel->context;
++
++ /* Fill in signal. */
++ Settings->signal = device->signal;
++
++ /* Success. */
++ gcmkFOOTER_ARG("Settings->signal=%d", Settings->signal);
++ return gcvSTATUS_OK;
++}
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_linux.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_linux.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_linux.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_linux.h 2015-07-27 23:13:06.222780123 +0200
+@@ -0,0 +1,94 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_linux_h_
++#define __gc_hal_kernel_linux_h_
++
++#include <linux/version.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/signal.h>
++#ifdef FLAREON
++# include <asm/arch-realview/dove_gpio_irq.h>
++#endif
++#include <linux/interrupt.h>
++#include <linux/vmalloc.h>
++#include <linux/dma-mapping.h>
++#include <linux/kthread.h>
++
++#ifdef MODVERSIONS
++# include <linux/modversions.h>
++#endif
++#include <asm/io.h>
++#include <asm/uaccess.h>
++
++#if ENABLE_GPU_CLOCK_BY_DRIVER && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
++#include <linux/clk.h>
++#include <linux/regulator/consumer.h>
++#endif
++
++#define NTSTRSAFE_NO_CCH_FUNCTIONS
++#include "gc_hal.h"
++#include "gc_hal_driver.h"
++#include "gc_hal_kernel.h"
++#include "gc_hal_kernel_device.h"
++#include "gc_hal_kernel_os.h"
++#include "gc_hal_kernel_debugfs.h"
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
++#define FIND_TASK_BY_PID(x) pid_task(find_vpid(x), PIDTYPE_PID)
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
++#define FIND_TASK_BY_PID(x) find_task_by_vpid(x)
++#else
++#define FIND_TASK_BY_PID(x) find_task_by_pid(x)
++#endif
++
++#define _WIDE(string) L##string
++#define WIDE(string) _WIDE(string)
++
++#define countof(a) (sizeof(a) / sizeof(a[0]))
++
++#define DRV_NAME "galcore"
++
++#define GetPageCount(size, offset) ((((size) + ((offset) & ~PAGE_CACHE_MASK)) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT)
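++/*
++ * Worked example (4 KB pages): GetPageCount(8192, 100) keeps only the
++ * in-page part of the offset (100), so it computes
++ * (8192 + 100 + 4095) >> 12 = 3 -- the extra page covers the tail that the
++ * 100-byte offset pushes past the second page boundary.
++ */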
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION (3,7,0)
++#define gcdVM_FLAGS (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP)
++#else
++#define gcdVM_FLAGS (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
++#endif
++
++static inline gctINT
++GetOrder(
++ IN gctINT numPages
++ )
++{
++ gctINT order = 0;
++
++ while ((1 << order) < numPages) order++;
++
++ return order;
++}
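++
++/*
++ * Worked example: GetOrder(1) == 0, GetOrder(2) == 1, GetOrder(5) == 3,
++ * since 2^3 = 8 is the smallest power of two covering 5 pages. This is the
++ * `order` argument expected by alloc_pages()/free_pages().
++ */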
++
++#endif /* __gc_hal_kernel_linux_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_math.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_math.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_math.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_math.c 2015-07-27 23:13:06.222780123 +0200
+@@ -0,0 +1,32 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++
++gctINT
++gckMATH_ModuloInt(
++ IN gctINT X,
++ IN gctINT Y
++ )
++{
++    if (Y == 0) { return 0; }
++    else { return X % Y; }
++}
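++
++/*
++ * Example: gckMATH_ModuloInt(7, 3) == 1, while gckMATH_ModuloInt(7, 0) == 0
++ * rather than faulting -- the zero check guards against division by zero.
++ */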
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_os.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_os.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_os.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_os.c 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,9078 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/sched.h>
++#include <asm/atomic.h>
++#include <linux/dma-mapping.h>
++#include <linux/slab.h>
++#include <linux/idr.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
++#include <mach/hardware.h>
++#endif
++#include <linux/workqueue.h>
++#include <linux/idr.h>
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
++#include <linux/math64.h>
++#endif
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++#include <linux/reset.h>
++static inline void imx_gpc_power_up_pu(bool flag) {}
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++#include <mach/common.h>
++#endif
++#include <linux/delay.h>
++#include <linux/pm_runtime.h>
++
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++#include <linux/file.h>
++#include "gc_hal_kernel_sync.h"
++#endif
++
++
++#define _GC_OBJ_ZONE gcvZONE_OS
++
++/*******************************************************************************
++***** Version Signature *******************************************************/
++
++#ifdef ANDROID
++const char * _PLATFORM = "\n\0$PLATFORM$Android$\n";
++#else
++const char * _PLATFORM = "\n\0$PLATFORM$Linux$\n";
++#endif
++
++#define USER_SIGNAL_TABLE_LEN_INIT 64
++#define gcdSUPPRESS_OOM_MESSAGE 1
++
++#define MEMORY_LOCK(os) \
++ gcmkVERIFY_OK(gckOS_AcquireMutex( \
++ (os), \
++ (os)->memoryLock, \
++ gcvINFINITE))
++
++#define MEMORY_UNLOCK(os) \
++ gcmkVERIFY_OK(gckOS_ReleaseMutex((os), (os)->memoryLock))
++
++#define MEMORY_MAP_LOCK(os) \
++ gcmkVERIFY_OK(gckOS_AcquireMutex( \
++ (os), \
++ (os)->memoryMapLock, \
++ gcvINFINITE))
++
++#define MEMORY_MAP_UNLOCK(os) \
++ gcmkVERIFY_OK(gckOS_ReleaseMutex((os), (os)->memoryMapLock))
++
++/* Protection bit when mapping memroy to user sapce */
++#define gcmkPAGED_MEMROY_PROT(x) pgprot_writecombine(x)
++
++#if gcdNONPAGED_MEMORY_BUFFERABLE
++#define gcmkIOREMAP ioremap_wc
++#define gcmkNONPAGED_MEMROY_PROT(x) pgprot_writecombine(x)
++#elif !gcdNONPAGED_MEMORY_CACHEABLE
++#define gcmkIOREMAP ioremap_nocache
++#define gcmkNONPAGED_MEMROY_PROT(x) pgprot_noncached(x)
++#endif
++
++#if gcdSUPPRESS_OOM_MESSAGE
++#define gcdNOWARN __GFP_NOWARN
++#else
++#define gcdNOWARN 0
++#endif
++
++#define gcdINFINITE_TIMEOUT (60 * 1000)
++#define gcdDETECT_TIMEOUT 0
++#define gcdDETECT_DMA_ADDRESS 1
++#define gcdDETECT_DMA_STATE 1
++
++#define gcdUSE_NON_PAGED_MEMORY_CACHE 10
++
++/******************************************************************************\
++********************************** Structures **********************************
++\******************************************************************************/
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++typedef struct _gcsNonPagedMemoryCache
++{
++#ifndef NO_DMA_COHERENT
++ gctINT size;
++ gctSTRING addr;
++ dma_addr_t dmaHandle;
++#else
++ long order;
++ struct page * page;
++#endif
++
++ struct _gcsNonPagedMemoryCache * prev;
++ struct _gcsNonPagedMemoryCache * next;
++}
++gcsNonPagedMemoryCache;
++#endif /* gcdUSE_NON_PAGED_MEMORY_CACHE */
++
++typedef struct _gcsUSER_MAPPING * gcsUSER_MAPPING_PTR;
++typedef struct _gcsUSER_MAPPING
++{
++ /* Pointer to next mapping structure. */
++ gcsUSER_MAPPING_PTR next;
++
++ /* Physical address of this mapping. */
++ gctUINT32 physical;
++
++ /* Logical address of this mapping. */
++ gctPOINTER logical;
++
++ /* Number of bytes of this mapping. */
++ gctSIZE_T bytes;
++
++ /* Starting address of this mapping. */
++ gctINT8_PTR start;
++
++ /* Ending address of this mapping. */
++ gctINT8_PTR end;
++}
++gcsUSER_MAPPING;
++
++typedef struct _gcsINTEGER_DB * gcsINTEGER_DB_PTR;
++typedef struct _gcsINTEGER_DB
++{
++ struct idr idr;
++ spinlock_t lock;
++ gctINT curr;
++}
++gcsINTEGER_DB;
++
++struct _gckOS
++{
++ /* Object. */
++ gcsOBJECT object;
++
++ /* Heap. */
++ gckHEAP heap;
++
++ /* Pointer to device */
++ gckGALDEVICE device;
++
++ /* Memory management */
++ gctPOINTER memoryLock;
++ gctPOINTER memoryMapLock;
++
++ struct _LINUX_MDL *mdlHead;
++ struct _LINUX_MDL *mdlTail;
++
++ /* Kernel process ID. */
++ gctUINT32 kernelProcessID;
++
++ /* Signal management. */
++
++ /* Lock. */
++ gctPOINTER signalMutex;
++
++ /* signal id database. */
++ gcsINTEGER_DB signalDB;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ /* Lock. */
++ gctPOINTER syncPointMutex;
++
++ /* sync point id database. */
++ gcsINTEGER_DB syncPointDB;
++#endif
++
++ gcsUSER_MAPPING_PTR userMap;
++ gctPOINTER debugLock;
++
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ gctUINT cacheSize;
++ gcsNonPagedMemoryCache * cacheHead;
++ gcsNonPagedMemoryCache * cacheTail;
++#endif
++
++ /* workqueue for os timer. */
++ struct workqueue_struct * workqueue;
++
++ int gpu_clk_on[3];
++ struct mutex gpu_clk_mutex;
++
++ gctPOINTER vidmemMutex;
++};
++
++typedef struct _gcsSIGNAL * gcsSIGNAL_PTR;
++typedef struct _gcsSIGNAL
++{
++ /* Kernel sync primitive. */
++ struct completion obj;
++
++ /* Manual reset flag. */
++ gctBOOL manualReset;
++
++ /* The reference counter. */
++ atomic_t ref;
++
++ /* The owner of the signal. */
++ gctHANDLE process;
++
++ gckHARDWARE hardware;
++
++ /* ID. */
++ gctUINT32 id;
++}
++gcsSIGNAL;
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++typedef struct _gcsSYNC_POINT * gcsSYNC_POINT_PTR;
++typedef struct _gcsSYNC_POINT
++{
++ /* The reference counter. */
++ atomic_t ref;
++
++ /* State. */
++ atomic_t state;
++
++ /* timeline. */
++ struct sync_timeline * timeline;
++
++ /* ID. */
++ gctUINT32 id;
++}
++gcsSYNC_POINT;
++#endif
++
++typedef struct _gcsPageInfo * gcsPageInfo_PTR;
++typedef struct _gcsPageInfo
++{
++ struct page **pages;
++ gctUINT32_PTR pageTable;
++}
++gcsPageInfo;
++
++typedef struct _gcsOSTIMER * gcsOSTIMER_PTR;
++typedef struct _gcsOSTIMER
++{
++ struct delayed_work work;
++ gctTIMERFUNCTION function;
++ gctPOINTER data;
++} gcsOSTIMER;
++
++/******************************************************************************\
++******************************* Private Functions ******************************
++\******************************************************************************/
++
++static gctINT
++_GetProcessID(
++ void
++ )
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ return task_tgid_vnr(current);
++#else
++ return current->tgid;
++#endif
++}
++
++static gctINT
++_GetThreadID(
++ void
++ )
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++ return task_pid_vnr(current);
++#else
++ return current->pid;
++#endif
++}
++
++static PLINUX_MDL
++_CreateMdl(
++ IN gctINT ProcessID
++ )
++{
++ PLINUX_MDL mdl;
++
++ gcmkHEADER_ARG("ProcessID=%d", ProcessID);
++
++ mdl = (PLINUX_MDL)kzalloc(sizeof(struct _LINUX_MDL), GFP_KERNEL | gcdNOWARN);
++ if (mdl == gcvNULL)
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++
++ mdl->pid = ProcessID;
++ mdl->maps = gcvNULL;
++ mdl->prev = gcvNULL;
++ mdl->next = gcvNULL;
++
++ gcmkFOOTER_ARG("0x%X", mdl);
++ return mdl;
++}
++
++static gceSTATUS
++_DestroyMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN PLINUX_MDL_MAP MdlMap
++ );
++
++static gceSTATUS
++_DestroyMdl(
++ IN PLINUX_MDL Mdl
++ )
++{
++ PLINUX_MDL_MAP mdlMap, next;
++
++ gcmkHEADER_ARG("Mdl=0x%X", Mdl);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Mdl != gcvNULL);
++
++ mdlMap = Mdl->maps;
++
++ while (mdlMap != gcvNULL)
++ {
++ next = mdlMap->next;
++
++ gcmkVERIFY_OK(_DestroyMdlMap(Mdl, mdlMap));
++
++ mdlMap = next;
++ }
++
++ kfree(Mdl);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++static PLINUX_MDL_MAP
++_CreateMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN gctINT ProcessID
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++
++ gcmkHEADER_ARG("Mdl=0x%X ProcessID=%d", Mdl, ProcessID);
++
++ mdlMap = (PLINUX_MDL_MAP)kmalloc(sizeof(struct _LINUX_MDL_MAP), GFP_KERNEL | gcdNOWARN);
++ if (mdlMap == gcvNULL)
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++
++ mdlMap->pid = ProcessID;
++ mdlMap->vmaAddr = gcvNULL;
++ mdlMap->vma = gcvNULL;
++ mdlMap->count = 0;
++
++ mdlMap->next = Mdl->maps;
++ Mdl->maps = mdlMap;
++
++ gcmkFOOTER_ARG("0x%X", mdlMap);
++ return mdlMap;
++}
++
++static gceSTATUS
++_DestroyMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN PLINUX_MDL_MAP MdlMap
++ )
++{
++ PLINUX_MDL_MAP prevMdlMap;
++
++ gcmkHEADER_ARG("Mdl=0x%X MdlMap=0x%X", Mdl, MdlMap);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(MdlMap != gcvNULL);
++ gcmkASSERT(Mdl->maps != gcvNULL);
++
++ if (Mdl->maps == MdlMap)
++ {
++ Mdl->maps = MdlMap->next;
++ }
++ else
++ {
++ prevMdlMap = Mdl->maps;
++
++ while (prevMdlMap->next != MdlMap)
++ {
++ prevMdlMap = prevMdlMap->next;
++
++ gcmkASSERT(prevMdlMap != gcvNULL);
++ }
++
++ prevMdlMap->next = MdlMap->next;
++ }
++
++ kfree(MdlMap);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++extern PLINUX_MDL_MAP
++FindMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN gctINT ProcessID
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++
++ gcmkHEADER_ARG("Mdl=0x%X ProcessID=%d", Mdl, ProcessID);
++    if (Mdl == gcvNULL)
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++ mdlMap = Mdl->maps;
++
++ while (mdlMap != gcvNULL)
++ {
++ if (mdlMap->pid == ProcessID)
++ {
++ gcmkFOOTER_ARG("0x%X", mdlMap);
++ return mdlMap;
++ }
++
++ mdlMap = mdlMap->next;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvNULL;
++}
++
++void
++OnProcessExit(
++ IN gckOS Os,
++ IN gckKERNEL Kernel
++ )
++{
++}
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
++static inline int
++is_vmalloc_addr(
++ void *Addr
++ )
++{
++ unsigned long addr = (unsigned long)Addr;
++
++ return addr >= VMALLOC_START && addr < VMALLOC_END;
++}
++#endif
++
++static void
++_NonContiguousFree(
++ IN struct page ** Pages,
++ IN gctUINT32 NumPages
++ )
++{
++ gctINT i;
++
++ gcmkHEADER_ARG("Pages=0x%X, NumPages=%d", Pages, NumPages);
++
++ gcmkASSERT(Pages != gcvNULL);
++
++ for (i = 0; i < NumPages; i++)
++ {
++ __free_page(Pages[i]);
++ }
++
++ if (is_vmalloc_addr(Pages))
++ {
++ vfree(Pages);
++ }
++ else
++ {
++ kfree(Pages);
++ }
++
++ gcmkFOOTER_NO();
++}
++
++static struct page **
++_NonContiguousAlloc(
++ IN gctUINT32 NumPages
++ )
++{
++ struct page ** pages;
++ struct page *p;
++ gctINT i, size;
++
++ gcmkHEADER_ARG("NumPages=%lu", NumPages);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
++ if (NumPages > totalram_pages)
++#else
++ if (NumPages > num_physpages)
++#endif
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++
++ size = NumPages * sizeof(struct page *);
++
++ pages = kmalloc(size, GFP_KERNEL | gcdNOWARN);
++
++ if (!pages)
++ {
++ pages = vmalloc(size);
++
++ if (!pages)
++ {
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++ }
++
++ for (i = 0; i < NumPages; i++)
++ {
++ p = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | gcdNOWARN);
++
++ if (!p)
++ {
++ _NonContiguousFree(pages, i);
++ gcmkFOOTER_NO();
++ return gcvNULL;
++ }
++
++ pages[i] = p;
++ }
++
++ gcmkFOOTER_ARG("pages=0x%X", pages);
++ return pages;
++}
++
++static inline struct page *
++_NonContiguousToPage(
++ IN struct page ** Pages,
++ IN gctUINT32 Index
++ )
++{
++ gcmkASSERT(Pages != gcvNULL);
++ return Pages[Index];
++}
++
++static inline unsigned long
++_NonContiguousToPfn(
++ IN struct page ** Pages,
++ IN gctUINT32 Index
++ )
++{
++ gcmkASSERT(Pages != gcvNULL);
++ return page_to_pfn(_NonContiguousToPage(Pages, Index));
++}
++
++static inline unsigned long
++_NonContiguousToPhys(
++ IN struct page ** Pages,
++ IN gctUINT32 Index
++ )
++{
++ gcmkASSERT(Pages != gcvNULL);
++ return page_to_phys(_NonContiguousToPage(Pages, Index));
++}
++
++
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++
++static gctBOOL
++_AddNonPagedMemoryCache(
++ gckOS Os,
++#ifndef NO_DMA_COHERENT
++ gctINT Size,
++ gctSTRING Addr,
++ dma_addr_t DmaHandle
++#else
++ long Order,
++ struct page * Page
++#endif
++ )
++{
++ gcsNonPagedMemoryCache *cache;
++
++ if (Os->cacheSize >= gcdUSE_NON_PAGED_MEMORY_CACHE)
++ {
++ return gcvFALSE;
++ }
++
++ /* Allocate the cache record */
++ cache = (gcsNonPagedMemoryCache *)kmalloc(sizeof(gcsNonPagedMemoryCache), GFP_ATOMIC);
++
++ if (cache == gcvNULL) return gcvFALSE;
++
++#ifndef NO_DMA_COHERENT
++ cache->size = Size;
++ cache->addr = Addr;
++ cache->dmaHandle = DmaHandle;
++#else
++ cache->order = Order;
++ cache->page = Page;
++#endif
++
++ /* Add to list */
++ if (Os->cacheHead == gcvNULL)
++ {
++ cache->prev = gcvNULL;
++ cache->next = gcvNULL;
++ Os->cacheHead =
++ Os->cacheTail = cache;
++ }
++ else
++ {
++ /* Add to the tail. */
++ cache->prev = Os->cacheTail;
++ cache->next = gcvNULL;
++ Os->cacheTail->next = cache;
++ Os->cacheTail = cache;
++ }
++
++ Os->cacheSize++;
++
++ return gcvTRUE;
++}
++
++#ifndef NO_DMA_COHERENT
++static gctSTRING
++_GetNonPagedMemoryCache(
++ gckOS Os,
++ gctINT Size,
++ dma_addr_t * DmaHandle
++ )
++#else
++static struct page *
++_GetNonPagedMemoryCache(
++ gckOS Os,
++ long Order
++ )
++#endif
++{
++ gcsNonPagedMemoryCache *cache;
++#ifndef NO_DMA_COHERENT
++ gctSTRING addr;
++#else
++ struct page * page;
++#endif
++
++ if (Os->cacheHead == gcvNULL) return gcvNULL;
++
++ /* Find the right cache */
++ cache = Os->cacheHead;
++
++ while (cache != gcvNULL)
++ {
++#ifndef NO_DMA_COHERENT
++ if (cache->size == Size) break;
++#else
++ if (cache->order == Order) break;
++#endif
++
++ cache = cache->next;
++ }
++
++ if (cache == gcvNULL) return gcvNULL;
++
++ /* Remove the cache from list */
++ if (cache == Os->cacheHead)
++ {
++ Os->cacheHead = cache->next;
++
++ if (Os->cacheHead == gcvNULL)
++ {
++ Os->cacheTail = gcvNULL;
++ }
++ }
++ else
++ {
++ cache->prev->next = cache->next;
++
++ if (cache == Os->cacheTail)
++ {
++ Os->cacheTail = cache->prev;
++ }
++ else
++ {
++ cache->next->prev = cache->prev;
++ }
++ }
++
++ /* Destroy cache */
++#ifndef NO_DMA_COHERENT
++ addr = cache->addr;
++ *DmaHandle = cache->dmaHandle;
++#else
++ page = cache->page;
++#endif
++
++ kfree(cache);
++
++ Os->cacheSize--;
++
++#ifndef NO_DMA_COHERENT
++ return addr;
++#else
++ return page;
++#endif
++}
++
++static void
++_FreeAllNonPagedMemoryCache(
++ gckOS Os
++ )
++{
++ gcsNonPagedMemoryCache *cache, *nextCache;
++
++ MEMORY_LOCK(Os);
++
++ cache = Os->cacheHead;
++
++ while (cache != gcvNULL)
++ {
++ if (cache != Os->cacheTail)
++ {
++ nextCache = cache->next;
++ }
++ else
++ {
++ nextCache = gcvNULL;
++ }
++
++ /* Remove the cache from list */
++ if (cache == Os->cacheHead)
++ {
++ Os->cacheHead = cache->next;
++
++ if (Os->cacheHead == gcvNULL)
++ {
++ Os->cacheTail = gcvNULL;
++ }
++ }
++ else
++ {
++ cache->prev->next = cache->next;
++
++ if (cache == Os->cacheTail)
++ {
++ Os->cacheTail = cache->prev;
++ }
++ else
++ {
++ cache->next->prev = cache->prev;
++ }
++ }
++
++#ifndef NO_DMA_COHERENT
++ dma_free_coherent(gcvNULL,
++ cache->size,
++ cache->addr,
++ cache->dmaHandle);
++#else
++ free_pages((unsigned long)page_address(cache->page), cache->order);
++#endif
++
++ kfree(cache);
++
++ cache = nextCache;
++ }
++
++ MEMORY_UNLOCK(Os);
++}
++
++#endif /* gcdUSE_NON_PAGED_MEMORY_CACHE */
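++
++/*
++ * Life-cycle sketch for the cache above (illustrative, DMA-coherent build):
++ * freed allocations are parked on the list instead of returned to the
++ * system, and later allocations of the same size are served from it.
++ * `os`, `size`, `addr` and `dmaHandle` are assumptions.
++ *
++ *     // on free: try to park the buffer first
++ *     if (!_AddNonPagedMemoryCache(os, size, addr, dmaHandle))
++ *         dma_free_coherent(gcvNULL, size, addr, dmaHandle);
++ *
++ *     // on allocate: try the cache before dma_zalloc_coherent()
++ *     addr = _GetNonPagedMemoryCache(os, size, &dmaHandle);
++ */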
++
++/*******************************************************************************
++** Integer Id Management.
++*/
++gceSTATUS
++_AllocateIntegerId(
++ IN gcsINTEGER_DB_PTR Database,
++ IN gctPOINTER KernelPointer,
++ OUT gctUINT32 *Id
++ )
++{
++ int result;
++ gctINT next;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
++ idr_preload(GFP_KERNEL | gcdNOWARN);
++
++ spin_lock(&Database->lock);
++
++    next = (Database->curr + 1 <= 0) ? 1 : Database->curr + 1;
++    result = idr_alloc(&Database->idr, KernelPointer, next, 0, GFP_ATOMIC);
++
++    /* idr_alloc() returns the allocated id (>= 0) on success. */
++    if (result >= 0)
++    {
++        Database->curr = result;
++    }
++
++ spin_unlock(&Database->lock);
++
++ idr_preload_end();
++
++ if (result < 0)
++ {
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ *Id = result;
++#else
++again:
++ if (idr_pre_get(&Database->idr, GFP_KERNEL | gcdNOWARN) == 0)
++ {
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ spin_lock(&Database->lock);
++
++ next = (Database->curr + 1 <= 0) ? 1 : Database->curr + 1;
++
++    /* Try to get an id greater than the current id. */
++ result = idr_get_new_above(&Database->idr, KernelPointer, next, Id);
++
++ if (!result)
++ {
++ Database->curr = *Id;
++ }
++
++ spin_unlock(&Database->lock);
++
++ if (result == -EAGAIN)
++ {
++ goto again;
++ }
++
++ if (result != 0)
++ {
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_QueryIntegerId(
++ IN gcsINTEGER_DB_PTR Database,
++ IN gctUINT32 Id,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gctPOINTER pointer;
++
++ spin_lock(&Database->lock);
++
++ pointer = idr_find(&Database->idr, Id);
++
++ spin_unlock(&Database->lock);
++
++    if (pointer)
++ {
++ *KernelPointer = pointer;
++ return gcvSTATUS_OK;
++ }
++ else
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_OS,
++ "%s(%d) Id = %d is not found",
++ __FUNCTION__, __LINE__, Id);
++
++ return gcvSTATUS_NOT_FOUND;
++ }
++}
++
++gceSTATUS
++_DestroyIntegerId(
++ IN gcsINTEGER_DB_PTR Database,
++ IN gctUINT32 Id
++ )
++{
++ spin_lock(&Database->lock);
++
++ idr_remove(&Database->idr, Id);
++
++ spin_unlock(&Database->lock);
++
++ return gcvSTATUS_OK;
++}
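++
++/*
++ * Usage sketch for the integer id database (illustrative; `db` and `obj`
++ * are assumptions): allocate an id for a kernel pointer, look it up from a
++ * user-visible handle, and release it when done.
++ *
++ *     gctUINT32 id;
++ *     gctPOINTER found;
++ *     gcmkONERROR(_AllocateIntegerId(db, obj, &id));
++ *     gcmkONERROR(_QueryIntegerId(db, id, &found));   // found == obj
++ *     gcmkVERIFY_OK(_DestroyIntegerId(db, id));
++ */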
++
++static void
++_UnmapUserLogical(
++ IN gctINT Pid,
++ IN gctPOINTER Logical,
++ IN gctUINT32 Size
++)
++{
++ if (unlikely(current->mm == gcvNULL))
++ {
++ /* Do nothing if process is exiting. */
++ return;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ if (vm_munmap((unsigned long)Logical, Size) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): vm_munmap failed",
++ __FUNCTION__, __LINE__
++ );
++ }
++#else
++ down_write(&current->mm->mmap_sem);
++ if (do_munmap(current->mm, (unsigned long)Logical, Size) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): do_munmap failed",
++ __FUNCTION__, __LINE__
++ );
++ }
++ up_write(&current->mm->mmap_sem);
++#endif
++}
++
++gceSTATUS
++_QueryProcessPageTable(
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ spinlock_t *lock;
++ gctUINTPTR_T logical = (gctUINTPTR_T)Logical;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ if (!current->mm)
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ pgd = pgd_offset(current->mm, logical);
++ if (pgd_none(*pgd) || pgd_bad(*pgd))
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ pud = pud_offset(pgd, logical);
++ if (pud_none(*pud) || pud_bad(*pud))
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ pmd = pmd_offset(pud, logical);
++ if (pmd_none(*pmd) || pmd_bad(*pmd))
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ pte = pte_offset_map_lock(current->mm, pmd, logical, &lock);
++ if (!pte)
++ {
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ if (!pte_present(*pte))
++ {
++ pte_unmap_unlock(pte, lock);
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ *Address = (pte_pfn(*pte) << PAGE_SHIFT) | (logical & ~PAGE_MASK);
++ pte_unmap_unlock(pte, lock);
++
++ return gcvSTATUS_OK;
++}
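++
++/*
++ * Worked example for the address computation above (4 KB pages): if the pte
++ * maps pfn 0x12345 and Logical is 0x400007B4, the result is
++ * (0x12345 << 12) | (0x400007B4 & 0xFFF) = 0x123457B4.
++ */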
++
++/*******************************************************************************
++**
++** gckOS_Construct
++**
++** Construct a new gckOS object.
++**
++** INPUT:
++**
++** gctPOINTER Context
++** Pointer to the gckGALDEVICE class.
++**
++** OUTPUT:
++**
++** gckOS * Os
++** Pointer to a variable that will hold the pointer to the gckOS object.
++*/
++gceSTATUS
++gckOS_Construct(
++ IN gctPOINTER Context,
++ OUT gckOS * Os
++ )
++{
++ gckOS os;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Context=0x%X", Context);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Os != gcvNULL);
++
++ /* Allocate the gckOS object. */
++ os = (gckOS) kmalloc(gcmSIZEOF(struct _gckOS), GFP_KERNEL | gcdNOWARN);
++
++ if (os == gcvNULL)
++ {
++ /* Out of memory. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ /* Zero the memory. */
++ gckOS_ZeroMemory(os, gcmSIZEOF(struct _gckOS));
++
++ /* Initialize the gckOS object. */
++ os->object.type = gcvOBJ_OS;
++
++    /* Set the device. */
++ os->device = Context;
++
++ /* IMPORTANT! No heap yet. */
++ os->heap = gcvNULL;
++
++ /* Initialize the memory lock. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->memoryLock));
++ gcmkONERROR(gckOS_CreateMutex(os, &os->memoryMapLock));
++
++ /* Create debug lock mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->debugLock));
++
++
++ os->mdlHead = os->mdlTail = gcvNULL;
++
++ /* Get the kernel process ID. */
++ gcmkONERROR(gckOS_GetProcessID(&os->kernelProcessID));
++
++ /*
++ * Initialize the signal manager.
++ */
++
++ /* Initialize mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->signalMutex));
++
++ /* Initialize signal id database lock. */
++ spin_lock_init(&os->signalDB.lock);
++
++ /* Initialize signal id database. */
++ idr_init(&os->signalDB.idr);
++
++/* Protection bits used when mapping memory to user space. */
++ /*
++ * Initialize the sync point manager.
++ */
++
++ /* Initialize mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->syncPointMutex));
++
++ /* Initialize sync point id database lock. */
++ spin_lock_init(&os->syncPointDB.lock);
++
++ /* Initialize sync point id database. */
++ idr_init(&os->syncPointDB.idr);
++#endif
++
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ os->cacheSize = 0;
++ os->cacheHead = gcvNULL;
++ os->cacheTail = gcvNULL;
++#endif
++
++ /* Create a workqueue for os timer. */
++ os->workqueue = create_singlethread_workqueue("galcore workqueue");
++
++ if (os->workqueue == gcvNULL)
++ {
++ /* Out of memory. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ mutex_init(&os->gpu_clk_mutex);
++
++ /* Construct a video memory mutex. */
++ gcmkONERROR(gckOS_CreateMutex(os, &os->vidmemMutex));
++
++ /* Return pointer to the gckOS object. */
++ *Os = os;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Os=0x%X", *Os);
++ return gcvSTATUS_OK;
++
++OnError:
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ if (os->syncPointMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->syncPointMutex));
++ }
++#endif
++
++ if (os->signalMutex != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->signalMutex));
++ }
++
++ if (os->heap != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckHEAP_Destroy(os->heap));
++ }
++
++ if (os->memoryMapLock != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->memoryMapLock));
++ }
++
++ if (os->memoryLock != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->memoryLock));
++ }
++
++ if (os->debugLock != gcvNULL)
++ {
++ gcmkVERIFY_OK(
++ gckOS_DeleteMutex(os, os->debugLock));
++ }
++
++ if (os->workqueue != gcvNULL)
++ {
++ destroy_workqueue(os->workqueue);
++ }
++
++ kfree(os);
++
++ /* Return the error. */
++ gcmkFOOTER();
++ return status;
++}
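++
++/*
++** Construction sketch (illustrative): the platform driver creates exactly
++** one gckOS per gckGALDEVICE and destroys it on teardown. `galDevice` is an
++** assumed name for the gckGALDEVICE context.
++**
++**     gckOS os = gcvNULL;
++**     gcmkONERROR(gckOS_Construct(galDevice, &os));
++**     ...
++**     gcmkVERIFY_OK(gckOS_Destroy(os));
++*/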
++
++/*******************************************************************************
++**
++** gckOS_Destroy
++**
++** Destroy an gckOS object.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object that needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Destroy(
++ IN gckOS Os
++ )
++{
++ gckHEAP heap;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ _FreeAllNonPagedMemoryCache(Os);
++#endif
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++ /*
++ * Destroy the sync point manager.
++ */
++
++ /* Destroy the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->syncPointMutex));
++#endif
++
++ /*
++ * Destroy the signal manager.
++ */
++
++ /* Destroy the mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->signalMutex));
++
++ if (Os->heap != gcvNULL)
++ {
++ /* Mark gckHEAP as gone. */
++ heap = Os->heap;
++ Os->heap = gcvNULL;
++
++ /* Destroy the gckHEAP object. */
++ gcmkVERIFY_OK(gckHEAP_Destroy(heap));
++ }
++
++ /* Destroy the memory lock. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->memoryMapLock));
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->memoryLock));
++
++ /* Destroy debug lock mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->debugLock));
++
++ /* Destroy video memory mutex. */
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->vidmemMutex));
++
++    /* Wait for all queued work to finish. */
++ flush_workqueue(Os->workqueue);
++
++    /* Destroy the work queue. */
++ destroy_workqueue(Os->workqueue);
++
++ /* Flush the debug cache. */
++ gcmkDEBUGFLUSH(~0U);
++
++ /* Mark the gckOS object as unknown. */
++ Os->object.type = gcvOBJ_UNKNOWN;
++
++ /* Free the gckOS object. */
++ kfree(Os);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++static gctSTRING
++_CreateKernelVirtualMapping(
++ IN PLINUX_MDL Mdl
++ )
++{
++ gctSTRING addr = 0;
++ gctINT numPages = Mdl->numPages;
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ if (Mdl->contiguous)
++ {
++ addr = page_address(Mdl->u.contiguousPages);
++ }
++ else
++ {
++ addr = vmap(Mdl->u.nonContiguousPages,
++ numPages,
++ 0,
++ PAGE_KERNEL);
++
++ /* Trigger a page fault. */
++ memset(addr, 0, numPages * PAGE_SIZE);
++ }
++#else
++ struct page ** pages;
++ gctBOOL free = gcvFALSE;
++ gctINT i;
++
++ if (Mdl->contiguous)
++ {
++ pages = kmalloc(sizeof(struct page *) * numPages, GFP_KERNEL | gcdNOWARN);
++
++ if (!pages)
++ {
++ return gcvNULL;
++ }
++
++ for (i = 0; i < numPages; i++)
++ {
++ pages[i] = nth_page(Mdl->u.contiguousPages, i);
++ }
++
++ free = gcvTRUE;
++ }
++ else
++ {
++ pages = Mdl->u.nonContiguousPages;
++ }
++
++ /* ioremap() can't work on system memory since 2.6.38. */
++ addr = vmap(pages, numPages, 0, gcmkNONPAGED_MEMROY_PROT(PAGE_KERNEL));
++
++ /* Trigger a page fault. */
++ memset(addr, 0, numPages * PAGE_SIZE);
++
++ if (free)
++ {
++ kfree(pages);
++ }
++
++#endif
++
++ return addr;
++}
++
++static void
++_DestoryKernelVirtualMapping(
++ IN gctSTRING Addr
++ )
++{
++#if !gcdNONPAGED_MEMORY_CACHEABLE
++ vunmap(Addr);
++#endif
++}
++
++gceSTATUS
++gckOS_CreateKernelVirtualMapping(
++ IN gctPHYS_ADDR Physical,
++ OUT gctSIZE_T * PageCount,
++ OUT gctPOINTER * Logical
++ )
++{
++ *PageCount = ((PLINUX_MDL)Physical)->numPages;
++ *Logical = _CreateKernelVirtualMapping((PLINUX_MDL)Physical);
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_DestroyKernelVirtualMapping(
++ IN gctPOINTER Logical
++ )
++{
++ _DestoryKernelVirtualMapping((gctSTRING)Logical);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_Allocate
++**
++** Allocate memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * Memory
++** Pointer to a variable that will hold the allocated memory location.
++*/
++gceSTATUS
++gckOS_Allocate(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Bytes=%lu", Os, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Do we have a heap? */
++ if (Os->heap != gcvNULL)
++ {
++ /* Allocate from the heap. */
++ gcmkONERROR(gckHEAP_Allocate(Os->heap, Bytes, Memory));
++ }
++ else
++ {
++ gcmkONERROR(gckOS_AllocateMemory(Os, Bytes, Memory));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Memory=0x%X", *Memory);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
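++
++/*
++** Usage sketch (illustrative): gckOS_Allocate()/gckOS_Free() route through
++** the gckHEAP object once it exists, and fall back to the raw OS wrappers
++** during early bring-up while os->heap is still gcvNULL.
++**
++**     gctPOINTER buffer = gcvNULL;
++**     gcmkONERROR(gckOS_Allocate(os, 256, &buffer));
++**     ... use buffer ...
++**     gcmkVERIFY_OK(gckOS_Free(os, buffer));
++*/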
++
++/*******************************************************************************
++**
++** gckOS_Free
++**
++** Free allocated memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Memory
++** Pointer to memory allocation to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Free(
++ IN gckOS Os,
++ IN gctPOINTER Memory
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Memory=0x%X", Os, Memory);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Do we have a heap? */
++ if (Os->heap != gcvNULL)
++ {
++ /* Free from the heap. */
++ gcmkONERROR(gckHEAP_Free(Os->heap, Memory));
++ }
++ else
++ {
++ gcmkONERROR(gckOS_FreeMemory(Os, Memory));
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AllocateMemory
++**
++** Allocate memory wrapper.
++**
++** INPUT:
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPOINTER * Memory
++** Pointer to a variable that will hold the allocated memory location.
++*/
++gceSTATUS
++gckOS_AllocateMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Memory
++ )
++{
++ gctPOINTER memory;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Bytes=%lu", Os, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ if (Bytes > PAGE_SIZE)
++ {
++ memory = (gctPOINTER) vmalloc(Bytes);
++ }
++ else
++ {
++ memory = (gctPOINTER) kmalloc(Bytes, GFP_KERNEL | gcdNOWARN);
++ }
++
++ if (memory == gcvNULL)
++ {
++ /* Out of memory. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Return pointer to the memory allocation. */
++ *Memory = memory;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Memory=0x%X", *Memory);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_FreeMemory
++**
++** Free allocated memory wrapper.
++**
++** INPUT:
++**
++** gctPOINTER Memory
++** Pointer to memory allocation to free.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_FreeMemory(
++ IN gckOS Os,
++ IN gctPOINTER Memory
++ )
++{
++ gcmkHEADER_ARG("Memory=0x%X", Memory);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++
++ /* Free the memory from the OS pool. */
++ if (is_vmalloc_addr(Memory))
++ {
++ vfree(Memory);
++ }
++ else
++ {
++ kfree(Memory);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapMemory
++**
++** Map physical memory into the current process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Start of physical address memory.
++**
++** gctSIZE_T Bytes
++** Number of bytes to map.
++**
++** OUTPUT:
++**
++**      gctPOINTER * Logical
++** Pointer to a variable that will hold the logical address of the
++** mapped memory.
++*/
++gceSTATUS
++gckOS_MapMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ MEMORY_LOCK(Os);
++
++ mdlMap = FindMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ mdlMap = _CreateMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++ }
++
++ if (mdlMap->vmaAddr == gcvNULL)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
++ mdlMap->vmaAddr = (char *)vm_mmap(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++#else
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = (char *)do_mmap_pgoff(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++
++ up_write(&current->mm->mmap_sem);
++#endif
++
++ if (IS_ERR(mdlMap->vmaAddr))
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): do_mmap_pgoff error",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): mdl->numPages: %d mdl->vmaAddr: 0x%X",
++ __FUNCTION__, __LINE__,
++ mdl->numPages,
++ mdlMap->vmaAddr
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vma = find_vma(current->mm, (unsigned long)mdlMap->vmaAddr);
++
++ if (!mdlMap->vma)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): find_vma error.",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ up_write(&current->mm->mmap_sem);
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++#ifndef NO_DMA_COHERENT
++ if (dma_mmap_coherent(gcvNULL,
++ mdlMap->vma,
++ mdl->addr,
++ mdl->dmaHandle,
++ mdl->numPages * PAGE_SIZE) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): dma_mmap_coherent error.",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++#else
++#if !gcdPAGED_MEMORY_CACHEABLE
++ mdlMap->vma->vm_page_prot = gcmkPAGED_MEMROY_PROT(mdlMap->vma->vm_page_prot);
++ mdlMap->vma->vm_flags |= gcdVM_FLAGS;
++# endif
++ mdlMap->vma->vm_pgoff = 0;
++
++ if (remap_pfn_range(mdlMap->vma,
++ mdlMap->vma->vm_start,
++ mdl->dmaHandle >> PAGE_SHIFT,
++ mdl->numPages*PAGE_SIZE,
++ mdlMap->vma->vm_page_prot) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): remap_pfn_range error.",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++#endif
++
++ up_write(&current->mm->mmap_sem);
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ *Logical = mdlMap->vmaAddr;
++
++ gcmkFOOTER_ARG("*Logical=0x%X", *Logical);
++ return gcvSTATUS_OK;
++}
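++
++/*
++** Usage sketch (illustrative; `os`, `physical` and `bytes` are assumptions):
++** map a previously allocated MDL into the calling process and tear the
++** mapping down again.
++**
++**     gctPOINTER logical = gcvNULL;
++**     gcmkONERROR(gckOS_MapMemory(os, physical, bytes, &logical));
++**     ... access logical[0 .. bytes-1] from user context ...
++**     gcmkVERIFY_OK(gckOS_UnmapMemory(os, physical, bytes, logical));
++*/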
++
++/*******************************************************************************
++**
++** gckOS_UnmapMemory
++**
++** Unmap physical memory out of the current process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Start of physical address memory.
++**
++** gctSIZE_T Bytes
++** Number of bytes to unmap.
++**
++**      gctPOINTER Logical
++** Pointer to a previously mapped memory region.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu Logical=0x%X",
++ Os, Physical, Bytes, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ gckOS_UnmapMemoryEx(Os, Physical, Bytes, Logical, _GetProcessID());
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++
++/*******************************************************************************
++**
++** gckOS_UnmapMemoryEx
++**
++** Unmap physical memory in the specified process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Start of physical address memory.
++**
++** gctSIZE_T Bytes
++** Number of bytes to unmap.
++**
++**      gctPOINTER Logical
++** Pointer to a previously mapped memory region.
++**
++** gctUINT32 PID
++** Pid of the process that opened the device and mapped this memory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapMemoryEx(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical,
++ IN gctUINT32 PID
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu Logical=0x%X PID=%d",
++ Os, Physical, Bytes, Logical, PID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PID != 0);
++
++ MEMORY_LOCK(Os);
++
++ if (Logical)
++ {
++ mdlMap = FindMdlMap(mdl, PID);
++
++ if (mdlMap == gcvNULL || mdlMap->vmaAddr == gcvNULL)
++ {
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ _UnmapUserLogical(PID, mdlMap->vmaAddr, mdl->numPages * PAGE_SIZE);
++
++ gcmkVERIFY_OK(_DestroyMdlMap(mdl, mdlMap));
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapUserLogical
++**
++**  Unmap a user logical mapping of physical memory from the current process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Start of physical address memory.
++**
++** gctSIZE_T Bytes
++** Number of bytes to unmap.
++**
++**      gctPOINTER Logical
++** Pointer to a previously mapped memory region.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapUserLogical(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu Logical=0x%X",
++ Os, Physical, Bytes, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ gckOS_UnmapMemory(Os, Physical, Bytes, Logical);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++}
++
++/*******************************************************************************
++**
++** gckOS_AllocateNonPagedMemory
++**
++** Allocate a number of pages from non-paged memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctBOOL InUserSpace
++** gcvTRUE if the pages need to be mapped into user space.
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that holds the number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that hold the number of bytes allocated.
++**
++** gctPHYS_ADDR * Physical
++** Pointer to a variable that will hold the physical address of the
++** allocation.
++**
++** gctPOINTER * Logical
++** Pointer to a variable that will hold the logical address of the
++** allocation.
++*/
++gceSTATUS
++gckOS_AllocateNonPagedMemory(
++ IN gckOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ )
++{
++ gctSIZE_T bytes;
++ gctINT numPages;
++ PLINUX_MDL mdl = gcvNULL;
++ PLINUX_MDL_MAP mdlMap = gcvNULL;
++ gctSTRING addr;
++#ifdef NO_DMA_COHERENT
++ struct page * page;
++ long size, order;
++ gctPOINTER vaddr;
++#endif
++ gctBOOL locked = gcvFALSE;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X InUserSpace=%d *Bytes=%lu",
++ Os, InUserSpace, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes != gcvNULL);
++ gcmkVERIFY_ARGUMENT(*Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Align number of bytes to page size. */
++ bytes = gcmALIGN(*Bytes, PAGE_SIZE);
++
++    /* Get the total number of pages. */
++ numPages = GetPageCount(bytes, 0);
++
++ /* Allocate mdl+vector structure */
++ mdl = _CreateMdl(_GetProcessID());
++ if (mdl == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ mdl->pagedMem = 0;
++ mdl->numPages = numPages;
++
++ MEMORY_LOCK(Os);
++ locked = gcvTRUE;
++
++#ifndef NO_DMA_COHERENT
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ addr = _GetNonPagedMemoryCache(Os,
++ mdl->numPages * PAGE_SIZE,
++ &mdl->dmaHandle);
++
++ if (addr == gcvNULL)
++#endif
++ {
++ addr = dma_zalloc_coherent(gcvNULL,
++ mdl->numPages * PAGE_SIZE,
++ &mdl->dmaHandle,
++ GFP_KERNEL | gcdNOWARN);
++ }
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++    if (addr == gcvNULL)
++    {
++        MEMORY_UNLOCK(Os);
++        locked = gcvFALSE;
++        /* Free the whole cache and try again. */
++ _FreeAllNonPagedMemoryCache(Os);
++ MEMORY_LOCK(Os);
++ locked = gcvTRUE;
++ addr = dma_zalloc_coherent(gcvNULL,
++ mdl->numPages * PAGE_SIZE,
++ &mdl->dmaHandle,
++ GFP_KERNEL | gcdNOWARN);
++ }
++#endif
++#else
++ size = mdl->numPages * PAGE_SIZE;
++ order = get_order(size);
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ page = _GetNonPagedMemoryCache(Os, order);
++
++ if (page == gcvNULL)
++#endif
++ {
++ page = alloc_pages(GFP_KERNEL | gcdNOWARN, order);
++ }
++
++ if (page == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ vaddr = (gctPOINTER)page_address(page);
++ mdl->contiguous = gcvTRUE;
++ mdl->u.contiguousPages = page;
++ addr = _CreateKernelVirtualMapping(mdl);
++ mdl->dmaHandle = virt_to_phys(vaddr);
++ mdl->kaddr = vaddr;
++ mdl->u.contiguousPages = page;
++
++#if !defined(CONFIG_PPC)
++ /* Cache invalidate. */
++ dma_sync_single_for_device(
++ gcvNULL,
++ page_to_phys(page),
++ bytes,
++ DMA_FROM_DEVICE);
++#endif
++
++ while (size > 0)
++ {
++ SetPageReserved(virt_to_page(vaddr));
++
++ vaddr += PAGE_SIZE;
++ size -= PAGE_SIZE;
++ }
++#endif
++
++ if (addr == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ mdl->addr = addr;
++
++ /* Return allocated memory. */
++ *Bytes = bytes;
++ *Physical = (gctPHYS_ADDR) mdl;
++
++ if (InUserSpace)
++ {
++ mdlMap = _CreateMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++        /* This becomes valid only after mmap. */
++
++ /* We need to map this to user space. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
++ mdlMap->vmaAddr = (gctSTRING) vm_mmap(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++#else
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = (gctSTRING) do_mmap_pgoff(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++
++ up_write(&current->mm->mmap_sem);
++#endif
++
++ if (IS_ERR(mdlMap->vmaAddr))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): do_mmap_pgoff error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vma = find_vma(current->mm, (unsigned long)mdlMap->vmaAddr);
++
++ if (mdlMap->vma == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): find_vma error",
++ __FUNCTION__, __LINE__
++ );
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++#ifndef NO_DMA_COHERENT
++ if (dma_mmap_coherent(gcvNULL,
++ mdlMap->vma,
++ mdl->addr,
++ mdl->dmaHandle,
++ mdl->numPages * PAGE_SIZE) < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): dma_mmap_coherent error",
++ __FUNCTION__, __LINE__
++ );
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++#else
++ mdlMap->vma->vm_page_prot = gcmkNONPAGED_MEMROY_PROT(mdlMap->vma->vm_page_prot);
++ mdlMap->vma->vm_flags |= gcdVM_FLAGS;
++ mdlMap->vma->vm_pgoff = 0;
++
++ if (remap_pfn_range(mdlMap->vma,
++ mdlMap->vma->vm_start,
++ mdl->dmaHandle >> PAGE_SHIFT,
++ mdl->numPages * PAGE_SIZE,
++ mdlMap->vma->vm_page_prot))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_WARNING, gcvZONE_OS,
++ "%s(%d): remap_pfn_range error",
++ __FUNCTION__, __LINE__
++ );
++
++ up_write(&current->mm->mmap_sem);
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++#endif /* NO_DMA_COHERENT */
++
++ up_write(&current->mm->mmap_sem);
++
++ *Logical = mdlMap->vmaAddr;
++ }
++ else
++ {
++ *Logical = (gctPOINTER)mdl->addr;
++ }
++
++ /*
++     * Add this to the global list; it will be used by the
++     * physical-address lookup and user-pointer mapping functions.
++ */
++
++ if (!Os->mdlHead)
++ {
++ /* Initialize the queue. */
++ Os->mdlHead = Os->mdlTail = mdl;
++ }
++ else
++ {
++ /* Add to the tail. */
++ mdl->prev = Os->mdlTail;
++ Os->mdlTail->next = mdl;
++ Os->mdlTail = mdl;
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu *Physical=0x%X *Logical=0x%X",
++ *Bytes, *Physical, *Logical);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mdlMap != gcvNULL)
++ {
++ /* Free LINUX_MDL_MAP. */
++ gcmkVERIFY_OK(_DestroyMdlMap(mdl, mdlMap));
++ }
++
++ if (mdl != gcvNULL)
++ {
++ /* Free LINUX_MDL. */
++ gcmkVERIFY_OK(_DestroyMdl(mdl));
++ }
++
++ if (locked)
++ {
++ /* Unlock memory. */
++ MEMORY_UNLOCK(Os);
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_FreeNonPagedMemory
++**
++** Free previously allocated and mapped pages from non-paged memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIZE_T Bytes
++** Number of bytes allocated.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocated memory.
++**
++** gctPOINTER Logical
++** Logical address of the allocated memory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS gckOS_FreeNonPagedMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical
++ )
++{
++ PLINUX_MDL mdl;
++ PLINUX_MDL_MAP mdlMap;
++#ifdef NO_DMA_COHERENT
++ unsigned size;
++ gctPOINTER vaddr;
++#endif /* NO_DMA_COHERENT */
++
++ gcmkHEADER_ARG("Os=0x%X Bytes=%lu Physical=0x%X Logical=0x%X",
++ Os, Bytes, Physical, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Convert physical address into a pointer to a MDL. */
++ mdl = (PLINUX_MDL) Physical;
++
++ MEMORY_LOCK(Os);
++
++#ifndef NO_DMA_COHERENT
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ if (!_AddNonPagedMemoryCache(Os,
++ mdl->numPages * PAGE_SIZE,
++ mdl->addr,
++ mdl->dmaHandle))
++#endif
++ {
++ dma_free_coherent(gcvNULL,
++ mdl->numPages * PAGE_SIZE,
++ mdl->addr,
++ mdl->dmaHandle);
++ }
++#else
++ size = mdl->numPages * PAGE_SIZE;
++ vaddr = mdl->kaddr;
++
++ while (size > 0)
++ {
++ ClearPageReserved(virt_to_page(vaddr));
++
++ vaddr += PAGE_SIZE;
++ size -= PAGE_SIZE;
++ }
++
++#if gcdUSE_NON_PAGED_MEMORY_CACHE
++ if (!_AddNonPagedMemoryCache(Os,
++ get_order(mdl->numPages * PAGE_SIZE),
++ virt_to_page(mdl->kaddr)))
++#endif
++ {
++ free_pages((unsigned long)mdl->kaddr, get_order(mdl->numPages * PAGE_SIZE));
++ }
++
++ _DestoryKernelVirtualMapping(mdl->addr);
++#endif /* NO_DMA_COHERENT */
++
++ mdlMap = mdl->maps;
++
++ while (mdlMap != gcvNULL)
++ {
++ if (mdlMap->vmaAddr != gcvNULL)
++ {
++            /* No user mapping may remain when freeing non-paged memory. */
++ gcmkASSERT(0);
++ }
++
++ mdlMap = mdlMap->next;
++ }
++
++    /* Remove the node from the global list. */
++ if (mdl == Os->mdlHead)
++ {
++ if ((Os->mdlHead = mdl->next) == gcvNULL)
++ {
++ Os->mdlTail = gcvNULL;
++ }
++ }
++ else
++ {
++ mdl->prev->next = mdl->next;
++ if (mdl == Os->mdlTail)
++ {
++ Os->mdlTail = mdl->prev;
++ }
++ else
++ {
++ mdl->next->prev = mdl->prev;
++ }
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkVERIFY_OK(_DestroyMdl(mdl));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
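++
++/*
++** Usage sketch (illustrative): a full non-paged allocation cycle. The size
++** is rounded up to whole pages, so *bytes may come back larger than asked.
++**
++**     gctSIZE_T bytes = 100;            // rounded up to PAGE_SIZE
++**     gctPHYS_ADDR physical = gcvNULL;
++**     gctPOINTER logical = gcvNULL;
++**     gcmkONERROR(gckOS_AllocateNonPagedMemory(os, gcvFALSE, &bytes,
++**                                              &physical, &logical));
++**     gcmkVERIFY_OK(gckOS_FreeNonPagedMemory(os, bytes, physical, logical));
++*/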
++
++/*******************************************************************************
++**
++** gckOS_ReadRegister
++**
++** Read data from a register.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Address
++** Address of register.
++**
++** OUTPUT:
++**
++** gctUINT32 * Data
++** Pointer to a variable that receives the data read from the register.
++*/
++gceSTATUS
++gckOS_ReadRegister(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ )
++{
++ return gckOS_ReadRegisterEx(Os, gcvCORE_MAJOR, Address, Data);
++}
++
++gceSTATUS
++gckOS_ReadRegisterEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ OUT gctUINT32 * Data
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d Address=0x%X", Os, Core, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Address < Os->device->requestedRegisterMemSizes[Core]);
++ gcmkVERIFY_ARGUMENT(Data != gcvNULL);
++
++    if (Address != 0x10) mutex_lock(&Os->gpu_clk_mutex);
++    BUG_ON(!Os->gpu_clk_on[Core]);
++
++    if (Address)
++    {
++        gctUINT32 AQHiClockControl = readl((gctUINT8 *)Os->device->registerBases[Core]);
++        BUG_ON((AQHiClockControl & 0x3) == 0x3);
++    }
++
++    *Data = readl((gctUINT8 *)Os->device->registerBases[Core] + Address);
++    if (Address != 0x10) mutex_unlock(&Os->gpu_clk_mutex);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Data=0x%08x", *Data);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_WriteRegister
++**
++** Write data to a register.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Address
++** Address of register.
++**
++** gctUINT32 Data
++** Data for register.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_WriteRegister(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ )
++{
++ return gckOS_WriteRegisterEx(Os, gcvCORE_MAJOR, Address, Data);
++}
++
++gceSTATUS
++gckOS_WriteRegisterEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ IN gctUINT32 Data
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d Address=0x%X Data=0x%08x", Os, Core, Address, Data);
++
++ gcmkVERIFY_ARGUMENT(Address < Os->device->requestedRegisterMemSizes[Core]);
++
++ mutex_lock(&Os->gpu_clk_mutex);
++ BUG_ON(!Os->gpu_clk_on[Core]);
++
++    if (Address)
++ {
++ gctUINT32 AQHiClockControl = readl((gctUINT8 *)Os->device->registerBases[Core]);
++ BUG_ON((AQHiClockControl & 0x3) == 0x3);
++ }
++
++ writel(Data, (gctUINT8 *)Os->device->registerBases[Core] + Address);
++ mutex_unlock(&Os->gpu_clk_mutex);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
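++
++/*
++** Sketch of a register round trip (illustrative): offset 0x0 is the
++** AQHiClockControl register read by the clock checks above, and both
++** helpers take the clock mutex and assert the core clock is on before
++** touching the bus.
++**
++**     gctUINT32 value;
++**     gcmkVERIFY_OK(gckOS_ReadRegisterEx(os, gcvCORE_MAJOR, 0x0, &value));
++**     gcmkVERIFY_OK(gckOS_WriteRegisterEx(os, gcvCORE_MAJOR, 0x0, value));
++*/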
++
++/*******************************************************************************
++**
++** gckOS_GetPageSize
++**
++** Get the system's page size.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** OUTPUT:
++**
++** gctSIZE_T * PageSize
++** Pointer to a variable that will receive the system's page size.
++*/
++gceSTATUS gckOS_GetPageSize(
++ IN gckOS Os,
++ OUT gctSIZE_T * PageSize
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(PageSize != gcvNULL);
++
++ /* Return the page size. */
++ *PageSize = (gctSIZE_T) PAGE_SIZE;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*PageSize", *PageSize);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetPhysicalAddress
++**
++** Get the physical system address of a corresponding virtual address.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Logical
++** Logical address.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++**          Pointer to a variable that receives the 32-bit physical address.
++*/
++gceSTATUS
++gckOS_GetPhysicalAddress(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ OUT gctUINT32 * Address
++ )
++{
++ gceSTATUS status;
++ gctUINT32 processID;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X", Os, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Query page table of current process first. */
++ status = _QueryProcessPageTable(Logical, Address);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Get current process ID. */
++ processID = _GetProcessID();
++
++ /* Route through other function. */
++ gcmkONERROR(
++ gckOS_GetPhysicalAddressProcess(Os, Logical, processID, Address));
++ }
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdSECURE_USER
++static gceSTATUS
++gckOS_AddMapping(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++ gcsUSER_MAPPING_PTR map;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Logical=0x%X Bytes=%lu",
++ Os, Physical, Logical, Bytes);
++
++ gcmkONERROR(gckOS_Allocate(Os,
++ gcmSIZEOF(gcsUSER_MAPPING),
++ (gctPOINTER *) &map));
++
++ map->next = Os->userMap;
++ map->physical = Physical - Os->device->baseAddress;
++ map->logical = Logical;
++ map->bytes = Bytes;
++ map->start = (gctINT8_PTR) Logical;
++ map->end = map->start + Bytes;
++
++ Os->userMap = map;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++static gceSTATUS
++gckOS_RemoveMapping(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++ gcsUSER_MAPPING_PTR map, prev;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X Bytes=%lu", Os, Logical, Bytes);
++
++ for (map = Os->userMap, prev = gcvNULL; map != gcvNULL; map = map->next)
++ {
++ if ((map->logical == Logical)
++ && (map->bytes == Bytes)
++ )
++ {
++ break;
++ }
++
++ prev = map;
++ }
++
++ if (map == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ADDRESS);
++ }
++
++ if (prev == gcvNULL)
++ {
++ Os->userMap = map->next;
++ }
++ else
++ {
++ prev->next = map->next;
++ }
++
++ gcmkONERROR(gcmkOS_SAFE_FREE(Os, map));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++static gceSTATUS
++_ConvertLogical2Physical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ IN PLINUX_MDL Mdl,
++ OUT gctUINT32_PTR Physical
++ )
++{
++ gctINT8_PTR base, vBase;
++ gctUINT32 offset;
++ PLINUX_MDL_MAP map;
++ gcsUSER_MAPPING_PTR userMap;
++
++ base = (Mdl == gcvNULL) ? gcvNULL : (gctINT8_PTR) Mdl->addr;
++
++ /* Check for the logical address match. */
++ if ((base != gcvNULL)
++ && ((gctINT8_PTR) Logical >= base)
++ && ((gctINT8_PTR) Logical < base + Mdl->numPages * PAGE_SIZE)
++ )
++ {
++ offset = (gctINT8_PTR) Logical - base;
++
++ if (Mdl->dmaHandle != 0)
++ {
++ /* The memory was from coherent area. */
++ *Physical = (gctUINT32) Mdl->dmaHandle + offset;
++ }
++ else if (Mdl->pagedMem && !Mdl->contiguous)
++ {
++ /* paged memory is not mapped to kernel space. */
++ return gcvSTATUS_INVALID_ADDRESS;
++ }
++ else
++ {
++ *Physical = gcmPTR2INT(virt_to_phys(base)) + offset;
++ }
++
++ return gcvSTATUS_OK;
++ }
++
++ /* Walk user maps. */
++ for (userMap = Os->userMap; userMap != gcvNULL; userMap = userMap->next)
++ {
++ if (((gctINT8_PTR) Logical >= userMap->start)
++ && ((gctINT8_PTR) Logical < userMap->end)
++ )
++ {
++ *Physical = userMap->physical
++ + (gctUINT32) ((gctINT8_PTR) Logical - userMap->start);
++
++ return gcvSTATUS_OK;
++ }
++ }
++
++ if (ProcessID != Os->kernelProcessID)
++ {
++ map = FindMdlMap(Mdl, (gctINT) ProcessID);
++ vBase = (map == gcvNULL) ? gcvNULL : (gctINT8_PTR) map->vmaAddr;
++
++ /* Is the given address within that range. */
++ if ((vBase != gcvNULL)
++ && ((gctINT8_PTR) Logical >= vBase)
++ && ((gctINT8_PTR) Logical < vBase + Mdl->numPages * PAGE_SIZE)
++ )
++ {
++ offset = (gctINT8_PTR) Logical - vBase;
++
++ if (Mdl->dmaHandle != 0)
++ {
++ /* The memory was from coherent area. */
++ *Physical = (gctUINT32) Mdl->dmaHandle + offset;
++ }
++ else if (Mdl->pagedMem && !Mdl->contiguous)
++ {
++ *Physical = _NonContiguousToPhys(Mdl->u.nonContiguousPages, offset/PAGE_SIZE);
++ }
++ else
++ {
++ *Physical = page_to_phys(Mdl->u.contiguousPages) + offset;
++ }
++
++ return gcvSTATUS_OK;
++ }
++ }
++
++ /* Address not yet found. */
++ return gcvSTATUS_INVALID_ADDRESS;
++}
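++
++/* Note on the lookup order above: a logical address is resolved against
++** (1) the MDL's kernel mapping, (2) the secure-user mapping list, and
++** (3) the calling process' vma recorded in the MDL map. Paged,
++** non-contiguous memory has no kernel mapping, which is why case (1)
++** returns gcvSTATUS_INVALID_ADDRESS for it.
++*/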
++
++/*******************************************************************************
++**
++** gckOS_GetPhysicalAddressProcess
++**
++** Get the physical system address of a corresponding virtual address for a
++** given process.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctPOINTER Logical
++** Logical address.
++**
++** gctUINT32 ProcessID
++** Process ID.
++**
++** OUTPUT:
++**
++** gctUINT32 * Address
++**          Pointer to a variable that receives the 32-bit physical address.
++*/
++gceSTATUS
++gckOS_GetPhysicalAddressProcess(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctUINT32 ProcessID,
++ OUT gctUINT32 * Address
++ )
++{
++ PLINUX_MDL mdl;
++ gctINT8_PTR base;
++ gceSTATUS status = gcvSTATUS_INVALID_ADDRESS;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X ProcessID=%d", Os, Logical, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ MEMORY_LOCK(Os);
++
++ /* First try the contiguous memory pool. */
++ if (Os->device->contiguousMapped)
++ {
++ base = (gctINT8_PTR) Os->device->contiguousBase;
++
++ if (((gctINT8_PTR) Logical >= base)
++ && ((gctINT8_PTR) Logical < base + Os->device->contiguousSize)
++ )
++ {
++ /* Convert logical address into physical. */
++ *Address = Os->device->contiguousVidMem->baseAddress
++ + (gctINT8_PTR) Logical - base;
++ status = gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ /* Try the contiguous memory pool. */
++ mdl = (PLINUX_MDL) Os->device->contiguousPhysical;
++ status = _ConvertLogical2Physical(Os,
++ Logical,
++ ProcessID,
++ mdl,
++ Address);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Walk all MDLs. */
++ for (mdl = Os->mdlHead; mdl != gcvNULL; mdl = mdl->next)
++ {
++ /* Try this MDL. */
++ status = _ConvertLogical2Physical(Os,
++ Logical,
++ ProcessID,
++ mdl,
++ Address);
++ if (gcmIS_SUCCESS(status))
++ {
++ break;
++ }
++ }
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkONERROR(status);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Address=0x%08x", *Address);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapPhysical
++**
++** Map a physical address into kernel space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Physical
++** Physical address of the memory to map.
++**
++** gctSIZE_T Bytes
++** Number of bytes to map.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that receives the base address of the mapped
++** memory.
++*/
++gceSTATUS
++gckOS_MapPhysical(
++ IN gckOS Os,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Bytes,
++ OUT gctPOINTER * Logical
++ )
++{
++ gctPOINTER logical;
++ PLINUX_MDL mdl;
++ gctUINT32 physical = Physical;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ MEMORY_LOCK(Os);
++
++ /* Go through our mapping to see if we know this physical address already. */
++ mdl = Os->mdlHead;
++
++ while (mdl != gcvNULL)
++ {
++ if (mdl->dmaHandle != 0)
++ {
++ if ((physical >= mdl->dmaHandle)
++ && (physical < mdl->dmaHandle + mdl->numPages * PAGE_SIZE)
++ )
++ {
++ *Logical = mdl->addr + (physical - mdl->dmaHandle);
++ break;
++ }
++ }
++
++ mdl = mdl->next;
++ }
++
++ if (mdl == gcvNULL)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct contiguous_mem_pool *pool = Os->device->pool;
++
++ if (Physical >= pool->phys && Physical < pool->phys + pool->size)
++ logical = (gctPOINTER)(Physical - pool->phys + pool->virt);
++ else
++ logical = gcvNULL;
++#else
++ /* Map memory as cached memory. */
++ request_mem_region(physical, Bytes, "MapRegion");
++ logical = (gctPOINTER) ioremap_nocache(physical, Bytes);
++#endif
++
++ if (logical == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Failed to map physical address 0x%08x",
++ __FUNCTION__, __LINE__, Physical
++ );
++
++ MEMORY_UNLOCK(Os);
++
++ /* Out of resources. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ /* Return pointer to mapped memory. */
++ *Logical = logical;
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Logical=0x%X", *Logical);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapPhysical
++**
++** Unmap a previously mapped memory region from kernel memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Logical
++** Pointer to the base address of the memory to unmap.
++**
++** gctSIZE_T Bytes
++** Number of bytes to unmap.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapPhysical(
++ IN gckOS Os,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ PLINUX_MDL mdl;
++
++ gcmkHEADER_ARG("Os=0x%X Logical=0x%X Bytes=%lu", Os, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ MEMORY_LOCK(Os);
++
++ mdl = Os->mdlHead;
++
++ while (mdl != gcvNULL)
++ {
++ if (mdl->addr != gcvNULL)
++ {
++ if (Logical >= (gctPOINTER)mdl->addr
++ && Logical < (gctPOINTER)((gctSTRING)mdl->addr + mdl->numPages * PAGE_SIZE))
++ {
++ break;
++ }
++ }
++
++ mdl = mdl->next;
++ }
++
++ if (mdl == gcvNULL)
++ {
++ /* Unmap the memory. */
++ iounmap(Logical);
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
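++
++/* Usage sketch (illustrative only): mapping a physical window into the
++** kernel and unmapping it again, with `phys` and `size` standing in for
++** caller-supplied values:
++**
++**     gctPOINTER logical;
++**     gcmkONERROR(gckOS_MapPhysical(Os, phys, size, &logical));
++**     ... access the mapping ...
++**     gcmkVERIFY_OK(gckOS_UnmapPhysical(Os, logical, size));
++**
++** Addresses already covered by an MDL are returned without an ioremap,
++** which is why gckOS_UnmapPhysical only calls iounmap() when no matching
++** MDL is found.
++*/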
++
++/*******************************************************************************
++**
++** gckOS_CreateMutex
++**
++** Create a new mutex.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** OUTPUT:
++**
++** gctPOINTER * Mutex
++** Pointer to a variable that will hold a pointer to the mutex.
++*/
++gceSTATUS
++gckOS_CreateMutex(
++ IN gckOS Os,
++ OUT gctPOINTER * Mutex
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Mutex != gcvNULL);
++
++ /* Allocate the mutex structure. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(struct mutex), Mutex));
++
++ /* Initialize the mutex. */
++ mutex_init(*Mutex);
++
++ /* Return status. */
++ gcmkFOOTER_ARG("*Mutex=0x%X", *Mutex);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DeleteMutex
++**
++** Delete a mutex.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Mutex
++**          Pointer to the mutex to be deleted.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DeleteMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Mutex=0x%X", Os, Mutex);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Mutex != gcvNULL);
++
++ /* Destroy the mutex. */
++ mutex_destroy(Mutex);
++
++ /* Free the mutex structure. */
++ gcmkONERROR(gckOS_Free(Os, Mutex));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AcquireMutex
++**
++** Acquire a mutex.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Mutex
++** Pointer to the mutex to be acquired.
++**
++** gctUINT32 Timeout
++** Timeout value specified in milliseconds.
++** Specify the value of gcvINFINITE to keep the thread suspended
++** until the mutex has been acquired.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AcquireMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex,
++ IN gctUINT32 Timeout
++ )
++{
++#if gcdDETECT_TIMEOUT
++ gctUINT32 timeout;
++#endif
++
++ gcmkHEADER_ARG("Os=0x%X Mutex=0x%0x Timeout=%u", Os, Mutex, Timeout);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Mutex != gcvNULL);
++
++#if gcdDETECT_TIMEOUT
++ timeout = 0;
++
++ for (;;)
++ {
++ /* Try to acquire the mutex. */
++ if (mutex_trylock(Mutex))
++ {
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ /* Advance the timeout. */
++ timeout += 1;
++
++ if (Timeout == gcvINFINITE)
++ {
++ if (timeout == gcdINFINITE_TIMEOUT)
++ {
++ gctUINT32 dmaAddress1, dmaAddress2;
++ gctUINT32 dmaState1, dmaState2;
++
++ dmaState1 = dmaState2 =
++ dmaAddress1 = dmaAddress2 = 0;
++
++ /* Verify whether DMA is running. */
++ gcmkVERIFY_OK(_VerifyDMA(
++ Os, &dmaAddress1, &dmaAddress2, &dmaState1, &dmaState2
++ ));
++
++#if gcdDETECT_DMA_ADDRESS
++ /* Dump only if DMA appears stuck. */
++ if (
++ (dmaAddress1 == dmaAddress2)
++#if gcdDETECT_DMA_STATE
++ && (dmaState1 == dmaState2)
++# endif
++ )
++# endif
++ {
++ gcmkVERIFY_OK(_DumpGPUState(Os, gcvCORE_MAJOR));
++
++ gcmkPRINT(
++ "%s(%d): mutex 0x%X; forced message flush.",
++ __FUNCTION__, __LINE__, Mutex
++ );
++
++ /* Flush the debug cache. */
++ gcmkDEBUGFLUSH(dmaAddress2);
++ }
++
++ timeout = 0;
++ }
++ }
++ else
++ {
++            /* Timed out? */
++ if (timeout >= Timeout)
++ {
++ break;
++ }
++ }
++
++ /* Wait for 1 millisecond. */
++ gcmkVERIFY_OK(gckOS_Delay(Os, 1));
++ }
++#else
++ if (Timeout == gcvINFINITE)
++ {
++ /* Lock the mutex. */
++ mutex_lock(Mutex);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ for (;;)
++ {
++ /* Try to acquire the mutex. */
++ if (mutex_trylock(Mutex))
++ {
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ if (Timeout-- == 0)
++ {
++ break;
++ }
++
++ /* Wait for 1 millisecond. */
++ gcmkVERIFY_OK(gckOS_Delay(Os, 1));
++ }
++#endif
++
++ /* Timeout. */
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_TIMEOUT);
++ return gcvSTATUS_TIMEOUT;
++}
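++
++/* Note: the Linux kernel mutex API has no timed acquire, so a finite
++** Timeout is approximated above by polling mutex_trylock() at roughly
++** one-millisecond intervals.
++*/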
++
++/*******************************************************************************
++**
++** gckOS_ReleaseMutex
++**
++** Release an acquired mutex.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Mutex
++** Pointer to the mutex to be released.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_ReleaseMutex(
++ IN gckOS Os,
++ IN gctPOINTER Mutex
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Mutex=0x%0x", Os, Mutex);
++
++ /* Validate the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Mutex != gcvNULL);
++
++ /* Release the mutex. */
++ mutex_unlock(Mutex);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
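++
++/* Usage sketch (illustrative only): the full mutex life cycle through
++** this API, given a constructed gckOS object `os`:
++**
++**     gctPOINTER mutex;
++**     gcmkONERROR(gckOS_CreateMutex(os, &mutex));
++**     gcmkONERROR(gckOS_AcquireMutex(os, mutex, gcvINFINITE));
++**     ... critical section ...
++**     gcmkVERIFY_OK(gckOS_ReleaseMutex(os, mutex));
++**     gcmkVERIFY_OK(gckOS_DeleteMutex(os, mutex));
++*/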
++
++/*******************************************************************************
++**
++** gckOS_AtomicExchange
++**
++** Atomically exchange a pair of 32-bit values.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++**      IN OUT gctUINT32_PTR Target
++**          Pointer to the 32-bit value to exchange.
++**
++**      IN gctUINT32 NewValue
++**          Specifies a new value for the 32-bit value pointed to by Target.
++**
++**      OUT gctUINT32_PTR OldValue
++**          The old value of the 32-bit value pointed to by Target.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomicExchange(
++ IN gckOS Os,
++ IN OUT gctUINT32_PTR Target,
++ IN gctUINT32 NewValue,
++ OUT gctUINT32_PTR OldValue
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Target=0x%X NewValue=%u", Os, Target, NewValue);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ /* Exchange the pair of 32-bit values. */
++ *OldValue = (gctUINT32) atomic_xchg((atomic_t *) Target, (int) NewValue);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*OldValue=%u", *OldValue);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomicExchangePtr
++**
++** Atomically exchange a pair of pointers.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** IN OUT gctPOINTER * Target
++**          Pointer to the pointer to exchange.
++**
++** IN gctPOINTER NewValue
++** Specifies a new value for the pointer pointed to by Target.
++**
++** OUT gctPOINTER * OldValue
++** The old value of the pointer pointed to by Target.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomicExchangePtr(
++ IN gckOS Os,
++ IN OUT gctPOINTER * Target,
++ IN gctPOINTER NewValue,
++ OUT gctPOINTER * OldValue
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Target=0x%X NewValue=0x%X", Os, Target, NewValue);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ /* Exchange the pair of pointers. */
++ *OldValue = (gctPOINTER)(gctUINTPTR_T) atomic_xchg((atomic_t *) Target, (int)(gctUINTPTR_T) NewValue);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*OldValue=0x%X", *OldValue);
++ return gcvSTATUS_OK;
++}
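++
++/* Note: the exchange above funnels the pointer through atomic_xchg() on
++** an int, so it is only safe where pointers are 32 bits wide (as on this
++** ARM target); on a 64-bit kernel the casts would truncate the pointer.
++*/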
++
++#if gcdSMP
++/*******************************************************************************
++**
++**  gckOS_AtomSetMask
++**
++**  Atomically OR a mask into an atom.
++**
++** INPUT:
++** IN OUT gctPOINTER Atom
++** Pointer to the atom to set.
++**
++** IN gctUINT32 Mask
++** Mask to set.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomSetMask(
++ IN gctPOINTER Atom,
++ IN gctUINT32 Mask
++ )
++{
++ gctUINT32 oval, nval;
++
++ gcmkHEADER_ARG("Atom=0x%0x", Atom);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ do
++ {
++ oval = atomic_read((atomic_t *) Atom);
++ nval = oval | Mask;
++ } while (atomic_cmpxchg((atomic_t *) Atom, oval, nval) != oval);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomClearMask
++**
++**  Atomically clear a mask from an atom.
++**
++** INPUT:
++** IN OUT gctPOINTER Atom
++** Pointer to the atom to clear.
++**
++** IN gctUINT32 Mask
++** Mask to clear.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomClearMask(
++ IN gctPOINTER Atom,
++ IN gctUINT32 Mask
++ )
++{
++ gctUINT32 oval, nval;
++
++ gcmkHEADER_ARG("Atom=0x%0x", Atom);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ do
++ {
++ oval = atomic_read((atomic_t *) Atom);
++ nval = oval & ~Mask;
++ } while (atomic_cmpxchg((atomic_t *) Atom, oval, nval) != oval);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++#endif
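++
++/* Both mask helpers above use the standard compare-and-swap retry loop:
++** read the atom, compute the new value, and retry whenever another CPU
++** modified the atom between the read and the atomic_cmpxchg().
++*/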
++
++/*******************************************************************************
++**
++** gckOS_AtomConstruct
++**
++** Create an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** OUTPUT:
++**
++** gctPOINTER * Atom
++** Pointer to a variable receiving the constructed atom.
++*/
++gceSTATUS
++gckOS_AtomConstruct(
++ IN gckOS Os,
++ OUT gctPOINTER * Atom
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Allocate the atom. */
++ gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(atomic_t), Atom));
++
++ /* Initialize the atom. */
++ atomic_set((atomic_t *) *Atom, 0);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Atom=0x%X", *Atom);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomDestroy
++**
++** Destroy an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom to destroy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomDestroy(
++ IN gckOS Os,
++ OUT gctPOINTER Atom
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Free the atom. */
++ gcmkONERROR(gcmkOS_SAFE_FREE(Os, Atom));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomGet
++**
++** Get the 32-bit value protected by an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++**          Pointer to a variable that receives the value of the atom.
++*/
++gceSTATUS
++gckOS_AtomGet(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Return the current value of atom. */
++ *Value = atomic_read((atomic_t *) Atom);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=%d", *Value);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomSet
++**
++** Set the 32-bit value protected by an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** gctINT32 Value
++** The value of the atom.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AtomSet(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ IN gctINT32 Value
++ )
++{
++    gcmkHEADER_ARG("Os=0x%X Atom=0x%0x Value=%d", Os, Atom, Value);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Set the current value of atom. */
++ atomic_set((atomic_t *) Atom, Value);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomIncrement
++**
++** Atomically increment the 32-bit integer value inside an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable that receives the original value of the atom.
++*/
++gceSTATUS
++gckOS_AtomIncrement(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Increment the atom. */
++ *Value = atomic_inc_return((atomic_t *) Atom) - 1;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=%d", *Value);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AtomDecrement
++**
++** Atomically decrement the 32-bit integer value inside an atom.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctPOINTER Atom
++** Pointer to the atom.
++**
++** OUTPUT:
++**
++** gctINT32_PTR Value
++** Pointer to a variable that receives the original value of the atom.
++*/
++gceSTATUS
++gckOS_AtomDecrement(
++ IN gckOS Os,
++ IN gctPOINTER Atom,
++ OUT gctINT32_PTR Value
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Atom=0x%0x", Os, Atom);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Atom != gcvNULL);
++
++ /* Decrement the atom. */
++ *Value = atomic_dec_return((atomic_t *) Atom) + 1;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Value=%d", *Value);
++ return gcvSTATUS_OK;
++}
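++
++/* Usage sketch (illustrative only): atoms are typically used as
++** reference counters. Note that Increment/Decrement return the value
++** the atom held *before* the operation:
++**
++**     gctPOINTER atom;
++**     gctINT32 old;
++**     gcmkONERROR(gckOS_AtomConstruct(Os, &atom));
++**     gcmkONERROR(gckOS_AtomSet(Os, atom, 1));
++**     gcmkONERROR(gckOS_AtomIncrement(Os, atom, &old));  now old == 1
++**     gcmkONERROR(gckOS_AtomDecrement(Os, atom, &old));  now old == 2
++**     gcmkVERIFY_OK(gckOS_AtomDestroy(Os, atom));
++*/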
++
++/*******************************************************************************
++**
++** gckOS_Delay
++**
++** Delay execution of the current thread for a number of milliseconds.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Delay
++** Delay to sleep, specified in milliseconds.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Delay(
++ IN gckOS Os,
++ IN gctUINT32 Delay
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Delay=%u", Os, Delay);
++
++ if (Delay > 0)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
++ ktime_t delay = ktime_set(Delay/1000, (Delay%1000) * NSEC_PER_MSEC);
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ schedule_hrtimeout(&delay, HRTIMER_MODE_REL);
++#else
++ msleep(Delay);
++#endif
++
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
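++
++/* Note: on kernels >= 2.6.28 the delay above uses a high-resolution
++** timer instead of msleep(), so short power-management delays are not
++** rounded up to whole jiffies.
++*/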
++
++/*******************************************************************************
++**
++** gckOS_GetTicks
++**
++** Get the number of milliseconds since the system started.
++**
++** INPUT:
++**
++** OUTPUT:
++**
++** gctUINT32_PTR Time
++**          Pointer to a variable that receives the time.
++**
++*/
++gceSTATUS
++gckOS_GetTicks(
++ OUT gctUINT32_PTR Time
++ )
++{
++ gcmkHEADER();
++
++ *Time = jiffies_to_msecs(jiffies);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_TicksAfter
++**
++**  Compare time values obtained from gckOS_GetTicks.
++**
++** INPUT:
++** gctUINT32 Time1
++** First time value to be compared.
++**
++** gctUINT32 Time2
++** Second time value to be compared.
++**
++** OUTPUT:
++**
++** gctBOOL_PTR IsAfter
++**          Pointer to a variable that receives the comparison result.
++**
++*/
++gceSTATUS
++gckOS_TicksAfter(
++ IN gctUINT32 Time1,
++ IN gctUINT32 Time2,
++ OUT gctBOOL_PTR IsAfter
++ )
++{
++ gcmkHEADER();
++
++ *IsAfter = time_after((unsigned long)Time1, (unsigned long)Time2);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
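++
++/* Usage sketch (illustrative only): a polling loop with a 100 ms budget,
++** relying on time_after() semantics so jiffies wrap-around is handled:
++**
++**     gctUINT32 now, deadline;
++**     gctBOOL expired = gcvFALSE;
++**     gcmkVERIFY_OK(gckOS_GetTicks(&deadline));
++**     deadline += 100;
++**     while (!expired)
++**     {
++**         gcmkVERIFY_OK(gckOS_GetTicks(&now));
++**         gcmkVERIFY_OK(gckOS_TicksAfter(now, deadline, &expired));
++**     }
++*/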
++
++/*******************************************************************************
++**
++** gckOS_GetTime
++**
++** Get the number of microseconds since the system started.
++**
++** INPUT:
++**
++** OUTPUT:
++**
++** gctUINT64_PTR Time
++**          Pointer to a variable that receives the time.
++**
++*/
++gceSTATUS
++gckOS_GetTime(
++ OUT gctUINT64_PTR Time
++ )
++{
++ gcmkHEADER();
++
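++    /* Not implemented on this platform: the reported time is always 0. */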
++ *Time = 0;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_MemoryBarrier
++**
++**  Make sure the CPU has executed everything up to this point and that the
++**  data has been written to the specified pointer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Address
++** Address of memory that needs to be barriered.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_MemoryBarrier(
++ IN gckOS Os,
++ IN gctPOINTER Address
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Address=0x%X", Os, Address);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++#if gcdNONPAGED_MEMORY_BUFFERABLE \
++ && defined (CONFIG_ARM) \
++ && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34))
++ /* drain write buffer */
++ dsb();
++
++ /* drain outer cache's write buffer? */
++#else
++ mb();
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_AllocatePagedMemory
++**
++** Allocate memory from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPHYS_ADDR * Physical
++** Pointer to a variable that receives the physical address of the
++** memory allocation.
++*/
++gceSTATUS
++gckOS_AllocatePagedMemory(
++ IN gckOS Os,
++ IN gctSIZE_T Bytes,
++ OUT gctPHYS_ADDR * Physical
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Bytes=%lu", Os, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++
++ /* Allocate the memory. */
++ gcmkONERROR(gckOS_AllocatePagedMemoryEx(Os, gcvFALSE, Bytes, Physical));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Physical=0x%X", *Physical);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_AllocatePagedMemoryEx
++**
++** Allocate memory from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctBOOL Contiguous
++** Need contiguous memory or not.
++**
++** gctSIZE_T Bytes
++** Number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctPHYS_ADDR * Physical
++** Pointer to a variable that receives the physical address of the
++** memory allocation.
++*/
++gceSTATUS
++gckOS_AllocatePagedMemoryEx(
++ IN gckOS Os,
++ IN gctBOOL Contiguous,
++ IN gctSIZE_T Bytes,
++ OUT gctPHYS_ADDR * Physical
++ )
++{
++ gctINT numPages;
++ gctINT i;
++ PLINUX_MDL mdl = gcvNULL;
++ gctSIZE_T bytes;
++ gctBOOL locked = gcvFALSE;
++ gceSTATUS status;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ gctPOINTER addr = gcvNULL;
++#endif
++
++ gcmkHEADER_ARG("Os=0x%X Contiguous=%d Bytes=%lu", Os, Contiguous, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++
++ bytes = gcmALIGN(Bytes, PAGE_SIZE);
++
++ numPages = GetPageCount(bytes, 0);
++
++ MEMORY_LOCK(Os);
++ locked = gcvTRUE;
++
++ mdl = _CreateMdl(_GetProcessID());
++ if (mdl == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ if (Contiguous)
++ {
++ gctUINT32 order = get_order(bytes);
++
++ if (order >= MAX_ORDER)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ addr =
++ alloc_pages_exact(numPages * PAGE_SIZE, GFP_KERNEL | gcdNOWARN | __GFP_NORETRY);
++
++ mdl->u.contiguousPages = addr
++ ? virt_to_page(addr)
++ : gcvNULL;
++
++ mdl->exact = gcvTRUE;
++#else
++ mdl->u.contiguousPages =
++ alloc_pages(GFP_KERNEL | gcdNOWARN | __GFP_NORETRY, order);
++#endif
++ if (mdl->u.contiguousPages == gcvNULL)
++ {
++ mdl->u.contiguousPages =
++ alloc_pages(GFP_KERNEL | __GFP_HIGHMEM | gcdNOWARN, order);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ mdl->exact = gcvFALSE;
++#endif
++ }
++ }
++ else
++ {
++ mdl->u.nonContiguousPages = _NonContiguousAlloc(numPages);
++ }
++
++ if (mdl->u.contiguousPages == gcvNULL && mdl->u.nonContiguousPages == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ mdl->dmaHandle = 0;
++ mdl->addr = 0;
++ mdl->numPages = numPages;
++ mdl->pagedMem = 1;
++ mdl->contiguous = Contiguous;
++
++ for (i = 0; i < mdl->numPages; i++)
++ {
++ struct page *page;
++
++ if (mdl->contiguous)
++ {
++ page = nth_page(mdl->u.contiguousPages, i);
++ }
++ else
++ {
++ page = _NonContiguousToPage(mdl->u.nonContiguousPages, i);
++ }
++
++ SetPageReserved(page);
++
++ if (!PageHighMem(page) && page_to_phys(page))
++ {
++ gcmkVERIFY_OK(
++ gckOS_CacheFlush(Os, _GetProcessID(), gcvNULL,
++ (gctPOINTER)(gctUINTPTR_T)page_to_phys(page),
++ page_address(page),
++ PAGE_SIZE));
++ }
++ }
++
++ /* Return physical address. */
++ *Physical = (gctPHYS_ADDR) mdl;
++
++ /*
++ * Add this to a global list.
++     * It will be used by the get-physical-address and
++     * map-user-pointer functions.
++ */
++ if (!Os->mdlHead)
++ {
++ /* Initialize the queue. */
++ Os->mdlHead = Os->mdlTail = mdl;
++ }
++ else
++ {
++ /* Add to tail. */
++ mdl->prev = Os->mdlTail;
++ Os->mdlTail->next = mdl;
++ Os->mdlTail = mdl;
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Physical=0x%X", *Physical);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (mdl != gcvNULL)
++ {
++ /* Free the memory. */
++ _DestroyMdl(mdl);
++ }
++
++ if (locked)
++ {
++ /* Unlock the memory. */
++ MEMORY_UNLOCK(Os);
++ }
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_FreePagedMemory
++**
++** Free memory allocated from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctSIZE_T Bytes
++** Number of bytes of the allocation.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_FreePagedMemory(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes
++ )
++{
++ PLINUX_MDL mdl = (PLINUX_MDL) Physical;
++ gctINT i;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ /*addr = mdl->addr;*/
++
++ MEMORY_LOCK(Os);
++
++ for (i = 0; i < mdl->numPages; i++)
++ {
++ if (mdl->contiguous)
++ {
++ ClearPageReserved(nth_page(mdl->u.contiguousPages, i));
++ }
++ else
++ {
++ ClearPageReserved(_NonContiguousToPage(mdl->u.nonContiguousPages, i));
++ }
++ }
++
++ if (mdl->contiguous)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ if (mdl->exact == gcvTRUE)
++ {
++ free_pages_exact(page_address(mdl->u.contiguousPages), mdl->numPages * PAGE_SIZE);
++ }
++ else
++#endif
++ {
++ __free_pages(mdl->u.contiguousPages, GetOrder(mdl->numPages));
++ }
++ }
++ else
++ {
++ _NonContiguousFree(mdl->u.nonContiguousPages, mdl->numPages);
++ }
++
++ /* Remove the node from global list. */
++ if (mdl == Os->mdlHead)
++ {
++ if ((Os->mdlHead = mdl->next) == gcvNULL)
++ {
++ Os->mdlTail = gcvNULL;
++ }
++ }
++ else
++ {
++ mdl->prev->next = mdl->next;
++
++ if (mdl == Os->mdlTail)
++ {
++ Os->mdlTail = mdl->prev;
++ }
++ else
++ {
++ mdl->next->prev = mdl->prev;
++ }
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Free the structure... */
++ gcmkVERIFY_OK(_DestroyMdl(mdl));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_LockPages
++**
++** Lock memory allocated from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctSIZE_T Bytes
++** Number of bytes of the allocation.
++**
++** gctBOOL Cacheable
++** Cache mode of mapping.
++**
++** OUTPUT:
++**
++** gctPOINTER * Logical
++** Pointer to a variable that receives the address of the mapped
++** memory.
++**
++** gctSIZE_T * PageCount
++** Pointer to a variable that receives the number of pages required for
++** the page table according to the GPU page size.
++*/
++gceSTATUS
++gckOS_LockPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctBOOL Cacheable,
++ OUT gctPOINTER * Logical,
++ OUT gctSIZE_T * PageCount
++ )
++{
++ PLINUX_MDL mdl;
++ PLINUX_MDL_MAP mdlMap;
++ gctSTRING addr;
++ unsigned long start;
++ unsigned long pfn;
++ gctINT i;
++
++    gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%lu", Os, Physical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount != gcvNULL);
++
++ mdl = (PLINUX_MDL) Physical;
++
++ MEMORY_LOCK(Os);
++
++ mdlMap = FindMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ mdlMap = _CreateMdlMap(mdl, _GetProcessID());
++
++ if (mdlMap == gcvNULL)
++ {
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++ }
++
++ if (mdlMap->vmaAddr == gcvNULL)
++ {
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
++ mdlMap->vmaAddr = (gctSTRING)vm_mmap(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++#else
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vmaAddr = (gctSTRING)do_mmap_pgoff(gcvNULL,
++ 0L,
++ mdl->numPages * PAGE_SIZE,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED,
++ 0);
++
++ up_write(&current->mm->mmap_sem);
++#endif
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): vmaAddr->0x%X for phys_addr->0x%X",
++ __FUNCTION__, __LINE__,
++ (gctUINT32)(gctUINTPTR_T)mdlMap->vmaAddr,
++ (gctUINT32)(gctUINTPTR_T)mdl
++ );
++
++ if (IS_ERR(mdlMap->vmaAddr))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): do_mmap_pgoff error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ down_write(&current->mm->mmap_sem);
++
++ mdlMap->vma = find_vma(current->mm, (unsigned long)mdlMap->vmaAddr);
++
++ if (mdlMap->vma == gcvNULL)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): find_vma error",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_RESOURCES);
++ return gcvSTATUS_OUT_OF_RESOURCES;
++ }
++
++ mdlMap->vma->vm_flags |= gcdVM_FLAGS;
++
++ if (Cacheable == gcvFALSE)
++ {
++ /* Make this mapping non-cached. */
++ mdlMap->vma->vm_page_prot = gcmkPAGED_MEMROY_PROT(mdlMap->vma->vm_page_prot);
++ }
++
++ addr = mdl->addr;
++
++ /* Now map all the vmalloc pages to this user address. */
++ if (mdl->contiguous)
++ {
++ /* map kernel memory to user space.. */
++ if (remap_pfn_range(mdlMap->vma,
++ mdlMap->vma->vm_start,
++ page_to_pfn(mdl->u.contiguousPages),
++ mdlMap->vma->vm_end - mdlMap->vma->vm_start,
++ mdlMap->vma->vm_page_prot) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): unable to mmap ret",
++ __FUNCTION__, __LINE__
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++ }
++ else
++ {
++ start = mdlMap->vma->vm_start;
++
++ for (i = 0; i < mdl->numPages; i++)
++ {
++ pfn = _NonContiguousToPfn(mdl->u.nonContiguousPages, i);
++
++ if (remap_pfn_range(mdlMap->vma,
++ start,
++ pfn,
++ PAGE_SIZE,
++ mdlMap->vma->vm_page_prot) < 0)
++ {
++ up_write(&current->mm->mmap_sem);
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): gctPHYS_ADDR->0x%X Logical->0x%X Unable to map addr->0x%X to start->0x%X",
++ __FUNCTION__, __LINE__,
++ (gctUINT32)(gctUINTPTR_T)Physical,
++ (gctUINT32)(gctUINTPTR_T)*Logical,
++ (gctUINT32)(gctUINTPTR_T)addr,
++ (gctUINT32)(gctUINTPTR_T)start
++ );
++
++ mdlMap->vmaAddr = gcvNULL;
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ start += PAGE_SIZE;
++ addr += PAGE_SIZE;
++ }
++ }
++
++ up_write(&current->mm->mmap_sem);
++ }
++
++ mdlMap->count++;
++
++ /* Convert pointer to MDL. */
++ *Logical = mdlMap->vmaAddr;
++
++ /* Return the page number according to the GPU page size. */
++ gcmkASSERT((PAGE_SIZE % 4096) == 0);
++ gcmkASSERT((PAGE_SIZE / 4096) >= 1);
++
++ *PageCount = mdl->numPages * (PAGE_SIZE / 4096);
++
++ MEMORY_UNLOCK(Os);
++
++ gcmkVERIFY_OK(gckOS_CacheFlush(
++ Os,
++ _GetProcessID(),
++ Physical,
++ gcvNULL,
++ (gctPOINTER)mdlMap->vmaAddr,
++ mdl->numPages * PAGE_SIZE
++ ));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Logical=0x%X *PageCount=%lu", *Logical, *PageCount);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_MapPages
++**
++** Map paged memory into a page table.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctSIZE_T PageCount
++** Number of pages required for the physical address.
++**
++** gctPOINTER PageTable
++** Pointer to the page table to fill in.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_MapPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T PageCount,
++ IN gctPOINTER PageTable
++ )
++{
++ return gckOS_MapPagesEx(Os,
++ gcvCORE_MAJOR,
++ Physical,
++ PageCount,
++ PageTable);
++}
++
++gceSTATUS
++gckOS_MapPagesEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T PageCount,
++ IN gctPOINTER PageTable
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ PLINUX_MDL mdl;
++ gctUINT32* table;
++ gctUINT32 offset;
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ gckMMU mmu;
++ PLINUX_MDL mmuMdl;
++ gctUINT32 bytes;
++ gctPHYS_ADDR pageTablePhysical;
++#endif
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d Physical=0x%X PageCount=%u PageTable=0x%X",
++ Os, Core, Physical, PageCount, PageTable);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(PageCount > 0);
++ gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
++
++ /* Convert pointer to MDL. */
++ mdl = (PLINUX_MDL)Physical;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Physical->0x%X PageCount->0x%X PagedMemory->?%d",
++ __FUNCTION__, __LINE__,
++ (gctUINT32)(gctUINTPTR_T)Physical,
++ (gctUINT32)(gctUINTPTR_T)PageCount,
++ mdl->pagedMem
++ );
++
++ MEMORY_LOCK(Os);
++
++ table = (gctUINT32 *)PageTable;
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ mmu = Os->device->kernels[Core]->mmu;
++ bytes = PageCount * sizeof(*table);
++ mmuMdl = (PLINUX_MDL)mmu->pageTablePhysical;
++#endif
++
++ /* Get all the physical addresses and store them in the page table. */
++
++ offset = 0;
++
++ if (mdl->pagedMem)
++ {
++ /* Try to get the user pages so DMA can happen. */
++ while (PageCount-- > 0)
++ {
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ if (mdl->contiguous)
++ {
++ gcmkONERROR(
++ gckVGMMU_SetPage(Os->device->kernels[Core]->vg->mmu,
++ page_to_phys(nth_page(mdl->u.contiguousPages, offset)),
++ table));
++ }
++ else
++ {
++ gcmkONERROR(
++ gckVGMMU_SetPage(Os->device->kernels[Core]->vg->mmu,
++ _NonContiguousToPhys(mdl->u.nonContiguousPages, offset),
++ table));
++ }
++ }
++ else
++#endif
++ {
++ if (mdl->contiguous)
++ {
++ gcmkONERROR(
++ gckMMU_SetPage(Os->device->kernels[Core]->mmu,
++ page_to_phys(nth_page(mdl->u.contiguousPages, offset)),
++ table));
++ }
++ else
++ {
++ gcmkONERROR(
++ gckMMU_SetPage(Os->device->kernels[Core]->mmu,
++ _NonContiguousToPhys(mdl->u.nonContiguousPages, offset),
++ table));
++ }
++ }
++
++ table++;
++ offset += 1;
++ }
++ }
++ else
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): we should not get this call for Non Paged Memory!",
++ __FUNCTION__, __LINE__
++ );
++
++ while (PageCount-- > 0)
++ {
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ gcmkONERROR(
++ gckVGMMU_SetPage(Os->device->kernels[Core]->vg->mmu,
++ page_to_phys(nth_page(mdl->u.contiguousPages, offset)),
++ table));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(
++ gckMMU_SetPage(Os->device->kernels[Core]->mmu,
++ page_to_phys(nth_page(mdl->u.contiguousPages, offset)),
++ table));
++ }
++ table++;
++ offset += 1;
++ }
++ }
++
++#if gcdNONPAGED_MEMORY_CACHEABLE
++ /* Get physical address of pageTable */
++ pageTablePhysical = (gctPHYS_ADDR)(mmuMdl->dmaHandle +
++ ((gctUINT32 *)PageTable - mmu->pageTableLogical));
++
++ /* Flush the mmu page table cache. */
++ gcmkONERROR(gckOS_CacheClean(
++ Os,
++ _GetProcessID(),
++ gcvNULL,
++ pageTablePhysical,
++ PageTable,
++ bytes
++ ));
++#endif
++
++OnError:
++
++ MEMORY_UNLOCK(Os);
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
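++
++/* Note: one page-table entry is written per page; the physical address
++** of each CPU page is handed to gckMMU_SetPage (or the VG MMU variant
++** when Core == gcvCORE_VG), whether the MDL is contiguous or not.
++*/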
++
++/*******************************************************************************
++**
++** gckOS_UnlockPages
++**
++** Unlock memory allocated from the paged pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctSIZE_T Bytes
++** Number of bytes of the allocation.
++**
++** gctPOINTER Logical
++** Address of the mapped memory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnlockPages(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctSIZE_T Bytes,
++ IN gctPOINTER Logical
++ )
++{
++ PLINUX_MDL_MAP mdlMap;
++ PLINUX_MDL mdl = (PLINUX_MDL)Physical;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Bytes=%u Logical=0x%X",
++ Os, Physical, Bytes, Logical);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Make sure there is already a mapping...*/
++ gcmkVERIFY_ARGUMENT(mdl->u.nonContiguousPages != gcvNULL
++ || mdl->u.contiguousPages != gcvNULL);
++
++ MEMORY_LOCK(Os);
++
++ mdlMap = mdl->maps;
++
++ while (mdlMap != gcvNULL)
++ {
++ if ((mdlMap->vmaAddr != gcvNULL) && (_GetProcessID() == mdlMap->pid))
++ {
++ if (--mdlMap->count == 0)
++ {
++ _UnmapUserLogical(mdlMap->pid, mdlMap->vmaAddr, mdl->numPages * PAGE_SIZE);
++ mdlMap->vmaAddr = gcvNULL;
++ }
++ }
++
++ mdlMap = mdlMap->next;
++ }
++
++ MEMORY_UNLOCK(Os);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
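++
++/* Usage sketch (illustrative only): life cycle of a paged allocation,
++** with `bytes` standing in for a caller-supplied size:
++**
++**     gctPHYS_ADDR physical;
++**     gctPOINTER logical;
++**     gctSIZE_T pageCount;
++**     gcmkONERROR(gckOS_AllocatePagedMemoryEx(Os, gcvTRUE, bytes, &physical));
++**     gcmkONERROR(gckOS_LockPages(Os, physical, bytes, gcvFALSE,
++**                                 &logical, &pageCount));
++**     ... fill the GPU page table via gckOS_MapPages() ...
++**     gcmkVERIFY_OK(gckOS_UnlockPages(Os, physical, bytes, logical));
++**     gcmkVERIFY_OK(gckOS_FreePagedMemory(Os, physical, bytes));
++*/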
++
++
++/*******************************************************************************
++**
++** gckOS_AllocateContiguous
++**
++** Allocate memory from the contiguous pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctBOOL InUserSpace
++** gcvTRUE if the pages need to be mapped into user space.
++**
++** gctSIZE_T * Bytes
++** Pointer to the number of bytes to allocate.
++**
++** OUTPUT:
++**
++** gctSIZE_T * Bytes
++** Pointer to a variable that receives the number of bytes allocated.
++**
++** gctPHYS_ADDR * Physical
++** Pointer to a variable that receives the physical address of the
++** memory allocation.
++**
++** gctPOINTER * Logical
++** Pointer to a variable that receives the logical address of the
++** memory allocation.
++*/
++gceSTATUS
++gckOS_AllocateContiguous(
++ IN gckOS Os,
++ IN gctBOOL InUserSpace,
++ IN OUT gctSIZE_T * Bytes,
++ OUT gctPHYS_ADDR * Physical,
++ OUT gctPOINTER * Logical
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X InUserSpace=%d *Bytes=%lu",
++ Os, InUserSpace, gcmOPT_VALUE(Bytes));
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Bytes != gcvNULL);
++ gcmkVERIFY_ARGUMENT(*Bytes > 0);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++
++ /* Same as non-paged memory for now. */
++ gcmkONERROR(gckOS_AllocateNonPagedMemory(Os,
++ InUserSpace,
++ Bytes,
++ Physical,
++ Logical));
++
++ /* Success. */
++ gcmkFOOTER_ARG("*Bytes=%lu *Physical=0x%X *Logical=0x%X",
++ *Bytes, *Physical, *Logical);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_FreeContiguous
++**
++** Free memory allocated from the contiguous pool.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPHYS_ADDR Physical
++** Physical address of the allocation.
++**
++** gctPOINTER Logical
++**          Logical address of the allocation.
++**
++** gctSIZE_T Bytes
++** Number of bytes of the allocation.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_FreeContiguous(
++ IN gckOS Os,
++ IN gctPHYS_ADDR Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Physical=0x%X Logical=0x%X Bytes=%lu",
++ Os, Physical, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Physical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++    /* Same as non-paged memory for now. */
++ gcmkONERROR(gckOS_FreeNonPagedMemory(Os, Bytes, Physical, Logical));
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdENABLE_VG
++/******************************************************************************
++**
++** gckOS_GetKernelLogical
++**
++**  Return the kernel logical pointer that corresponds to the specified
++** hardware address.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 Address
++** Hardware physical address.
++**
++** OUTPUT:
++**
++** gctPOINTER * KernelPointer
++** Pointer to a variable receiving the pointer in kernel address space.
++*/
++gceSTATUS
++gckOS_GetKernelLogical(
++ IN gckOS Os,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ return gckOS_GetKernelLogicalEx(Os, gcvCORE_MAJOR, Address, KernelPointer);
++}
++
++gceSTATUS
++gckOS_GetKernelLogicalEx(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT32 Address,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d Address=0x%08x", Os, Core, Address);
++
++ do
++ {
++ gckGALDEVICE device;
++ gckKERNEL kernel;
++ gcePOOL pool;
++ gctUINT32 offset;
++ gctPOINTER logical;
++
++ /* Extract the pointer to the gckGALDEVICE class. */
++ device = (gckGALDEVICE) Os->device;
++
++ /* Kernel shortcut. */
++ kernel = device->kernels[Core];
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ gcmkERR_BREAK(gckVGHARDWARE_SplitMemory(
++ kernel->vg->hardware, Address, &pool, &offset
++ ));
++ }
++ else
++#endif
++ {
++ /* Split the memory address into a pool type and offset. */
++ gcmkERR_BREAK(gckHARDWARE_SplitMemory(
++ kernel->hardware, Address, &pool, &offset
++ ));
++ }
++
++ /* Dispatch on pool. */
++ switch (pool)
++ {
++ case gcvPOOL_LOCAL_INTERNAL:
++ /* Internal memory. */
++ logical = device->internalLogical;
++ break;
++
++ case gcvPOOL_LOCAL_EXTERNAL:
++ /* External memory. */
++ logical = device->externalLogical;
++ break;
++
++ case gcvPOOL_SYSTEM:
++ /* System memory. */
++ logical = device->contiguousBase;
++ break;
++
++ default:
++ /* Invalid memory pool. */
++ gcmkFOOTER();
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ /* Build logical address of specified address. */
++        *KernelPointer = ((gctUINT8_PTR) logical) + offset;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*KernelPointer=0x%X", *KernelPointer);
++ return gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ /* Return status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckOS_MapUserPointer
++**
++** Map a pointer from the user process into the kernel address space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Pointer
++** Pointer in user process space that needs to be mapped.
++**
++** gctSIZE_T Size
++** Number of bytes that need to be mapped.
++**
++** OUTPUT:
++**
++** gctPOINTER * KernelPointer
++** Pointer to a variable receiving the mapped pointer in kernel address
++** space.
++*/
++gceSTATUS
++gckOS_MapUserPointer(
++ IN gckOS Os,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * KernelPointer
++ )
++{
++ gctPOINTER buf = gcvNULL;
++ gctUINT32 len;
++
++ gcmkHEADER_ARG("Os=0x%X Pointer=0x%X Size=%lu", Os, Pointer, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++
++ buf = kmalloc(Size, GFP_KERNEL | gcdNOWARN);
++ if (buf == gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): Failed to allocate memory.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_OUT_OF_MEMORY);
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ len = copy_from_user(buf, Pointer, Size);
++ if (len != 0)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): Failed to copy data from user.",
++ __FUNCTION__, __LINE__
++ );
++
++ if (buf != gcvNULL)
++ {
++ kfree(buf);
++ }
++
++ gcmkFOOTER_ARG("*status=%d", gcvSTATUS_GENERIC_IO);
++ return gcvSTATUS_GENERIC_IO;
++ }
++
++ *KernelPointer = buf;
++
++ gcmkFOOTER_ARG("*KernelPointer=0x%X", *KernelPointer);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapUserPointer
++**
++** Unmap a user process pointer from the kernel address space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER Pointer
++** Pointer in user process space that needs to be unmapped.
++**
++** gctSIZE_T Size
++** Number of bytes that need to be unmapped.
++**
++** gctPOINTER KernelPointer
++** Pointer in kernel address space that needs to be unmapped.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapUserPointer(
++ IN gckOS Os,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size,
++ IN gctPOINTER KernelPointer
++ )
++{
++ gctUINT32 len;
++
++ gcmkHEADER_ARG("Os=0x%X Pointer=0x%X Size=%lu KernelPointer=0x%X",
++ Os, Pointer, Size, KernelPointer);
++
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++
++ len = copy_to_user(Pointer, KernelPointer, Size);
++
++ kfree(KernelPointer);
++
++ if (len != 0)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): Failed to copy data to user.",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_GENERIC_IO);
++ return gcvSTATUS_GENERIC_IO;
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
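++
++/* Usage sketch (illustrative only): since gckOS_QueryNeedCopy below
++** always reports gcvTRUE, user buffers are shadowed in kernel memory,
++** with `userPtr` and `size` standing in for caller-supplied values:
++**
++**     gctPOINTER kbuf;
++**     gcmkONERROR(gckOS_MapUserPointer(Os, userPtr, size, &kbuf));
++**     ... operate on kbuf ...
++**     gcmkONERROR(gckOS_UnmapUserPointer(Os, userPtr, size, kbuf));
++**
++** The unmap copies the possibly modified buffer back to user space and
++** frees the kernel shadow copy.
++*/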
++
++/*******************************************************************************
++**
++** gckOS_QueryNeedCopy
++**
++**  Query whether the memory can be accessed or mapped directly, or whether
++**  it has to be copied.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID of the current process.
++**
++** OUTPUT:
++**
++** gctBOOL_PTR NeedCopy
++** Pointer to a boolean receiving gcvTRUE if the memory needs a copy or
++**          gcvFALSE if the memory can be accessed or mapped directly.
++*/
++gceSTATUS
++gckOS_QueryNeedCopy(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ OUT gctBOOL_PTR NeedCopy
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d", Os, ProcessID);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(NeedCopy != gcvNULL);
++
++ /* We need to copy data. */
++ *NeedCopy = gcvTRUE;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*NeedCopy=%d", *NeedCopy);
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_CopyFromUserData
++**
++** Copy data from user to kernel memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER KernelPointer
++** Pointer to kernel memory.
++**
++** gctPOINTER Pointer
++** Pointer to user memory.
++**
++** gctSIZE_T Size
++** Number of bytes to copy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_CopyFromUserData(
++ IN gckOS Os,
++ IN gctPOINTER KernelPointer,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X KernelPointer=0x%X Pointer=0x%X Size=%lu",
++ Os, KernelPointer, Pointer, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++
++ /* Copy data from user. */
++ if (copy_from_user(KernelPointer, Pointer, Size) != 0)
++ {
++ /* Could not copy all the bytes. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_CopyToUserData
++**
++** Copy data from kernel to user memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to an gckOS object.
++**
++** gctPOINTER KernelPointer
++** Pointer to kernel memory.
++**
++** gctPOINTER Pointer
++** Pointer to user memory.
++**
++** gctSIZE_T Size
++** Number of bytes to copy.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_CopyToUserData(
++ IN gckOS Os,
++ IN gctPOINTER KernelPointer,
++ IN gctPOINTER Pointer,
++ IN gctSIZE_T Size
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X KernelPointer=0x%X Pointer=0x%X Size=%lu",
++ Os, KernelPointer, Pointer, Size);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(KernelPointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Pointer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++
++ /* Copy data to user. */
++ if (copy_to_user(Pointer, KernelPointer, Size) != 0)
++ {
++ /* Could not copy all the bytes. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_WriteMemory
++**
++** Write data to a memory.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gctPOINTER Address
++** Address of the memory to write to.
++**
++** gctUINT32 Data
++** Data for register.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_WriteMemory(
++ IN gckOS Os,
++ IN gctPOINTER Address,
++ IN gctUINT32 Data
++ )
++{
++ gceSTATUS status;
++ gcmkHEADER_ARG("Os=0x%X Address=0x%X Data=%u", Os, Address, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ /* Write memory. */
++ if (access_ok(VERIFY_WRITE, Address, 4))
++ {
++ /* User address. */
++ if(put_user(Data, (gctUINT32*)Address))
++ {
++ gcmkONERROR(gcvSTATUS_INVALID_ADDRESS);
++ }
++ }
++ else
++ {
++ /* Kernel address. */
++ *(gctUINT32 *)Address = Data;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
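++/* Editor's note: illustrative only (not part of the original driver). The
++** access_ok() test above lets a single entry point serve both user and
++** kernel addresses; a typical caller writes a 32-bit value in place
++** (fenceLogical/fenceValue are hypothetical names):
++**
++**     gcmkONERROR(gckOS_WriteMemory(Os, fenceLogical, fenceValue));
++*/
++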
++/*******************************************************************************
++**
++** gckOS_MapUserMemory
++**
++** Lock down a user buffer and return a DMA'able address to be used by the
++** hardware to access it.
++**
++** INPUT:
++**
++** gctPOINTER Memory
++** Pointer to memory to lock down.
++**
++** gctSIZE_T Size
++** Size in bytes of the memory to lock down.
++**
++** OUTPUT:
++**
++** gctPOINTER * Info
++** Pointer to variable receiving the information record required by
++** gckOS_UnmapUserMemory.
++**
++** gctUINT32_PTR Address
++** Pointer to a variable that will receive the address DMA'able by the
++** hardware.
++*/
++gceSTATUS
++gckOS_MapUserMemory(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Memory,
++ IN gctUINT32 Physical,
++ IN gctSIZE_T Size,
++ OUT gctPOINTER * Info,
++ OUT gctUINT32_PTR Address
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%x Core=%d Memory=0x%x Size=%lu", Os, Core, Memory, Size);
++
++#if gcdSECURE_USER
++ gcmkONERROR(gckOS_AddMapping(Os, *Address, Memory, Size));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++#else
++{
++ gctSIZE_T pageCount, i, j;
++ gctUINT32_PTR pageTable;
++ gctUINT32 address = 0, physical = ~0U;
++ gctUINTPTR_T start, end, memory;
++ gctUINT32 offset;
++ gctINT result = 0;
++
++ gcsPageInfo_PTR info = gcvNULL;
++ struct page **pages = gcvNULL;
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL || Physical != ~0U);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++ gcmkVERIFY_ARGUMENT(Info != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Address != gcvNULL);
++
++ do
++ {
++ memory = (gctUINTPTR_T) Memory;
++
++ /* Get the number of required pages. */
++ end = (memory + Size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ start = memory >> PAGE_SHIFT;
++ pageCount = end - start;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): pageCount: %d.",
++ __FUNCTION__, __LINE__,
++ pageCount
++ );
++
++ /* Overflow. */
++ if ((memory + Size) < memory)
++ {
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ MEMORY_MAP_LOCK(Os);
++
++ /* Allocate the Info struct. */
++ info = (gcsPageInfo_PTR)kmalloc(sizeof(gcsPageInfo), GFP_KERNEL | gcdNOWARN);
++
++ if (info == gcvNULL)
++ {
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ break;
++ }
++
++ /* Allocate the array of page addresses. */
++ pages = (struct page **)kmalloc(pageCount * sizeof(struct page *), GFP_KERNEL | gcdNOWARN);
++
++ if (pages == gcvNULL)
++ {
++ status = gcvSTATUS_OUT_OF_MEMORY;
++ break;
++ }
++
++ if (Physical != ~0U)
++ {
++ for (i = 0; i < pageCount; i++)
++ {
++ pages[i] = pfn_to_page((Physical >> PAGE_SHIFT) + i);
++ get_page(pages[i]);
++ }
++ }
++ else
++ {
++ /* Get the user pages. */
++ down_read(&current->mm->mmap_sem);
++
++ result = get_user_pages(current,
++ current->mm,
++ memory & PAGE_MASK,
++ pageCount,
++ 1,
++ 0,
++ pages,
++ gcvNULL
++ );
++
++ up_read(&current->mm->mmap_sem);
++
++        if (result <= 0 || result < pageCount)
++ {
++ struct vm_area_struct *vma;
++
++ /* Release the pages if any. */
++ if (result > 0)
++ {
++ for (i = 0; i < result; i++)
++ {
++ if (pages[i] == gcvNULL)
++ {
++ break;
++ }
++
++ page_cache_release(pages[i]);
++ pages[i] = gcvNULL;
++ }
++
++ result = 0;
++ }
++
++ vma = find_vma(current->mm, memory);
++
++ if (vma && (vma->vm_flags & VM_PFNMAP))
++ {
++ pte_t * pte;
++ spinlock_t * ptl;
++ gctUINTPTR_T logical = memory;
++
++ for (i = 0; i < pageCount; i++)
++ {
++ pgd_t * pgd = pgd_offset(current->mm, logical);
++ pud_t * pud = pud_offset(pgd, logical);
++
++ if (pud)
++ {
++ pmd_t * pmd = pmd_offset(pud, logical);
++ pte = pte_offset_map_lock(current->mm, pmd, logical, &ptl);
++ if (!pte)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++ }
++ else
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ pages[i] = pte_page(*pte);
++ pte_unmap_unlock(pte, ptl);
++
++ /* Advance to next. */
++ logical += PAGE_SIZE;
++ }
++ }
++ else
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Check if this memory is contiguous for old mmu. */
++ if (Os->device->kernels[Core]->hardware->mmuVersion == 0)
++ {
++ for (i = 1; i < pageCount; i++)
++ {
++ if (pages[i] != nth_page(pages[0], i))
++ {
++ /* Non-contiguous. */
++ break;
++ }
++ }
++
++ if (i == pageCount)
++ {
++ /* Contiguous memory. */
++ physical = page_to_phys(pages[0]) | (memory & ~PAGE_MASK);
++
++ if (!((physical - Os->device->baseAddress) & 0x80000000))
++ {
++ kfree(pages);
++ pages = gcvNULL;
++
++ info->pages = gcvNULL;
++ info->pageTable = gcvNULL;
++
++ MEMORY_MAP_UNLOCK(Os);
++
++ *Address = physical - Os->device->baseAddress;
++ *Info = info;
++
++ gcmkFOOTER_ARG("*Info=0x%X *Address=0x%08x",
++ *Info, *Address);
++
++ return gcvSTATUS_OK;
++ }
++ }
++ }
++
++ /* Reference pages. */
++ for (i = 0; i < pageCount; i++)
++ {
++ get_page(pages[i]);
++ }
++ }
++ }
++
++ for (i = 0; i < pageCount; i++)
++ {
++#ifdef CONFIG_ARM
++ gctUINT32 data;
++ get_user(data, (gctUINT32*)((memory & PAGE_MASK) + i * PAGE_SIZE));
++#endif
++
++ /* Flush(clean) the data cache. */
++ gcmkONERROR(gckOS_CacheFlush(Os, _GetProcessID(), gcvNULL,
++ (gctPOINTER)(gctUINTPTR_T)page_to_phys(pages[i]),
++ (gctPOINTER)(memory & PAGE_MASK) + i*PAGE_SIZE,
++ PAGE_SIZE));
++ }
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ /* Allocate pages inside the page table. */
++ gcmkERR_BREAK(gckVGMMU_AllocatePages(Os->device->kernels[Core]->vg->mmu,
++ pageCount * (PAGE_SIZE/4096),
++ (gctPOINTER *) &pageTable,
++ &address));
++ }
++ else
++#endif
++ {
++ /* Allocate pages inside the page table. */
++ gcmkERR_BREAK(gckMMU_AllocatePages(Os->device->kernels[Core]->mmu,
++ pageCount * (PAGE_SIZE/4096),
++ (gctPOINTER *) &pageTable,
++ &address));
++ }
++
++ /* Fill the page table. */
++ for (i = 0; i < pageCount; i++)
++ {
++ gctUINT32 phys;
++ gctUINT32_PTR tab = pageTable + i * (PAGE_SIZE/4096);
++
++ phys = page_to_phys(pages[i]);
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ /* Get the physical address from page struct. */
++ gcmkONERROR(
++ gckVGMMU_SetPage(Os->device->kernels[Core]->vg->mmu,
++ phys,
++ tab));
++ }
++ else
++#endif
++ {
++ /* Get the physical address from page struct. */
++ gcmkONERROR(
++ gckMMU_SetPage(Os->device->kernels[Core]->mmu,
++ phys,
++ tab));
++ }
++
++ for (j = 1; j < (PAGE_SIZE/4096); j++)
++ {
++ pageTable[i * (PAGE_SIZE/4096) + j] = pageTable[i * (PAGE_SIZE/4096)] + 4096 * j;
++ }
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): pageTable[%d]: 0x%X 0x%X.",
++ __FUNCTION__, __LINE__,
++ i, phys, pageTable[i]);
++ }
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ gcmkONERROR(gckVGMMU_Flush(Os->device->kernels[Core]->vg->mmu));
++ }
++ else
++#endif
++ {
++ gcmkONERROR(gckMMU_Flush(Os->device->kernels[Core]->mmu));
++ }
++
++ /* Save pointer to page table. */
++ info->pageTable = pageTable;
++ info->pages = pages;
++
++ *Info = (gctPOINTER) info;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): info->pages: 0x%X, info->pageTable: 0x%X, info: 0x%X.",
++ __FUNCTION__, __LINE__,
++ info->pages,
++ info->pageTable,
++ info
++ );
++
++ offset = (Physical != ~0U)
++ ? (Physical & ~PAGE_MASK)
++ : (memory & ~PAGE_MASK);
++
++ /* Return address. */
++ *Address = address + offset;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): Address: 0x%X.",
++ __FUNCTION__, __LINE__,
++ *Address
++ );
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++OnError:
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): error occured: %d.",
++ __FUNCTION__, __LINE__,
++ status
++ );
++
++ /* Release page array. */
++ if (result > 0 && pages != gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): error: page table is freed.",
++ __FUNCTION__, __LINE__
++ );
++
++ for (i = 0; i < result; i++)
++ {
++ if (pages[i] == gcvNULL)
++ {
++ break;
++ }
++ page_cache_release(pages[i]);
++ }
++ }
++
++        if (info != gcvNULL && pages != gcvNULL)
++        {
++            gcmkTRACE(
++                gcvLEVEL_ERROR,
++                "%s(%d): error: page array is freed.",
++                __FUNCTION__, __LINE__
++                );
++
++            /* Free the page array. */
++            kfree(pages);
++            info->pages = gcvNULL;
++        }
++
++ /* Release page info struct. */
++ if (info != gcvNULL)
++ {
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): error: info is freed.",
++ __FUNCTION__, __LINE__
++ );
++
++ /* Free the page info struct. */
++ kfree(info);
++ *Info = gcvNULL;
++ }
++ }
++
++ MEMORY_MAP_UNLOCK(Os);
++
++ /* Return the status. */
++ if (gcmIS_SUCCESS(status))
++ {
++ gcmkFOOTER_ARG("*Info=0x%X *Address=0x%08x", *Info, *Address);
++ }
++ else
++ {
++ gcmkFOOTER();
++ }
++
++ return status;
++}
++#endif
++}
++
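++/* Editor's note: a minimal pairing sketch (not part of the original
++** driver). gckOS_MapUserMemory/gckOS_UnmapUserMemory must bracket GPU
++** access to a user buffer; _ExampleWithUserBuffer is a hypothetical name.
++*/
++#if 0 /* illustrative only */
++static gceSTATUS
++_ExampleWithUserBuffer(
++    IN gckOS Os,
++    IN gctPOINTER UserPointer,
++    IN gctSIZE_T Bytes
++    )
++{
++    gceSTATUS status;
++    gctPOINTER info = gcvNULL;
++    gctUINT32 gpuAddress = 0;
++
++    /* Pin the user pages and build a GPU mapping (~0U: no physical given). */
++    gcmkONERROR(gckOS_MapUserMemory(
++        Os, gcvCORE_MAJOR, UserPointer, ~0U, Bytes, &info, &gpuAddress));
++
++    /* ... program the hardware with gpuAddress ... */
++
++    /* Release the mapping once the GPU is done with the buffer. */
++    gcmkONERROR(gckOS_UnmapUserMemory(
++        Os, gcvCORE_MAJOR, UserPointer, Bytes, info, gpuAddress));
++
++    return gcvSTATUS_OK;
++
++OnError:
++    return status;
++}
++#endif
++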
++/*******************************************************************************
++**
++** gckOS_UnmapUserMemory
++**
++** Unlock a user buffer that was previously locked down by
++** gckOS_MapUserMemory.
++**
++** INPUT:
++**
++** gctPOINTER Memory
++** Pointer to memory to unlock.
++**
++** gctSIZE_T Size
++** Size in bytes of the memory to unlock.
++**
++** gctPOINTER Info
++** Information record returned by gckOS_MapUserMemory.
++**
++** gctUINT32_PTR Address
++** The address returned by gckOS_MapUserMemory.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UnmapUserMemory(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Size,
++ IN gctPOINTER Info,
++ IN gctUINT32 Address
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d Memory=0x%X Size=%lu Info=0x%X Address0x%08x",
++ Os, Core, Memory, Size, Info, Address);
++
++#if gcdSECURE_USER
++ gcmkONERROR(gckOS_RemoveMapping(Os, Memory, Size));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++#else
++{
++ gctUINTPTR_T memory, start, end;
++ gcsPageInfo_PTR info;
++ gctSIZE_T pageCount, i;
++ struct page **pages;
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Size > 0);
++ gcmkVERIFY_ARGUMENT(Info != gcvNULL);
++
++ do
++ {
++ info = (gcsPageInfo_PTR) Info;
++
++ pages = info->pages;
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): info=0x%X, pages=0x%X.",
++ __FUNCTION__, __LINE__,
++ info, pages
++ );
++
++ /* Invalid page array. */
++ if (pages == gcvNULL && info->pageTable == gcvNULL)
++ {
++ kfree(info);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++ }
++
++ memory = (gctUINTPTR_T)Memory;
++ end = (memory + Size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ start = memory >> PAGE_SHIFT;
++ pageCount = end - start;
++
++ /* Overflow. */
++ if ((memory + Size) < memory)
++ {
++ gcmkFOOTER_ARG("status=%d", gcvSTATUS_INVALID_ARGUMENT);
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): memory: 0x%X, pageCount: %d, pageTable: 0x%X.",
++ __FUNCTION__, __LINE__,
++ memory, pageCount, info->pageTable
++ );
++
++ MEMORY_MAP_LOCK(Os);
++
++ gcmkASSERT(info->pageTable != gcvNULL);
++
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ /* Free the pages from the MMU. */
++ gcmkERR_BREAK(gckVGMMU_FreePages(Os->device->kernels[Core]->vg->mmu,
++ info->pageTable,
++ pageCount * (PAGE_SIZE/4096)
++ ));
++ }
++ else
++#endif
++ {
++ /* Free the pages from the MMU. */
++ gcmkERR_BREAK(gckMMU_FreePages(Os->device->kernels[Core]->mmu,
++ info->pageTable,
++ pageCount * (PAGE_SIZE/4096)
++ ));
++ }
++
++ /* Release the page cache. */
++ if (pages)
++ {
++ for (i = 0; i < pageCount; i++)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_OS,
++ "%s(%d): pages[%d]: 0x%X.",
++ __FUNCTION__, __LINE__,
++ i, pages[i]
++ );
++
++ if (!PageReserved(pages[i]))
++ {
++ SetPageDirty(pages[i]);
++ }
++
++ page_cache_release(pages[i]);
++ }
++ }
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ if (info != gcvNULL)
++ {
++ /* Free the page array. */
++ if (info->pages != gcvNULL)
++ {
++ kfree(info->pages);
++ }
++
++ kfree(info);
++ }
++
++ MEMORY_MAP_UNLOCK(Os);
++
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++}
++
++/*******************************************************************************
++**
++** gckOS_GetBaseAddress
++**
++** Get the base address for the physical memory.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR BaseAddress
++** Pointer to a variable that will receive the base address.
++*/
++gceSTATUS
++gckOS_GetBaseAddress(
++ IN gckOS Os,
++ OUT gctUINT32_PTR BaseAddress
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(BaseAddress != gcvNULL);
++
++ /* Return base address. */
++ *BaseAddress = Os->device->baseAddress;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*BaseAddress=0x%08x", *BaseAddress);
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_SuspendInterrupt(
++ IN gckOS Os
++ )
++{
++ return gckOS_SuspendInterruptEx(Os, gcvCORE_MAJOR);
++}
++
++gceSTATUS
++gckOS_SuspendInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ disable_irq(Os->device->irqLines[Core]);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_ResumeInterrupt(
++ IN gckOS Os
++ )
++{
++ return gckOS_ResumeInterruptEx(Os, gcvCORE_MAJOR);
++}
++
++gceSTATUS
++gckOS_ResumeInterruptEx(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ enable_irq(Os->device->irqLines[Core]);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
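++/* Editor's note: illustrative only (not part of the original driver).
++** Suspend/Resume must be used as a bracket around sections that cannot
++** tolerate the GPU interrupt handler running:
++**
++**     gcmkONERROR(gckOS_SuspendInterruptEx(Os, gcvCORE_MAJOR));
++**     ... touch state shared with the ISR ...
++**     gcmkVERIFY_OK(gckOS_ResumeInterruptEx(Os, gcvCORE_MAJOR));
++*/
++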
++gceSTATUS
++gckOS_MemCopy(
++ IN gctPOINTER Destination,
++ IN gctCONST_POINTER Source,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcmkHEADER_ARG("Destination=0x%X Source=0x%X Bytes=%lu",
++ Destination, Source, Bytes);
++
++ gcmkVERIFY_ARGUMENT(Destination != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Source != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ memcpy(Destination, Source, Bytes);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_ZeroMemory(
++ IN gctPOINTER Memory,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcmkHEADER_ARG("Memory=0x%X Bytes=%lu", Memory, Bytes);
++
++ gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++ memset(Memory, 0, Bytes);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++********************************* Cache Control ********************************
++*******************************************************************************/
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED && defined(CONFIG_OUTER_CACHE)
++static inline gceSTATUS
++outer_func(
++ gceCACHEOPERATION Type,
++ unsigned long Start,
++ unsigned long End
++ )
++{
++ switch (Type)
++ {
++ case gcvCACHE_CLEAN:
++ outer_clean_range(Start, End);
++ break;
++ case gcvCACHE_INVALIDATE:
++ outer_inv_range(Start, End);
++ break;
++ case gcvCACHE_FLUSH:
++ outer_flush_range(Start, End);
++ break;
++ default:
++ return gcvSTATUS_INVALID_ARGUMENT;
++ }
++ return gcvSTATUS_OK;
++}
++
++#if gcdENABLE_OUTER_CACHE_PATCH
++/*******************************************************************************
++** _HandleOuterCache
++**
++** Handle the outer cache for the specified addresses.
++**
++** ARGUMENTS:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID Logical belongs.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctPOINTER Physical
++** Physical address to flush.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIZE_T Bytes
++** Size of the address range in bytes to flush.
++**
++**      gceCACHEOPERATION Type
++**          Operation to be executed.
++*/
++static gceSTATUS
++_HandleOuterCache(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes,
++ IN gceCACHEOPERATION Type
++ )
++{
++ gceSTATUS status;
++ gctUINT32 i, pageNum;
++ unsigned long paddr;
++ gctPOINTER vaddr;
++
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d Handle=0x%X Logical=0x%X Bytes=%lu",
++ Os, ProcessID, Handle, Logical, Bytes);
++
++ if (Physical != gcvNULL)
++ {
++ /* Non paged memory or gcvPOOL_USER surface */
++ paddr = (unsigned long) Physical;
++ gcmkONERROR(outer_func(Type, paddr, paddr + Bytes));
++ }
++ else if ((Handle == gcvNULL)
++ || (Handle != gcvNULL && ((PLINUX_MDL)Handle)->contiguous)
++ )
++ {
++ /* Video Memory or contiguous virtual memory */
++ gcmkONERROR(gckOS_GetPhysicalAddress(Os, Logical, (gctUINT32*)&paddr));
++ gcmkONERROR(outer_func(Type, paddr, paddr + Bytes));
++ }
++ else
++ {
++ /* Non contiguous virtual memory */
++ vaddr = (gctPOINTER)gcmALIGN_BASE((gctUINTPTR_T)Logical, PAGE_SIZE);
++ pageNum = GetPageCount(Bytes, 0);
++
++ for (i = 0; i < pageNum; i += 1)
++ {
++ gcmkONERROR(_ConvertLogical2Physical(
++ Os,
++ vaddr + PAGE_SIZE * i,
++ ProcessID,
++ (PLINUX_MDL)Handle,
++ (gctUINT32*)&paddr
++ ));
++
++ gcmkONERROR(outer_func(Type, paddr, paddr + PAGE_SIZE));
++ }
++ }
++
++ mb();
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++#endif
++#endif
++
++/*******************************************************************************
++** gckOS_CacheClean
++**
++** Clean the cache for the specified addresses. The GPU is going to need the
++** data. If the system is allocating memory as non-cacheable, this function can
++** be ignored.
++**
++** ARGUMENTS:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID Logical belongs.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctPOINTER Physical
++** Physical address to flush.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIZE_T Bytes
++** Size of the address range in bytes to flush.
++*/
++gceSTATUS
++gckOS_CacheClean(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d Handle=0x%X Logical=0x%X Bytes=%lu",
++ Os, ProcessID, Handle, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED
++#ifdef CONFIG_ARM
++
++ /* Inner cache. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
++ dmac_map_area(Logical, Bytes, DMA_TO_DEVICE);
++# else
++ dmac_clean_range(Logical, Logical + Bytes);
++# endif
++
++#if defined(CONFIG_OUTER_CACHE)
++ /* Outer cache. */
++#if gcdENABLE_OUTER_CACHE_PATCH
++ _HandleOuterCache(Os, ProcessID, Handle, Physical, Logical, Bytes, gcvCACHE_CLEAN);
++#else
++ outer_clean_range((unsigned long) Handle, (unsigned long) Handle + Bytes);
++#endif
++#endif
++
++#elif defined(CONFIG_MIPS)
++
++ dma_cache_wback((unsigned long) Logical, Bytes);
++
++#elif defined(CONFIG_PPC)
++
++ /* TODO */
++
++#else
++ dma_sync_single_for_device(
++ gcvNULL,
++ (dma_addr_t)Physical,
++ Bytes,
++ DMA_TO_DEVICE);
++#endif
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
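++/* Editor's note: illustrative only (not part of the original driver). A
++** typical caller cleans before the GPU reads a CPU-written buffer and
++** invalidates (below) before the CPU reads GPU-written data, e.g.:
++**
++**     gcmkONERROR(gckOS_CacheClean(Os, processID, mdl, physical,
++**                                  logical, bytes));
++**
++** where processID/mdl/physical/logical/bytes are hypothetical names.
++*/
++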
++/*******************************************************************************
++** gckOS_CacheInvalidate
++**
++** Invalidate the cache for the specified addresses. The GPU is going to
++** modify the data. If the system is allocating memory as non-cacheable,
++** this function can be ignored.
++**
++** ARGUMENTS:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID Logical belongs.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIZE_T Bytes
++** Size of the address range in bytes to flush.
++*/
++gceSTATUS
++gckOS_CacheInvalidate(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d Handle=0x%X Logical=0x%X Bytes=%lu",
++ Os, ProcessID, Handle, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED
++#ifdef CONFIG_ARM
++
++ /* Inner cache. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
++ dmac_map_area(Logical, Bytes, DMA_FROM_DEVICE);
++# else
++ dmac_inv_range(Logical, Logical + Bytes);
++# endif
++
++#if defined(CONFIG_OUTER_CACHE)
++ /* Outer cache. */
++#if gcdENABLE_OUTER_CACHE_PATCH
++ _HandleOuterCache(Os, ProcessID, Handle, Physical, Logical, Bytes, gcvCACHE_INVALIDATE);
++#else
++ outer_inv_range((unsigned long) Handle, (unsigned long) Handle + Bytes);
++#endif
++#endif
++
++#elif defined(CONFIG_MIPS)
++ dma_cache_inv((unsigned long) Logical, Bytes);
++#elif defined(CONFIG_PPC)
++ /* TODO */
++#else
++ dma_sync_single_for_device(
++ gcvNULL,
++ (dma_addr_t)Physical,
++ Bytes,
++ DMA_FROM_DEVICE);
++#endif
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++** gckOS_CacheFlush
++**
++** Clean the cache for the specified addresses and invalidate the lines as
++** well. The GPU is going to need and modify the data. If the system is
++** allocating memory as non-cacheable, this function can be ignored.
++**
++** ARGUMENTS:
++**
++** gckOS Os
++** Pointer to gckOS object.
++**
++** gctUINT32 ProcessID
++** Process ID Logical belongs.
++**
++** gctPHYS_ADDR Handle
++** Physical address handle. If gcvNULL it is video memory.
++**
++** gctPOINTER Logical
++** Logical address to flush.
++**
++** gctSIZE_T Bytes
++** Size of the address range in bytes to flush.
++*/
++gceSTATUS
++gckOS_CacheFlush(
++ IN gckOS Os,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctPOINTER Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X ProcessID=%d Handle=0x%X Logical=0x%X Bytes=%lu",
++ Os, ProcessID, Handle, Logical, Bytes);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Logical != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Bytes > 0);
++
++#if !gcdCACHE_FUNCTION_UNIMPLEMENTED
++#ifdef CONFIG_ARM
++ /* Inner cache. */
++ dmac_flush_range(Logical, Logical + Bytes);
++
++#if defined(CONFIG_OUTER_CACHE)
++ /* Outer cache. */
++#if gcdENABLE_OUTER_CACHE_PATCH
++ _HandleOuterCache(Os, ProcessID, Handle, Physical, Logical, Bytes, gcvCACHE_FLUSH);
++#else
++ outer_flush_range((unsigned long) Handle, (unsigned long) Handle + Bytes);
++#endif
++#endif
++
++#elif defined(CONFIG_MIPS)
++ dma_cache_wback_inv((unsigned long) Logical, Bytes);
++#elif defined(CONFIG_PPC)
++ /* TODO */
++#else
++ dma_sync_single_for_device(
++ gcvNULL,
++ (dma_addr_t)Physical,
++ Bytes,
++ DMA_BIDIRECTIONAL);
++#endif
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++********************************* Broadcasting *********************************
++*******************************************************************************/
++
++/*******************************************************************************
++**
++** gckOS_Broadcast
++**
++** System hook for broadcast events from the kernel driver.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gceBROADCAST Reason
++** Reason for the broadcast. Can be one of the following values:
++**
++** gcvBROADCAST_GPU_IDLE
++** Broadcasted when the kernel driver thinks the GPU might be
++** idle. This can be used to handle power management.
++**
++** gcvBROADCAST_GPU_COMMIT
++** Broadcasted when any client process commits a command
++** buffer. This can be used to handle power management.
++**
++** gcvBROADCAST_GPU_STUCK
++** Broadcasted when the kernel driver hits the timeout waiting
++** for the GPU.
++**
++** gcvBROADCAST_FIRST_PROCESS
++** First process is trying to connect to the kernel.
++**
++** gcvBROADCAST_LAST_PROCESS
++** Last process has detached from the kernel.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Broadcast(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gceBROADCAST Reason
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Hardware=0x%X Reason=%d", Os, Hardware, Reason);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_OBJECT(Hardware, gcvOBJ_HARDWARE);
++
++ switch (Reason)
++ {
++ case gcvBROADCAST_FIRST_PROCESS:
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "First process has attached");
++ break;
++
++ case gcvBROADCAST_LAST_PROCESS:
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "Last process has detached");
++
++ /* Put GPU OFF. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(Hardware,
++ gcvPOWER_OFF_BROADCAST));
++ break;
++
++ case gcvBROADCAST_GPU_IDLE:
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "GPU idle.");
++
++ /* Put GPU IDLE. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(Hardware,
++#if gcdPOWER_SUSNPEND_WHEN_IDLE
++ gcvPOWER_SUSPEND_BROADCAST));
++#else
++ gcvPOWER_IDLE_BROADCAST));
++#endif
++
++ /* Add idle process DB. */
++ gcmkONERROR(gckKERNEL_AddProcessDB(Hardware->kernel,
++ 1,
++ gcvDB_IDLE,
++ gcvNULL, gcvNULL, 0));
++ break;
++
++ case gcvBROADCAST_GPU_COMMIT:
++ gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "COMMIT has arrived.");
++
++ /* Add busy process DB. */
++ gcmkONERROR(gckKERNEL_AddProcessDB(Hardware->kernel,
++ 0,
++ gcvDB_IDLE,
++ gcvNULL, gcvNULL, 0));
++
++ /* Put GPU ON. */
++ gcmkONERROR(
++ gckHARDWARE_SetPowerManagementState(Hardware, gcvPOWER_ON_AUTO));
++ break;
++
++ case gcvBROADCAST_GPU_STUCK:
++ gcmkTRACE_N(gcvLEVEL_ERROR, 0, "gcvBROADCAST_GPU_STUCK\n");
++#if !gcdENABLE_RECOVERY
++ gcmkONERROR(gckHARDWARE_DumpGPUState(Hardware));
++#endif
++ gcmkONERROR(gckKERNEL_Recovery(Hardware->kernel));
++ break;
++
++ case gcvBROADCAST_AXI_BUS_ERROR:
++ gcmkTRACE_N(gcvLEVEL_ERROR, 0, "gcvBROADCAST_AXI_BUS_ERROR\n");
++ gcmkONERROR(gckHARDWARE_DumpGPUState(Hardware));
++ gcmkONERROR(gckKERNEL_Recovery(Hardware->kernel));
++ break;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
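++/* Editor's note: illustrative only (not part of the original driver); the
++** interrupt/event path typically reports a hang with:
++**
++**     gcmkONERROR(gckOS_Broadcast(Os, hardware, gcvBROADCAST_GPU_STUCK));
++*/
++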
++/*******************************************************************************
++**
++** gckOS_BroadcastHurry
++**
++** The GPU is running too slow.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctUINT Urgency
++** The higher the number, the higher the urgency to speed up the GPU.
++** The maximum value is defined by the gcdDYNAMIC_EVENT_THRESHOLD.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_BroadcastHurry(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT Urgency
++ )
++{
++ gcmkHEADER_ARG("Os=0x%x Hardware=0x%x Urgency=%u", Os, Hardware, Urgency);
++
++ /* Do whatever you need to do to speed up the GPU now. */
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_BroadcastCalibrateSpeed
++**
++** Calibrate the speed of the GPU.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gckHARDWARE Hardware
++** Pointer to the gckHARDWARE object.
++**
++** gctUINT Idle, Time
++** Idle/Time will give the percentage the GPU is idle, so you can use
++** this to calibrate the working point of the GPU.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_BroadcastCalibrateSpeed(
++ IN gckOS Os,
++ IN gckHARDWARE Hardware,
++ IN gctUINT Idle,
++ IN gctUINT Time
++ )
++{
++ gcmkHEADER_ARG("Os=0x%x Hardware=0x%x Idle=%u Time=%u",
++ Os, Hardware, Idle, Time);
++
++    /* Do whatever you need to do to calibrate the GPU speed. */
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++********************************** Semaphores **********************************
++*******************************************************************************/
++
++/*******************************************************************************
++**
++** gckOS_CreateSemaphore
++**
++** Create a semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** OUTPUT:
++**
++** gctPOINTER * Semaphore
++** Pointer to the variable that will receive the created semaphore.
++*/
++gceSTATUS
++gckOS_CreateSemaphore(
++ IN gckOS Os,
++ OUT gctPOINTER * Semaphore
++ )
++{
++ gceSTATUS status;
++ struct semaphore *sem = gcvNULL;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Allocate the semaphore structure. */
++ sem = (struct semaphore *)kmalloc(gcmSIZEOF(struct semaphore), GFP_KERNEL | gcdNOWARN);
++ if (sem == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Initialize the semaphore. */
++ sema_init(sem, 1);
++
++ /* Return to caller. */
++ *Semaphore = (gctPOINTER) sem;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
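++/* Editor's note: a minimal lifecycle sketch (not part of the original
++** driver) for the semaphore API in this section; _ExampleSemaphore is a
++** hypothetical name.
++*/
++#if 0 /* illustrative only */
++static gceSTATUS
++_ExampleSemaphore(
++    IN gckOS Os
++    )
++{
++    gceSTATUS status;
++    gctPOINTER sem = gcvNULL;
++
++    gcmkONERROR(gckOS_CreateSemaphore(Os, &sem));
++
++    /* Blocks until available; returns gcvSTATUS_INTERRUPTED on a signal. */
++    gcmkONERROR(gckOS_AcquireSemaphore(Os, sem));
++
++    /* ... critical section ... */
++
++    gcmkVERIFY_OK(gckOS_ReleaseSemaphore(Os, sem));
++    gcmkVERIFY_OK(gckOS_DestroySemaphore(Os, sem));
++
++    return gcvSTATUS_OK;
++
++OnError:
++    return status;
++}
++#endif
++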
++/*******************************************************************************
++**
++** gckOS_AcquireSemaphore
++**
++** Acquire a semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Semaphore
++**          Pointer to the semaphore that needs to be acquired.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_AcquireSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%08X Semaphore=0x%08X", Os, Semaphore);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Acquire the semaphore. */
++ if (down_interruptible((struct semaphore *) Semaphore))
++ {
++ gcmkONERROR(gcvSTATUS_INTERRUPTED);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_TryAcquireSemaphore
++**
++** Try to acquire a semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Semaphore
++**          Pointer to the semaphore that needs to be acquired.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_TryAcquireSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ )
++{
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%x", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Acquire the semaphore. */
++ if (down_trylock((struct semaphore *) Semaphore))
++ {
++ /* Timeout. */
++ status = gcvSTATUS_TIMEOUT;
++ gcmkFOOTER();
++ return status;
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_ReleaseSemaphore
++**
++** Release a previously acquired semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Semaphore
++**          Pointer to the semaphore that needs to be released.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_ReleaseSemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%X", Os, Semaphore);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Release the semaphore. */
++ up((struct semaphore *) Semaphore);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_DestroySemaphore
++**
++** Destroy a semaphore.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Semaphore
++**          Pointer to the semaphore that needs to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DestroySemaphore(
++ IN gckOS Os,
++ IN gctPOINTER Semaphore
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%X", Os, Semaphore);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Free the sempahore structure. */
++ kfree(Semaphore);
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetProcessID
++**
++** Get current process ID.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR ProcessID
++** Pointer to the variable that receives the process ID.
++*/
++gceSTATUS
++gckOS_GetProcessID(
++ OUT gctUINT32_PTR ProcessID
++ )
++{
++ /* Get process ID. */
++ if (ProcessID != gcvNULL)
++ {
++ *ProcessID = _GetProcessID();
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_GetThreadID
++**
++** Get current thread ID.
++**
++** INPUT:
++**
++** Nothing.
++**
++** OUTPUT:
++**
++** gctUINT32_PTR ThreadID
++** Pointer to the variable that receives the thread ID.
++*/
++gceSTATUS
++gckOS_GetThreadID(
++ OUT gctUINT32_PTR ThreadID
++ )
++{
++ /* Get thread ID. */
++ if (ThreadID != gcvNULL)
++ {
++ *ThreadID = _GetThreadID();
++ }
++
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetGPUPower
++**
++** Set the power of the GPU on or off.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++**      gceCORE Core
++** GPU whose power is set.
++**
++** gctBOOL Clock
++** gcvTRUE to turn on the clock, or gcvFALSE to turn off the clock.
++**
++** gctBOOL Power
++** gcvTRUE to turn on the power, or gcvFALSE to turn off the power.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SetGPUPower(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctBOOL Clock,
++ IN gctBOOL Power
++ )
++{
++ struct clk *clk_3dcore = Os->device->clk_3d_core;
++ struct clk *clk_3dshader = Os->device->clk_3d_shader;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ struct clk *clk_3d_axi = Os->device->clk_3d_axi;
++#endif
++ struct clk *clk_2dcore = Os->device->clk_2d_core;
++ struct clk *clk_2d_axi = Os->device->clk_2d_axi;
++ struct clk *clk_vg_axi = Os->device->clk_vg_axi;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ int ret;
++#endif
++
++ gctBOOL oldClockState = gcvFALSE;
++ gctBOOL oldPowerState = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d Clock=%d Power=%d", Os, Core, Clock, Power);
++
++ if (Os->device->kernels[Core] != NULL)
++ {
++#if gcdENABLE_VG
++ if (Core == gcvCORE_VG)
++ {
++ oldClockState = Os->device->kernels[Core]->vg->hardware->clockState;
++ oldPowerState = Os->device->kernels[Core]->vg->hardware->powerState;
++ }
++ else
++ {
++#endif
++ oldClockState = Os->device->kernels[Core]->hardware->clockState;
++ oldPowerState = Os->device->kernels[Core]->hardware->powerState;
++#if gcdENABLE_VG
++ }
++#endif
++ }
++ if((Power == gcvTRUE) && (oldPowerState == gcvFALSE))
++ {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ if(!IS_ERR(Os->device->gpu_regulator)) {
++ ret = regulator_enable(Os->device->gpu_regulator);
++ if (ret != 0)
++ gckOS_Print("%s(%d): fail to enable pu regulator %d!\n",
++ __FUNCTION__, __LINE__, ret);
++ }
++#else
++ imx_gpc_power_up_pu(true);
++#endif
++
++#ifdef CONFIG_PM
++ pm_runtime_get_sync(Os->device->pmdev);
++#endif
++ }
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ if (Clock == gcvTRUE) {
++ if (oldClockState == gcvFALSE) {
++ mutex_lock(&Os->gpu_clk_mutex);
++ switch (Core) {
++ case gcvCORE_MAJOR:
++ clk_enable(clk_3dcore);
++ if (cpu_is_mx6q())
++ clk_enable(clk_3dshader);
++ break;
++ case gcvCORE_2D:
++ clk_enable(clk_2dcore);
++ clk_enable(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_enable(clk_2dcore);
++ clk_enable(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ Os->gpu_clk_on[Core] = 1;
++ mutex_unlock(&Os->gpu_clk_mutex);
++ }
++ } else {
++ if (oldClockState == gcvTRUE) {
++ mutex_lock(&Os->gpu_clk_mutex);
++ switch (Core) {
++ case gcvCORE_MAJOR:
++ if (cpu_is_mx6q())
++ clk_disable(clk_3dshader);
++ clk_disable(clk_3dcore);
++ break;
++ case gcvCORE_2D:
++ clk_disable(clk_2dcore);
++ clk_disable(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_disable(clk_2dcore);
++ clk_disable(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ Os->gpu_clk_on[Core] = 0;
++ mutex_unlock(&Os->gpu_clk_mutex);
++ }
++ }
++#else
++ if (Clock == gcvTRUE) {
++ if (oldClockState == gcvFALSE) {
++ mutex_lock(&Os->gpu_clk_mutex);
++ switch (Core) {
++ case gcvCORE_MAJOR:
++ clk_prepare_enable(clk_3dcore);
++ clk_prepare_enable(clk_3dshader);
++ clk_prepare_enable(clk_3d_axi);
++ break;
++ case gcvCORE_2D:
++ clk_prepare_enable(clk_2dcore);
++ clk_prepare_enable(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_prepare_enable(clk_2dcore);
++ clk_prepare_enable(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ Os->gpu_clk_on[Core] = 1;
++ mutex_unlock(&Os->gpu_clk_mutex);
++ }
++ } else {
++ if (oldClockState == gcvTRUE) {
++ mutex_lock(&Os->gpu_clk_mutex);
++ switch (Core) {
++ case gcvCORE_MAJOR:
++ clk_disable_unprepare(clk_3d_axi);
++ clk_disable_unprepare(clk_3dshader);
++ clk_disable_unprepare(clk_3dcore);
++ break;
++ case gcvCORE_2D:
++ clk_disable_unprepare(clk_2d_axi);
++ clk_disable_unprepare(clk_2dcore);
++ break;
++ case gcvCORE_VG:
++ clk_disable_unprepare(clk_vg_axi);
++ clk_disable_unprepare(clk_2dcore);
++ break;
++ default:
++ break;
++ }
++ Os->gpu_clk_on[Core] = 0;
++ mutex_unlock(&Os->gpu_clk_mutex);
++ }
++ }
++#endif
++ if((Power == gcvFALSE) && (oldPowerState == gcvTRUE))
++ {
++#ifdef CONFIG_PM
++ pm_runtime_put_sync(Os->device->pmdev);
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ if(!IS_ERR(Os->device->gpu_regulator))
++ regulator_disable(Os->device->gpu_regulator);
++#else
++ imx_gpc_power_up_pu(false);
++#endif
++
++ }
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_ResetGPU
++**
++** Reset the GPU.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++**      gceCORE Core
++** GPU whose power is set.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_ResetGPU(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++#define SRC_SCR_OFFSET 0
++#define BP_SRC_SCR_GPU3D_RST 1
++#define BP_SRC_SCR_GPU2D_RST 4
++ void __iomem *src_base = IO_ADDRESS(SRC_BASE_ADDR);
++ gctUINT32 bit_offset,val;
++
++ gcmkHEADER_ARG("Os=0x%X Core=%d", Os, Core);
++
++ if(Core == gcvCORE_MAJOR) {
++ bit_offset = BP_SRC_SCR_GPU3D_RST;
++ } else if((Core == gcvCORE_VG)
++ ||(Core == gcvCORE_2D)) {
++ bit_offset = BP_SRC_SCR_GPU2D_RST;
++ } else {
++ return gcvSTATUS_INVALID_CONFIG;
++ }
++ val = __raw_readl(src_base + SRC_SCR_OFFSET);
++ val &= ~(1 << (bit_offset));
++ val |= (1 << (bit_offset));
++ __raw_writel(val, src_base + SRC_SCR_OFFSET);
++
++ while ((__raw_readl(src_base + SRC_SCR_OFFSET) &
++ (1 << (bit_offset))) != 0) {
++ }
++
++ gcmkFOOTER_NO();
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct reset_control *rstc = Os->device->rstc[Core];
++ if (rstc)
++ reset_control_reset(rstc);
++#else
++ imx_src_reset_gpu((int)Core);
++#endif
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_PrepareGPUFrequency
++**
++** Prepare to set GPU frequency and voltage.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++**      gceCORE Core
++** GPU whose frequency and voltage will be set.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_PrepareGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_FinishGPUFrequency
++**
++** Finish GPU frequency setting.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++**      gceCORE Core
++** GPU whose frequency and voltage is set.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_FinishGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_QueryGPUFrequency
++**
++** Query the current frequency of the GPU.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++**      gceCORE Core
++** GPU whose power is set.
++**
++** gctUINT32 * Frequency
++** Pointer to a gctUINT32 to obtain current frequency, in MHz.
++**
++** gctUINT8 * Scale
++** Pointer to a gctUINT8 to obtain current scale(1 - 64).
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_QueryGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core,
++ OUT gctUINT32 * Frequency,
++ OUT gctUINT8 * Scale
++ )
++{
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_SetGPUFrequency
++**
++** Set frequency and voltage of the GPU.
++**
++** 1. DVFS manager gives the target scale of full frequency, BSP must find
++** a real frequency according to this scale and board's configure.
++**
++** 2. BSP should find a suitable voltage for this frequency.
++**
++** 3. BSP must make sure setting take effect before this function returns.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++**      gceCORE Core
++** GPU whose power is set.
++**
++** gctUINT8 Scale
++** Target scale of full frequency, range is [1, 64]. 1 means 1/64 of
++** full frequency and 64 means 64/64 of full frequency.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SetGPUFrequency(
++ IN gckOS Os,
++ IN gceCORE Core,
++ IN gctUINT8 Scale
++ )
++{
++ return gcvSTATUS_OK;
++}
++
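++/* Editor's note: illustrative only (not part of the original driver).
++** With a 528 MHz full clock, Scale = 32 requests 32/64 of full speed:
++**
++**     freq = fullFreq * Scale / 64;    (528 * 32 / 64 = 264 MHz)
++*/
++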
++/*----------------------------------------------------------------------------*/
++/*----- Profile --------------------------------------------------------------*/
++
++gceSTATUS
++gckOS_GetProfileTick(
++ OUT gctUINT64_PTR Tick
++ )
++{
++ struct timespec time;
++
++ ktime_get_ts(&time);
++
++ *Tick = time.tv_nsec + time.tv_sec * 1000000000ULL;
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_QueryProfileTickRate(
++ OUT gctUINT64_PTR TickRate
++ )
++{
++ struct timespec res;
++
++ hrtimer_get_res(CLOCK_MONOTONIC, &res);
++
++ *TickRate = res.tv_nsec + res.tv_sec * 1000000000ULL;
++
++ return gcvSTATUS_OK;
++}
++
++gctUINT32
++gckOS_ProfileToMS(
++ IN gctUINT64 Ticks
++ )
++{
++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
++ return div_u64(Ticks, 1000000);
++#else
++ gctUINT64 rem = Ticks;
++ gctUINT64 b = 1000000;
++ gctUINT64 res, d = 1;
++ gctUINT32 high = rem >> 32;
++
++ /* Reduce the thing a bit first */
++ res = 0;
++ if (high >= 1000000)
++ {
++ high /= 1000000;
++ res = (gctUINT64) high << 32;
++ rem -= (gctUINT64) (high * 1000000) << 32;
++ }
++
++ while (((gctINT64) b > 0) && (b < rem))
++ {
++ b <<= 1;
++ d <<= 1;
++ }
++
++ do
++ {
++ if (rem >= b)
++ {
++ rem -= b;
++ res += d;
++ }
++
++ b >>= 1;
++ d >>= 1;
++ }
++ while (d);
++
++ return (gctUINT32) res;
++#endif
++}
++
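++/* Editor's note (not part of the original sources): the pre-2.6.24
++** fallback above is an open-coded shift-and-subtract 64-bit division of
++** Ticks by 1,000,000 (nanoseconds per millisecond), used where div_u64()
++** is unavailable.
++*/
++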
++/******************************************************************************\
++******************************* Signal Management ******************************
++\******************************************************************************/
++
++#undef _GC_OBJ_ZONE
++#define _GC_OBJ_ZONE gcvZONE_SIGNAL
++
++/*******************************************************************************
++**
++** gckOS_CreateSignal
++**
++** Create a new signal.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gctBOOL ManualReset
++** If set to gcvTRUE, gckOS_Signal with gcvFALSE must be called in
++** order to set the signal to nonsignaled state.
++** If set to gcvFALSE, the signal will automatically be set to
++** nonsignaled state by gckOS_WaitSignal function.
++**
++** OUTPUT:
++**
++** gctSIGNAL * Signal
++** Pointer to a variable receiving the created gctSIGNAL.
++*/
++gceSTATUS
++gckOS_CreateSignal(
++ IN gckOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctSIGNAL * Signal
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++
++ gcmkHEADER_ARG("Os=0x%X ManualReset=%d", Os, ManualReset);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ /* Create an event structure. */
++ signal = (gcsSIGNAL_PTR) kmalloc(sizeof(gcsSIGNAL), GFP_KERNEL | gcdNOWARN);
++
++ if (signal == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Save the process ID. */
++ signal->process = (gctHANDLE)(gctUINTPTR_T) _GetProcessID();
++ signal->manualReset = ManualReset;
++ signal->hardware = gcvNULL;
++ init_completion(&signal->obj);
++ atomic_set(&signal->ref, 1);
++
++ gcmkONERROR(_AllocateIntegerId(&Os->signalDB, signal, &signal->id));
++
++ *Signal = (gctSIGNAL)(gctUINTPTR_T)signal->id;
++
++ gcmkFOOTER_ARG("*Signal=0x%X", *Signal);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (signal != gcvNULL)
++ {
++ kfree(signal);
++ }
++
++ gcmkFOOTER_NO();
++ return status;
++}
++
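++/* Editor's note: a minimal lifecycle sketch (not part of the original
++** driver) for the signal API in this section; _ExampleSignal is a
++** hypothetical name. An auto-reset signal (ManualReset=gcvFALSE) returns
++** to the nonsignaled state as soon as one waiter consumes it.
++*/
++#if 0 /* illustrative only */
++static gceSTATUS
++_ExampleSignal(
++    IN gckOS Os
++    )
++{
++    gceSTATUS status;
++    gctSIGNAL sig = gcvNULL;
++
++    gcmkONERROR(gckOS_CreateSignal(Os, gcvFALSE, &sig));
++
++    gcmkONERROR(gckOS_Signal(Os, sig, gcvTRUE));          /* set */
++    gcmkONERROR(gckOS_WaitSignal(Os, sig, gcvINFINITE));  /* consume */
++
++    gcmkVERIFY_OK(gckOS_DestroySignal(Os, sig));
++
++    return gcvSTATUS_OK;
++
++OnError:
++    return status;
++}
++#endif
++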
++gceSTATUS
++gckOS_SignalQueryHardware(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ OUT gckHARDWARE * Hardware
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Hardware=0x%X", Os, Signal, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Hardware != gcvNULL);
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ *Hardware = signal->hardware;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_SignalSetHardware(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gckHARDWARE Hardware
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Hardware=0x%X", Os, Signal, Hardware);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ signal->hardware = Hardware;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DestroySignal
++**
++** Destroy a signal.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DestroySignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X", Os, Signal);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Os, Os->signalMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ gcmkASSERT(signal->id == (gctUINT32)(gctUINTPTR_T)Signal);
++
++ if (atomic_dec_and_test(&signal->ref))
++ {
++ gcmkVERIFY_OK(_DestroyIntegerId(&Os->signalDB, signal->id));
++
++        /* Free the signal. */
++ kfree(signal);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_Signal
++**
++** Set a state of the specified signal.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** gctBOOL State
++** If gcvTRUE, the signal will be set to signaled state.
++** If gcvFALSE, the signal will be set to nonsignaled state.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_Signal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctBOOL State
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X State=%d", Os, Signal, State);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Os, Os->signalMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ gcmkASSERT(signal->id == (gctUINT32)(gctUINTPTR_T)Signal);
++
++ if (State)
++ {
++ /* unbind the signal from hardware. */
++ signal->hardware = gcvNULL;
++
++ /* Set the event to a signaled state. */
++ complete(&signal->obj);
++ }
++ else
++ {
++ /* Set the event to an unsignaled state. */
++ reinit_completion(&signal->obj);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++#if gcdENABLE_VG
++gceSTATUS
++gckOS_SetSignalVG(
++ IN gckOS Os,
++ IN gctHANDLE Process,
++ IN gctSIGNAL Signal
++ )
++{
++ gceSTATUS status;
++ gctINT result;
++ struct task_struct * userTask;
++ struct siginfo info;
++
++ userTask = FIND_TASK_BY_PID((pid_t)(gctUINTPTR_T) Process);
++
++ if (userTask != gcvNULL)
++ {
++ info.si_signo = 48;
++ info.si_code = __SI_CODE(__SI_RT, SI_KERNEL);
++ info.si_pid = 0;
++ info.si_uid = 0;
++ info.si_ptr = (gctPOINTER) Signal;
++
++        /* Signals with numbers between 32 and 63 are real-time;
++           send a real-time signal to the user process. */
++ result = send_sig_info(48, &info, userTask);
++
++ printk("gckOS_SetSignalVG:0x%x\n", result);
++ /* Error? */
++ if (result < 0)
++ {
++ status = gcvSTATUS_GENERIC_IO;
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): an error has occurred.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++ else
++ {
++ status = gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ status = gcvSTATUS_GENERIC_IO;
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): an error has occurred.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ /* Return status. */
++ return status;
++}
++#endif
++
++/*******************************************************************************
++**
++** gckOS_UserSignal
++**
++** Set the specified signal which is owned by a process to signaled state.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** gctHANDLE Process
++** Handle of process owning the signal.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_UserSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctHANDLE Process
++ )
++{
++ gceSTATUS status;
++ gctSIGNAL signal;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Process=%d",
++ Os, Signal, (gctINT32)(gctUINTPTR_T)Process);
++
++ /* Map the signal into kernel space. */
++ gcmkONERROR(gckOS_MapSignal(Os, Signal, Process, &signal));
++
++ /* Signal. */
++ status = gckOS_Signal(Os, signal, gcvTRUE);
++
++ /* Unmap the signal */
++ gcmkVERIFY_OK(gckOS_UnmapSignal(Os, Signal));
++
++ gcmkFOOTER();
++ return status;
++
++OnError:
++ /* Return the status. */
++ gcmkFOOTER();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_WaitSignal
++**
++** Wait for a signal to become signaled.
++**
++** INPUT:
++**
++** gckOS Os
++**          Pointer to a gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** gctUINT32 Wait
++** Number of milliseconds to wait.
++** Pass the value of gcvINFINITE for an infinite wait.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_WaitSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctUINT32 Wait
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gcsSIGNAL_PTR signal;
++
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Wait=0x%08X", Os, Signal, Wait);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ gcmkASSERT(signal->id == (gctUINT32)(gctUINTPTR_T)Signal);
++
++ might_sleep();
++
++ spin_lock_irq(&signal->obj.wait.lock);
++
++ if (signal->obj.done)
++ {
++ if (!signal->manualReset)
++ {
++ signal->obj.done = 0;
++ }
++
++ status = gcvSTATUS_OK;
++ }
++ else if (Wait == 0)
++ {
++ status = gcvSTATUS_TIMEOUT;
++ }
++ else
++ {
++ /* Convert wait to milliseconds. */
++#if gcdDETECT_TIMEOUT
++ gctINT timeout = (Wait == gcvINFINITE)
++ ? gcdINFINITE_TIMEOUT * HZ / 1000
++ : Wait * HZ / 1000;
++
++ gctUINT complained = 0;
++#else
++ gctINT timeout = (Wait == gcvINFINITE)
++ ? MAX_SCHEDULE_TIMEOUT
++ : Wait * HZ / 1000;
++#endif
++
++ DECLARE_WAITQUEUE(wait, current);
++ wait.flags |= WQ_FLAG_EXCLUSIVE;
++ __add_wait_queue_tail(&signal->obj.wait, &wait);
++
++ while (gcvTRUE)
++ {
++ if (signal_pending(current))
++ {
++ /* Interrupt received. */
++ status = gcvSTATUS_INTERRUPTED;
++ break;
++ }
++
++ __set_current_state(TASK_INTERRUPTIBLE);
++ spin_unlock_irq(&signal->obj.wait.lock);
++ timeout = schedule_timeout(timeout);
++ spin_lock_irq(&signal->obj.wait.lock);
++
++ if (signal->obj.done)
++ {
++ if (!signal->manualReset)
++ {
++ signal->obj.done = 0;
++ }
++
++ status = gcvSTATUS_OK;
++ break;
++ }
++
++#if gcdDETECT_TIMEOUT
++ if ((Wait == gcvINFINITE) && (timeout == 0))
++ {
++ gctUINT32 dmaAddress1, dmaAddress2;
++ gctUINT32 dmaState1, dmaState2;
++
++ dmaState1 = dmaState2 =
++ dmaAddress1 = dmaAddress2 = 0;
++
++ /* Verify whether DMA is running. */
++ gcmkVERIFY_OK(_VerifyDMA(
++ Os, &dmaAddress1, &dmaAddress2, &dmaState1, &dmaState2
++ ));
++
++#if gcdDETECT_DMA_ADDRESS
++ /* Dump only if DMA appears stuck. */
++ if (
++ (dmaAddress1 == dmaAddress2)
++#if gcdDETECT_DMA_STATE
++ && (dmaState1 == dmaState2)
++#endif
++ )
++#endif
++ {
++ /* Increment complain count. */
++ complained += 1;
++
++ gcmkVERIFY_OK(_DumpGPUState(Os, gcvCORE_MAJOR));
++
++ gcmkPRINT(
++ "%s(%d): signal 0x%X; forced message flush (%d).",
++ __FUNCTION__, __LINE__, Signal, complained
++ );
++
++ /* Flush the debug cache. */
++ gcmkDEBUGFLUSH(dmaAddress2);
++ }
++
++ /* Reset timeout. */
++ timeout = gcdINFINITE_TIMEOUT * HZ / 1000;
++ }
++#endif
++
++ if (timeout == 0)
++ {
++ status = gcvSTATUS_TIMEOUT;
++ break;
++ }
++ }
++
++ __remove_wait_queue(&signal->obj.wait, &wait);
++
++#if gcdDETECT_TIMEOUT
++ if (complained)
++ {
++ gcmkPRINT(
++ "%s(%d): signal=0x%X; waiting done; status=%d",
++ __FUNCTION__, __LINE__, Signal, status
++ );
++ }
++#endif
++ }
++
++ spin_unlock_irq(&signal->obj.wait.lock);
++
++OnError:
++ /* Return status. */
++ gcmkFOOTER_ARG("Signal=0x%X status=%d", Signal, status);
++ return status;
++}
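++
++/* Usage sketch (illustrative only): Wait is in milliseconds and is
++** converted to jiffies above. Wait == 0 just polls the signal state,
++** while gcvINFINITE blocks until the signal is set or a pending
++** process signal interrupts the wait.
++**
++**     status = gckOS_WaitSignal(Os, signal, 0);
++**     if (status == gcvSTATUS_TIMEOUT)
++**     {
++**         status = gckOS_WaitSignal(Os, signal, gcvINFINITE);
++**     }
++*/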
++
++/*******************************************************************************
++**
++** gckOS_MapSignal
++**
++** Map a signal into the current process space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL to map.
++**
++** gctHANDLE Process
++** Handle of process owning the signal.
++**
++** OUTPUT:
++**
++** gctSIGNAL * MappedSignal
++** Pointer to a variable receiving the mapped gctSIGNAL.
++*/
++gceSTATUS
++gckOS_MapSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal,
++ IN gctHANDLE Process,
++ OUT gctSIGNAL * MappedSignal
++ )
++{
++ gceSTATUS status;
++ gcsSIGNAL_PTR signal;
++ gcmkHEADER_ARG("Os=0x%X Signal=0x%X Process=0x%X", Os, Signal, Process);
++
++ gcmkVERIFY_ARGUMENT(Signal != gcvNULL);
++ gcmkVERIFY_ARGUMENT(MappedSignal != gcvNULL);
++
++ gcmkONERROR(_QueryIntegerId(&Os->signalDB, (gctUINT32)(gctUINTPTR_T)Signal, (gctPOINTER)&signal));
++
++ if (atomic_inc_return(&signal->ref) <= 1)
++ {
++ /* The previous value was 0, so the signal has already been deleted. */
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ *MappedSignal = (gctSIGNAL) Signal;
++
++ /* Success. */
++ gcmkFOOTER_ARG("*MappedSignal=0x%X", *MappedSignal);
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER_NO();
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_UnmapSignal
++**
++** Unmap a signal.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctSIGNAL Signal
++** Pointer to the mapped gctSIGNAL.
++*/
++gceSTATUS
++gckOS_UnmapSignal(
++ IN gckOS Os,
++ IN gctSIGNAL Signal
++ )
++{
++ return gckOS_DestroySignal(Os, Signal);
++}
++
++/*******************************************************************************
++**
++** gckOS_CreateUserSignal
++**
++** Create a new signal to be used in user space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctBOOL ManualReset
++** If set to gcvTRUE, gckOS_Signal with gcvFALSE must be called in
++** order to set the signal to nonsignaled state.
++** If set to gcvFALSE, the signal will automatically be set to
++** nonsignaled state by gckOS_WaitSignal function.
++**
++** OUTPUT:
++**
++** gctINT * SignalID
++** Pointer to a variable receiving the created signal's ID.
++*/
++gceSTATUS
++gckOS_CreateUserSignal(
++ IN gckOS Os,
++ IN gctBOOL ManualReset,
++ OUT gctINT * SignalID
++ )
++{
++ gceSTATUS status;
++ gctSIZE_T signal;
++
++ /* Create a new signal. */
++ status = gckOS_CreateSignal(Os, ManualReset, (gctSIGNAL *) &signal);
++ *SignalID = (gctINT) signal;
++
++ return status;
++}
++
++/*******************************************************************************
++**
++** gckOS_DestroyUserSignal
++**
++** Destroy a signal used in user space.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctINT SignalID
++** The signal's ID.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DestroyUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID
++ )
++{
++ return gckOS_DestroySignal(Os, (gctSIGNAL)(gctUINTPTR_T)SignalID);
++}
++
++/*******************************************************************************
++**
++** gckOS_WaitUserSignal
++**
++** Wait for a user-mode signal to become signaled.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctINT SignalID
++** Signal ID.
++**
++** gctUINT32 Wait
++** Number of milliseconds to wait.
++** Pass the value of gcvINFINITE for an infinite wait.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_WaitUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID,
++ IN gctUINT32 Wait
++ )
++{
++ return gckOS_WaitSignal(Os, (gctSIGNAL)(gctUINTPTR_T)SignalID, Wait);
++}
++
++/*******************************************************************************
++**
++** gckOS_SignalUserSignal
++**
++** Set the state of the specified user-space signal.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to a gckOS object.
++**
++** gctINT SignalID
++** The signal's ID.
++**
++** gctBOOL State
++** If gcvTRUE, the signal will be set to signaled state.
++** If gcvFALSE, the signal will be set to nonsignaled state.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SignalUserSignal(
++ IN gckOS Os,
++ IN gctINT SignalID,
++ IN gctBOOL State
++ )
++{
++ return gckOS_Signal(Os, (gctSIGNAL)(gctUINTPTR_T)SignalID, State);
++}
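++
++/* Usage sketch (illustrative only): the *UserSignal wrappers expose a
++** signal to user space as a small integer ID (its signal database
++** index), so one object can be created, signaled, waited on and
++** destroyed by ID.
++**
++**     gctINT id;
++**     gcmkONERROR(gckOS_CreateUserSignal(Os, gcvFALSE, &id));
++**     gcmkONERROR(gckOS_SignalUserSignal(Os, id, gcvTRUE));
++**     gcmkONERROR(gckOS_WaitUserSignal(Os, id, 1000));
++**     gcmkVERIFY_OK(gckOS_DestroyUserSignal(Os, id));
++*/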
++
++#if gcdENABLE_VG
++gceSTATUS
++gckOS_CreateSemaphoreVG(
++ IN gckOS Os,
++ OUT gctSEMAPHORE * Semaphore
++ )
++{
++ gceSTATUS status;
++ struct semaphore * newSemaphore;
++
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%x", Os, Semaphore);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ do
++ {
++ /* Allocate the semaphore structure. */
++ newSemaphore = (struct semaphore *)kmalloc(gcmSIZEOF(struct semaphore), GFP_KERNEL | gcdNOWARN);
++ if (newSemaphore == gcvNULL)
++ {
++ gcmkERR_BREAK(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Initialize the semaphore. */
++ sema_init(newSemaphore, 0);
++
++ /* Set the handle. */
++ * Semaphore = (gctSEMAPHORE) newSemaphore;
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++gceSTATUS
++gckOS_IncrementSemaphore(
++ IN gckOS Os,
++ IN gctSEMAPHORE Semaphore
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%x", Os, Semaphore);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ /* Increment the semaphore's count. */
++ up((struct semaphore *) Semaphore);
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_DecrementSemaphore(
++ IN gckOS Os,
++ IN gctSEMAPHORE Semaphore
++ )
++{
++ gceSTATUS status;
++ gctINT result;
++
++ gcmkHEADER_ARG("Os=0x%X Semaphore=0x%x", Os, Semaphore);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Semaphore != gcvNULL);
++
++ do
++ {
++ /* Decrement the semaphore's count. If the count is zero, wait
++ until it gets incremented. */
++ result = down_interruptible((struct semaphore *) Semaphore);
++
++ /* Signal received? */
++ if (result != 0)
++ {
++ status = gcvSTATUS_TERMINATE;
++ break;
++ }
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
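++
++/* Usage sketch (illustrative only): the VG semaphore pair forms a
++** simple producer/consumer handshake. gckOS_DecrementSemaphore()
++** blocks interruptibly and returns gcvSTATUS_TERMINATE when a process
++** signal is pending.
++**
++**     gctSEMAPHORE sema;
++**     gcmkONERROR(gckOS_CreateSemaphoreVG(Os, &sema));
++**     gcmkONERROR(gckOS_IncrementSemaphore(Os, sema));   producer side
++**     gcmkONERROR(gckOS_DecrementSemaphore(Os, sema));   consumer side
++*/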
++
++/*******************************************************************************
++**
++** gckOS_SetSignal
++**
++** Set the specified signal to signaled state.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctHANDLE Process
++** Handle of process owning the signal.
++**
++** gctSIGNAL Signal
++** Pointer to the gctSIGNAL.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_SetSignal(
++ IN gckOS Os,
++ IN gctHANDLE Process,
++ IN gctSIGNAL Signal
++ )
++{
++ gceSTATUS status;
++ gctINT result;
++ struct task_struct * userTask;
++ struct siginfo info;
++
++ userTask = FIND_TASK_BY_PID((pid_t)(gctUINTPTR_T) Process);
++
++ if (userTask != gcvNULL)
++ {
++ info.si_signo = 48;
++ info.si_code = __SI_CODE(__SI_RT, SI_KERNEL);
++ info.si_pid = 0;
++ info.si_uid = 0;
++ info.si_ptr = (gctPOINTER) Signal;
++
++ /* Signal numbers 32 to 63 are real-time signals, so send a
++ real-time signal to the user process. */
++ result = send_sig_info(48, &info, userTask);
++
++ /* Error? */
++ if (result < 0)
++ {
++ status = gcvSTATUS_GENERIC_IO;
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): an error has occurred.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++ else
++ {
++ status = gcvSTATUS_OK;
++ }
++ }
++ else
++ {
++ status = gcvSTATUS_GENERIC_IO;
++
++ gcmkTRACE(
++ gcvLEVEL_ERROR,
++ "%s(%d): an error has occurred.\n",
++ __FUNCTION__, __LINE__
++ );
++ }
++
++ /* Return status. */
++ return status;
++}
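++
++/* Receiver sketch (illustrative only, handler name hypothetical):
++** gckOS_SetSignal() delivers real-time signal 48 with the gctSIGNAL
++** handle in si_ptr, so a user-space consumer could install a
++** SA_SIGINFO handler for it:
++**
++**     struct sigaction act = { 0 };
++**     act.sa_sigaction = on_gpu_signal;  handler reads info->si_ptr
++**     act.sa_flags = SA_SIGINFO;
++**     sigaction(48, &act, NULL);
++*/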
++
++/******************************************************************************\
++******************************** Thread Object *********************************
++\******************************************************************************/
++
++gceSTATUS
++gckOS_StartThread(
++ IN gckOS Os,
++ IN gctTHREADFUNC ThreadFunction,
++ IN gctPOINTER ThreadParameter,
++ OUT gctTHREAD * Thread
++ )
++{
++ gceSTATUS status;
++ struct task_struct * thread;
++
++ gcmkHEADER_ARG("Os=0x%X ", Os);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(ThreadFunction != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Thread != gcvNULL);
++
++ do
++ {
++ /* Create the thread. */
++ thread = kthread_create(
++ ThreadFunction,
++ ThreadParameter,
++ "Vivante Kernel Thread"
++ );
++
++ /* Failed? */
++ if (IS_ERR(thread))
++ {
++ status = gcvSTATUS_GENERIC_IO;
++ break;
++ }
++
++ /* Start the thread. */
++ wake_up_process(thread);
++
++ /* Set the thread handle. */
++ * Thread = (gctTHREAD) thread;
++
++ /* Success. */
++ status = gcvSTATUS_OK;
++ }
++ while (gcvFALSE);
++
++ gcmkFOOTER();
++ /* Return the status. */
++ return status;
++}
++
++gceSTATUS
++gckOS_StopThread(
++ IN gckOS Os,
++ IN gctTHREAD Thread
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Thread=0x%x", Os, Thread);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Thread != gcvNULL);
++
++ /* The thread should already have been allowed to terminate. */
++ kthread_stop((struct task_struct *) Thread);
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
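++
++/* Thread-function sketch (illustrative only, name hypothetical):
++** gckOS_StopThread() uses kthread_stop(), so any ThreadFunction passed
++** to gckOS_StartThread() must poll kthread_should_stop() in order to
++** terminate cleanly.
++**
++**     static int _ThreadRoutine(void *data)
++**     {
++**         while (!kthread_should_stop())
++**         {
++**             do one unit of work, then sleep or reschedule
++**         }
++**         return 0;
++**     }
++*/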
++
++gceSTATUS
++gckOS_VerifyThread(
++ IN gckOS Os,
++ IN gctTHREAD Thread
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X Thread=0x%x", Os, Thread);
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Thread != gcvNULL);
++
++ gcmkFOOTER_NO();
++ /* Success. */
++ return gcvSTATUS_OK;
++}
++#endif
++
++/******************************************************************************\
++******************************** Software Timer ********************************
++\******************************************************************************/
++
++void
++_TimerFunction(
++ struct work_struct * work
++ )
++{
++ gcsOSTIMER_PTR timer = (gcsOSTIMER_PTR)work;
++
++ gctTIMERFUNCTION function = timer->function;
++
++ function(timer->data);
++}
++
++/*******************************************************************************
++**
++** gckOS_CreateTimer
++**
++** Create a software timer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctTIMERFUNCTION Function
++** Pointer to a callback function which will be called when the timer
++** expires.
++**
++** gctPOINTER Data
++** Private data which will be passed to the callback function.
++**
++** OUTPUT:
++**
++** gctPOINTER * Timer
++** Pointer to a variable receiving the created timer.
++*/
++gceSTATUS
++gckOS_CreateTimer(
++ IN gckOS Os,
++ IN gctTIMERFUNCTION Function,
++ IN gctPOINTER Data,
++ OUT gctPOINTER * Timer
++ )
++{
++ gceSTATUS status;
++ gcsOSTIMER_PTR pointer;
++ gcmkHEADER_ARG("Os=0x%X Function=0x%X Data=0x%X", Os, Function, Data);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
++
++ gcmkONERROR(gckOS_Allocate(Os, sizeof(gcsOSTIMER), (gctPOINTER)&pointer));
++
++ pointer->function = Function;
++ pointer->data = Data;
++
++ INIT_DELAYED_WORK(&pointer->work, _TimerFunction);
++
++ *Timer = pointer;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
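++
++/* Lifecycle sketch (illustrative only, names hypothetical): the timer
++** is a delayed work item on Os->workqueue, so Function runs in process
++** context roughly Delay milliseconds after gckOS_StartTimer().
++**
++**     gctPOINTER timer;
++**     gcmkONERROR(gckOS_CreateTimer(Os, _OnTimeout, context, &timer));
++**     gcmkONERROR(gckOS_StartTimer(Os, timer, 100));  100 ms
++**     gcmkVERIFY_OK(gckOS_StopTimer(Os, timer));
++**     gcmkVERIFY_OK(gckOS_DestroyTimer(Os, timer));
++*/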
++
++/*******************************************************************************
++**
++** gckOS_DestroyTimer
++**
++** Destroy a software timer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Timer
++** Pointer to the timer to be destroyed.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_DestroyTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer
++ )
++{
++ gcsOSTIMER_PTR timer;
++ gcmkHEADER_ARG("Os=0x%X Timer=0x%X", Os, Timer);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
++
++ timer = (gcsOSTIMER_PTR)Timer;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
++ cancel_delayed_work_sync(&timer->work);
++#else
++ cancel_delayed_work(&timer->work);
++ flush_workqueue(Os->workqueue);
++#endif
++
++ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, Timer));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_StartTimer
++**
++** Schedule a software timer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Timer
++** Pointer to the timer to be scheduled.
++**
++** gctUINT32 Delay
++** Delay in milliseconds.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_StartTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer,
++ IN gctUINT32 Delay
++ )
++{
++ gcsOSTIMER_PTR timer;
++
++ gcmkHEADER_ARG("Os=0x%X Timer=0x%X Delay=%u", Os, Timer, Delay);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
++ gcmkVERIFY_ARGUMENT(Delay != 0);
++
++ timer = (gcsOSTIMER_PTR)Timer;
++
++ if (unlikely(delayed_work_pending(&timer->work)))
++ {
++ if (unlikely(!cancel_delayed_work(&timer->work)))
++ {
++ cancel_work_sync(&timer->work.work);
++
++ if (unlikely(delayed_work_pending(&timer->work)))
++ {
++ gckOS_Print("gckOS_StartTimer error: the pending work item cannot complete!\n");
++
++ return gcvSTATUS_INVALID_REQUEST;
++ }
++ }
++ }
++
++ queue_delayed_work(Os->workqueue, &timer->work, msecs_to_jiffies(Delay));
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++/*******************************************************************************
++**
++** gckOS_StopTimer
++**
++** Cancel a scheduled timer.
++**
++** INPUT:
++**
++** gckOS Os
++** Pointer to the gckOS object.
++**
++** gctPOINTER Timer
++** Pointer to the timer to be cancelled.
++**
++** OUTPUT:
++**
++** Nothing.
++*/
++gceSTATUS
++gckOS_StopTimer(
++ IN gckOS Os,
++ IN gctPOINTER Timer
++ )
++{
++ gcsOSTIMER_PTR timer;
++ gcmkHEADER_ARG("Os=0x%X Timer=0x%X", Os, Timer);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(Timer != gcvNULL);
++
++ timer = (gcsOSTIMER_PTR)Timer;
++
++ cancel_delayed_work(&timer->work);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_DumpCallStack(
++ IN gckOS Os
++ )
++{
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ dump_stack();
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_GetProcessNameByPid(
++ IN gctINT Pid,
++ IN gctSIZE_T Length,
++ OUT gctUINT8_PTR String
++ )
++{
++ struct task_struct *task;
++
++ /* Get the task_struct of the task with pid. */
++ rcu_read_lock();
++
++ task = FIND_TASK_BY_PID(Pid);
++
++ if (task == gcvNULL)
++ {
++ rcu_read_unlock();
++ return gcvSTATUS_NOT_FOUND;
++ }
++
++ /* Get name of process. */
++ strncpy(String, task->comm, Length);
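++ /* Note: strncpy() does not NUL-terminate the copy when task->comm is
++ longer than Length, so callers should pass Length >= TASK_COMM_LEN. */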
++
++ rcu_read_unlock();
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_GetVideoMemoryMutex(
++ IN gckOS Os,
++ OUT gctPOINTER *Mutex
++ )
++{
++ gcmkHEADER_ARG("Mutex=0x%X", Mutex);
++
++ *Mutex = Os->vidmemMutex;
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++}
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++
++gceSTATUS
++gckOS_CreateSyncPoint(
++ IN gckOS Os,
++ OUT gctSYNC_POINT * SyncPoint
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++
++ /* Create a sync point structure. */
++ syncPoint = (gcsSYNC_POINT_PTR) kmalloc(
++ sizeof(gcsSYNC_POINT), GFP_KERNEL | gcdNOWARN);
++
++ if (syncPoint == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Initialize the sync point. */
++ atomic_set(&syncPoint->ref, 1);
++ atomic_set(&syncPoint->state, 0);
++
++ gcmkONERROR(_AllocateIntegerId(&Os->syncPointDB, syncPoint, &syncPoint->id));
++
++ *SyncPoint = (gctSYNC_POINT)(gctUINTPTR_T)syncPoint->id;
++
++ gcmkFOOTER_ARG("*SyncPoint=%d", syncPoint->id);
++ return gcvSTATUS_OK;
++
++OnError:
++ if (syncPoint != gcvNULL)
++ {
++ kfree(syncPoint);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_ReferenceSyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++
++ gcmkHEADER_ARG("Os=0x%X", Os);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ /* Reference the sync point. */
++ atomic_inc(&syncPoint->ref);
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_DestroySyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X SyncPoint=%d", Os, (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Os, Os->syncPointMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ gcmkASSERT(syncPoint->id == (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ if (atomic_dec_and_test(&syncPoint->ref))
++ {
++ gcmkVERIFY_OK(_DestroyIntegerId(&Os->syncPointDB, syncPoint->id));
++
++ /* Free the sync point. */
++ syncPoint->timeline = gcvNULL;
++ kfree(syncPoint);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_SignalSyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++ gctBOOL acquired = gcvFALSE;
++
++ gcmkHEADER_ARG("Os=0x%X SyncPoint=%d", Os, (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
++
++ gcmkONERROR(gckOS_AcquireMutex(Os, Os->syncPointMutex, gcvINFINITE));
++ acquired = gcvTRUE;
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ gcmkASSERT(syncPoint->id == (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Set state. */
++ atomic_set(&syncPoint->state, gcvTRUE);
++
++ /* Signal timeline. */
++ if (syncPoint->timeline)
++ {
++ sync_timeline_signal(syncPoint->timeline);
++ }
++
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
++ acquired = gcvFALSE;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ if (acquired)
++ {
++ /* Release the mutex. */
++ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
++ }
++
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_QuerySyncPoint(
++ IN gckOS Os,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctBOOL_PTR State
++ )
++{
++ gceSTATUS status;
++ gcsSYNC_POINT_PTR syncPoint;
++
++ gcmkHEADER_ARG("Os=0x%X SyncPoint=%d", Os, (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Verify the arguments. */
++ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
++ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ gcmkASSERT(syncPoint->id == (gctUINT32)(gctUINTPTR_T)SyncPoint);
++
++ /* Get state. */
++ *State = atomic_read(&syncPoint->state);
++
++ /* Success. */
++ gcmkFOOTER_ARG("*State=%d", *State);
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_CreateSyncTimeline(
++ IN gckOS Os,
++ OUT gctHANDLE * Timeline
++ )
++{
++ struct viv_sync_timeline * timeline;
++
++ /* Create viv sync timeline. */
++ timeline = viv_sync_timeline_create("viv timeline", Os);
++
++ if (timeline == gcvNULL)
++ {
++ /* Out of memory. */
++ return gcvSTATUS_OUT_OF_MEMORY;
++ }
++
++ *Timeline = (gctHANDLE) timeline;
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_DestroySyncTimeline(
++ IN gckOS Os,
++ IN gctHANDLE Timeline
++ )
++{
++ struct viv_sync_timeline * timeline;
++ gcmkASSERT(Timeline != gcvNULL);
++
++ /* Destroy timeline. */
++ timeline = (struct viv_sync_timeline *) Timeline;
++ sync_timeline_destroy(&timeline->obj);
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_CreateNativeFence(
++ IN gckOS Os,
++ IN gctHANDLE Timeline,
++ IN gctSYNC_POINT SyncPoint,
++ OUT gctINT * FenceFD
++ )
++{
++ int fd = -1;
++ struct viv_sync_timeline *timeline;
++ struct sync_pt * pt = gcvNULL;
++ struct sync_fence * fence;
++ char name[32];
++ gcsSYNC_POINT_PTR syncPoint;
++ gceSTATUS status;
++
++ gcmkHEADER_ARG("Os=0x%X Timeline=0x%X SyncPoint=%d",
++ Os, Timeline, (gctUINT)(gctUINTPTR_T)SyncPoint);
++
++ gcmkONERROR(
++ _QueryIntegerId(&Os->syncPointDB,
++ (gctUINT32)(gctUINTPTR_T)SyncPoint,
++ (gctPOINTER)&syncPoint));
++
++ /* Cast timeline. */
++ timeline = (struct viv_sync_timeline *) Timeline;
++
++ fd = get_unused_fd();
++
++ if (fd < 0)
++ {
++ /* Out of resources. */
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ /* Create viv_sync_pt. */
++ pt = viv_sync_pt_create(timeline, SyncPoint);
++
++ if (pt == gcvNULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Reference sync_timeline. */
++ syncPoint->timeline = &timeline->obj;
++
++ /* Build fence name. */
++ snprintf(name, 32, "viv sync_fence-%u", (gctUINT)(gctUINTPTR_T)SyncPoint);
++
++ /* Create sync_fence. */
++ fence = sync_fence_create(name, pt);
++
++ if (fence == NULL)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ /* Install fence to fd. */
++ sync_fence_install(fence, fd);
++
++ *FenceFD = fd;
++ gcmkFOOTER_ARG("*FenceFD=%d", fd);
++ return gcvSTATUS_OK;
++
++OnError:
++ /* Error roll back. */
++ if (pt)
++ {
++ sync_pt_free(pt);
++ }
++
++ if (fd > 0)
++ {
++ put_unused_fd(fd);
++ }
++
++ gcmkFOOTER();
++ return status;
++}
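++
++/* Flow sketch (illustrative only, names hypothetical): a native fence
++** is built from a sync point and handed to user space as a file
++** descriptor; signaling the sync point later signals the fence.
++**
++**     gctSYNC_POINT sp;
++**     gctINT fd;
++**     gcmkONERROR(gckOS_CreateSyncPoint(Os, &sp));
++**     gcmkONERROR(gckOS_CreateNativeFence(Os, timeline, sp, &fd));
++**     pass fd to user space, then on completion:
++**     gcmkONERROR(gckOS_SignalSyncPoint(Os, sp));
++*/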
++#endif
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_os.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_os.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_os.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_os.h 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,83 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_os_h_
++#define __gc_hal_kernel_os_h_
++
++typedef struct _LINUX_MDL_MAP
++{
++ gctINT pid;
++ gctPOINTER vmaAddr;
++ gctUINT32 count;
++ struct vm_area_struct * vma;
++ struct _LINUX_MDL_MAP * next;
++}
++LINUX_MDL_MAP;
++
++typedef struct _LINUX_MDL_MAP * PLINUX_MDL_MAP;
++
++typedef struct _LINUX_MDL
++{
++ gctINT pid;
++ char * addr;
++
++ union _pages
++ {
++ /* Pointer to a array of pages. */
++ struct page * contiguousPages;
++ /* Pointer to a array of pointers to page. */
++ struct page ** nonContiguousPages;
++ }
++ u;
++
++#ifdef NO_DMA_COHERENT
++ gctPOINTER kaddr;
++#endif /* NO_DMA_COHERENT */
++
++ gctINT numPages;
++ gctINT pagedMem;
++ gctBOOL contiguous;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
++ gctBOOL exact;
++#endif
++ dma_addr_t dmaHandle;
++ PLINUX_MDL_MAP maps;
++ struct _LINUX_MDL * prev;
++ struct _LINUX_MDL * next;
++}
++LINUX_MDL, *PLINUX_MDL;
++
++extern PLINUX_MDL_MAP
++FindMdlMap(
++ IN PLINUX_MDL Mdl,
++ IN gctINT PID
++ );
++
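++
++/* Usage sketch (illustrative only, names hypothetical): each LINUX_MDL
++** keeps a per-process list of mappings, and FindMdlMap() returns the
++** entry that belongs to one PID.
++**
++**     PLINUX_MDL_MAP map = FindMdlMap(mdl, pid);
++**     if (map != gcvNULL)
++**     {
++**         userAddress = map->vmaAddr;
++**     }
++*/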
++typedef struct _DRIVER_ARGS
++{
++ gctUINT64 InputBuffer;
++ gctUINT64 InputBufferSize;
++ gctUINT64 OutputBuffer;
++ gctUINT64 OutputBufferSize;
++}
++DRIVER_ARGS;
++
++#endif /* __gc_hal_kernel_os_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_platform.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_platform.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_platform.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_platform.h 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,279 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef _gc_hal_kernel_platform_h_
++#define _gc_hal_kernel_platform_h_
++#include <linux/mm.h>
++
++typedef struct _gcsMODULE_PARAMETERS
++{
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++ gctINT irqLine3D0;
++ gctUINT registerMemBase3D0;
++ gctUINT registerMemSize3D0;
++ gctINT irqLine3D1;
++ gctUINT registerMemBase3D1;
++ gctUINT registerMemSize3D1;
++#else
++ gctINT irqLine;
++ gctUINT registerMemBase;
++ gctUINT registerMemSize;
++#endif
++ gctINT irqLine2D;
++ gctUINT registerMemBase2D;
++ gctUINT registerMemSize2D;
++ gctINT irqLineVG;
++ gctUINT registerMemBaseVG;
++ gctUINT registerMemSizeVG;
++ gctUINT contiguousSize;
++ gctUINT contiguousBase;
++ gctUINT contiguousRequested;
++ gctUINT bankSize;
++ gctINT fastClear;
++ gctINT compression;
++ gctINT powerManagement;
++ gctINT gpuProfiler;
++ gctINT signal;
++ gctUINT baseAddress;
++ gctUINT physSize;
++ gctUINT logFileSize;
++ gctUINT recovery;
++ gctUINT stuckDump;
++ gctUINT showArgs;
++ gctUINT gpu3DMinClock;
++}
++gcsMODULE_PARAMETERS;
++
++typedef struct _gcsPLATFORM * gckPLATFORM;
++
++typedef struct _gcsPLATFORM_OPERATIONS
++{
++ /*******************************************************************************
++ **
++ ** needAddDevice
++ **
++ ** Determine whether the platform_device should be created by this
++ ** initialization code. If the platform_device is created by the BSP,
++ ** return gcvFALSE here.
++ */
++ gctBOOL
++ (*needAddDevice)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** adjustParam
++ **
++ ** Override the contents of the arguments. If an argument is not
++ ** changed here, it keeps its default value or the value set on the
++ ** insmod command line.
++ */
++ gceSTATUS
++ (*adjustParam)(
++ IN gckPLATFORM Platform,
++ OUT gcsMODULE_PARAMETERS *Args
++ );
++
++ /*******************************************************************************
++ **
++ ** adjustDriver
++ **
++ ** Override the contents of the platform_driver that will be registered.
++ */
++ gceSTATUS
++ (*adjustDriver)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** getPower
++ **
++ ** Prepare power and clock operation.
++ */
++ gceSTATUS
++ (*getPower)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** putPower
++ **
++ ** Finish power and clock operation.
++ */
++ gceSTATUS
++ (*putPower)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** allocPriv
++ **
++ ** Construct platform private data.
++ */
++ gceSTATUS
++ (*allocPriv)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** freePriv
++ **
++ ** Free platform private data.
++ */
++ gceSTATUS
++ (*freePriv)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** setPower
++ **
++ ** Set power state of specified GPU.
++ **
++ ** INPUT:
++ **
++ ** gceCORE GPU
++ ** The GPU that needs to be configured.
++ **
++ ** gceBOOL Enable
++ ** Enable or disable power.
++ */
++ gceSTATUS
++ (*setPower)(
++ IN gckPLATFORM Platform,
++ IN gceCORE GPU,
++ IN gctBOOL Enable
++ );
++
++ /*******************************************************************************
++ **
++ ** setClock
++ **
++ ** Set clock state of specified GPU.
++ **
++ ** INPUT:
++ **
++ ** gceCORE GPU
++ ** The GPU that needs to be configured.
++ **
++ ** gceBOOL Enable
++ ** Enable or disable clock.
++ */
++ gceSTATUS
++ (*setClock)(
++ IN gckPLATFORM Platform,
++ IN gceCORE GPU,
++ IN gctBOOL Enable
++ );
++
++ /*******************************************************************************
++ **
++ ** reset
++ **
++ ** Reset the GPU externally.
++ **
++ ** INPUT:
++ **
++ ** gceCORE GPU
++ ** The GPU that needs to be reset.
++ */
++ gceSTATUS
++ (*reset)(
++ IN gckPLATFORM Platform,
++ IN gceCORE GPU
++ );
++
++ /*******************************************************************************
++ **
++ ** getGPUPhysical
++ **
++ ** Convert CPU physical address to GPU physical address if they are
++ ** different.
++ */
++ gceSTATUS
++ (*getGPUPhysical)(
++ IN gckPLATFORM Platform,
++ IN gctUINT32 CPUPhysical,
++ OUT gctUINT32_PTR GPUPhysical
++ );
++
++ /*******************************************************************************
++ **
++ ** adjustProt
++ **
++ ** Override the protection flags when mapping paged memory to user space.
++ */
++ gceSTATUS
++ (*adjustProt)(
++ IN struct vm_area_struct * vma
++ );
++
++ /*******************************************************************************
++ **
++ ** shrinkMemory
++ **
++ ** Reclaim memory, e.g. act as an OOM killer.
++ */
++ gceSTATUS
++ (*shrinkMemory)(
++ IN gckPLATFORM Platform
++ );
++
++ /*******************************************************************************
++ **
++ ** cache
++ **
++ ** Cache operation.
++ */
++ gceSTATUS
++ (*cache)(
++ IN gckPLATFORM Platform,
++ IN gctUINT32 ProcessID,
++ IN gctPHYS_ADDR Handle,
++ IN gctUINT32 Physical,
++ IN gctPOINTER Logical,
++ IN gctSIZE_T Bytes,
++ IN gceCACHEOPERATION Operation
++ );
++}
++gcsPLATFORM_OPERATIONS;
++
++typedef struct _gcsPLATFORM
++{
++ struct platform_device* device;
++ struct platform_driver* driver;
++
++ gcsPLATFORM_OPERATIONS* ops;
++
++ void* priv;
++}
++gcsPLATFORM;
++
++void
++gckPLATFORM_QueryOperations(
++ IN gcsPLATFORM_OPERATIONS ** Operations
++ );
++
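++
++/* Implementation sketch (illustrative only, callback names
++** hypothetical): a platform file fills in a gcsPLATFORM_OPERATIONS
++** table and returns it from gckPLATFORM_QueryOperations(); hooks left
++** NULL are skipped by the probe code.
++**
++**     static gcsPLATFORM_OPERATIONS platformOperations =
++**     {
++**         .adjustParam = _AdjustParam,
++**         .getPower    = _GetPower,
++**         .putPower    = _PutPower,
++**     };
++**
++**     void
++**     gckPLATFORM_QueryOperations(
++**         IN gcsPLATFORM_OPERATIONS ** Operations
++**         )
++**     {
++**         *Operations = &platformOperations;
++**     }
++*/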
++#endif
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_probe.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_probe.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_probe.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_probe.c 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,1347 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include <linux/device.h>
++#include <linux/slab.h>
++
++#include "gc_hal_kernel_linux.h"
++#include "gc_hal_driver.h"
++
++#if USE_PLATFORM_DRIVER
++# include <linux/platform_device.h>
++#endif
++
++#ifdef CONFIG_PXA_DVFM
++# include <mach/dvfm.h>
++# include <mach/pxa3xx_dvfm.h>
++#endif
++
++
++/* Zone used for header/footer. */
++#define _GC_OBJ_ZONE gcvZONE_DRIVER
++
++MODULE_DESCRIPTION("Vivante Graphics Driver");
++MODULE_LICENSE("GPL");
++
++static struct class* gpuClass;
++
++static gcsPLATFORM platform;
++
++static gckGALDEVICE galDevice;
++
++static uint major = 199;
++module_param(major, uint, 0644);
++
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++static int irqLine3D0 = -1;
++module_param(irqLine3D0, int, 0644);
++
++static ulong registerMemBase3D0 = 0;
++module_param(registerMemBase3D0, ulong, 0644);
++
++static ulong registerMemSize3D0 = 2 << 10;
++module_param(registerMemSize3D0, ulong, 0644);
++
++static int irqLine3D1 = -1;
++module_param(irqLine3D1, int, 0644);
++
++static ulong registerMemBase3D1 = 0;
++module_param(registerMemBase3D1, ulong, 0644);
++
++static ulong registerMemSize3D1 = 2 << 10;
++module_param(registerMemSize3D1, ulong, 0644);
++#else
++static int irqLine = -1;
++module_param(irqLine, int, 0644);
++
++static ulong registerMemBase = 0x80000000;
++module_param(registerMemBase, ulong, 0644);
++
++static ulong registerMemSize = 2 << 10;
++module_param(registerMemSize, ulong, 0644);
++#endif
++
++static int irqLine2D = -1;
++module_param(irqLine2D, int, 0644);
++
++static ulong registerMemBase2D = 0x00000000;
++module_param(registerMemBase2D, ulong, 0644);
++
++static ulong registerMemSize2D = 2 << 10;
++module_param(registerMemSize2D, ulong, 0644);
++
++static int irqLineVG = -1;
++module_param(irqLineVG, int, 0644);
++
++static ulong registerMemBaseVG = 0x00000000;
++module_param(registerMemBaseVG, ulong, 0644);
++
++static ulong registerMemSizeVG = 2 << 10;
++module_param(registerMemSizeVG, ulong, 0644);
++
++#ifndef gcdDEFAULT_CONTIGUOUS_SIZE
++#define gcdDEFAULT_CONTIGUOUS_SIZE (4 << 20)
++#endif
++static ulong contiguousSize = gcdDEFAULT_CONTIGUOUS_SIZE;
++module_param(contiguousSize, ulong, 0644);
++
++static ulong contiguousBase = 0;
++module_param(contiguousBase, ulong, 0644);
++
++static ulong bankSize = 0;
++module_param(bankSize, ulong, 0644);
++
++static int fastClear = -1;
++module_param(fastClear, int, 0644);
++
++static int compression = -1;
++module_param(compression, int, 0644);
++
++static int powerManagement = -1;
++module_param(powerManagement, int, 0644);
++
++static int gpuProfiler = 0;
++module_param(gpuProfiler, int, 0644);
++
++static int signal = 48;
++module_param(signal, int, 0644);
++
++static ulong baseAddress = 0;
++module_param(baseAddress, ulong, 0644);
++
++static ulong physSize = 0;
++module_param(physSize, ulong, 0644);
++
++static uint logFileSize = 0;
++module_param(logFileSize,uint, 0644);
++
++static uint recovery = 1;
++module_param(recovery, uint, 0644);
++MODULE_PARM_DESC(recovery, "Recover GPU from stuck (1: Enable, 0: Disable)");
++
++/* Middle needs about a 40KB buffer; Maximal may need more than a 200KB buffer. */
++static uint stuckDump = 1;
++module_param(stuckDump, uint, 0644);
++MODULE_PARM_DESC(stuckDump, "Level of stuck dump content (1: Minimal, 2: Middle, 3: Maximal)");
++
++static int showArgs = 0;
++module_param(showArgs, int, 0644);
++
++static int mmu = 1;
++module_param(mmu, int, 0644);
++
++static int gpu3DMinClock = 1;
++
++static int contiguousRequested = 0;
++
++static int drv_open(
++ struct inode* inode,
++ struct file* filp
++ );
++
++static int drv_release(
++ struct inode* inode,
++ struct file* filp
++ );
++
++static long drv_ioctl(
++ struct file* filp,
++ unsigned int ioctlCode,
++ unsigned long arg
++ );
++
++static int drv_mmap(
++ struct file* filp,
++ struct vm_area_struct* vma
++ );
++
++static struct file_operations driver_fops =
++{
++ .owner = THIS_MODULE,
++ .open = drv_open,
++ .release = drv_release,
++ .unlocked_ioctl = drv_ioctl,
++#ifdef HAVE_COMPAT_IOCTL
++ .compat_ioctl = drv_ioctl,
++#endif
++ .mmap = drv_mmap,
++};
++
++void
++_UpdateModuleParam(
++ gcsMODULE_PARAMETERS *Param
++ )
++{
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++#else
++ irqLine = Param->irqLine ;
++ registerMemBase = Param->registerMemBase;
++ registerMemSize = Param->registerMemSize;
++#endif
++ irqLine2D = Param->irqLine2D ;
++ registerMemBase2D = Param->registerMemBase2D;
++ registerMemSize2D = Param->registerMemSize2D;
++ irqLineVG = Param->irqLineVG;
++ registerMemBaseVG = Param->registerMemBaseVG;
++ registerMemSizeVG = Param->registerMemSizeVG;
++ contiguousSize = Param->contiguousSize;
++ contiguousBase = Param->contiguousBase;
++ bankSize = Param->bankSize;
++ fastClear = Param->fastClear;
++ compression = Param->compression;
++ powerManagement = Param->powerManagement;
++ gpuProfiler = Param->gpuProfiler;
++ signal = Param->signal;
++ baseAddress = Param->baseAddress;
++ physSize = Param->physSize;
++ logFileSize = Param->logFileSize;
++ recovery = Param->recovery;
++ stuckDump = Param->stuckDump;
++ showArgs = Param->showArgs;
++ contiguousRequested = Param->contiguousRequested;
++ gpu3DMinClock = Param->gpu3DMinClock;
++}
++
++void
++gckOS_DumpParam(
++ void
++ )
++{
++ printk("Galcore options:\n");
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++ printk(" irqLine3D0 = %d\n", irqLine3D0);
++ printk(" registerMemBase3D0 = 0x%08lX\n", registerMemBase3D0);
++ printk(" registerMemSize3D0 = 0x%08lX\n", registerMemSize3D0);
++
++ if (irqLine3D1 != -1)
++ {
++ printk(" irqLine3D1 = %d\n", irqLine3D1);
++ printk(" registerMemBase3D1 = 0x%08lX\n", registerMemBase3D1);
++ printk(" registerMemSize3D1 = 0x%08lX\n", registerMemSize3D1);
++ }
++#else
++ printk(" irqLine = %d\n", irqLine);
++ printk(" registerMemBase = 0x%08lX\n", registerMemBase);
++ printk(" registerMemSize = 0x%08lX\n", registerMemSize);
++#endif
++
++ if (irqLine2D != -1)
++ {
++ printk(" irqLine2D = %d\n", irqLine2D);
++ printk(" registerMemBase2D = 0x%08lX\n", registerMemBase2D);
++ printk(" registerMemSize2D = 0x%08lX\n", registerMemSize2D);
++ }
++
++ if (irqLineVG != -1)
++ {
++ printk(" irqLineVG = %d\n", irqLineVG);
++ printk(" registerMemBaseVG = 0x%08lX\n", registerMemBaseVG);
++ printk(" registerMemSizeVG = 0x%08lX\n", registerMemSizeVG);
++ }
++
++ printk(" contiguousSize = %ld\n", contiguousSize);
++ printk(" contiguousBase = 0x%08lX\n", contiguousBase);
++ printk(" bankSize = 0x%08lX\n", bankSize);
++ printk(" fastClear = %d\n", fastClear);
++ printk(" compression = %d\n", compression);
++ printk(" signal = %d\n", signal);
++ printk(" powerManagement = %d\n", powerManagement);
++ printk(" baseAddress = 0x%08lX\n", baseAddress);
++ printk(" physSize = 0x%08lX\n", physSize);
++ printk(" logFileSize = %d KB \n", logFileSize);
++ printk(" recovery = %d\n", recovery);
++ printk(" stuckDump = %d\n", stuckDump);
++ printk(" gpuProfiler = %d\n", gpuProfiler);
++}
++
++int drv_open(
++ struct inode* inode,
++ struct file* filp
++ )
++{
++ gceSTATUS status;
++ gctBOOL attached = gcvFALSE;
++ gcsHAL_PRIVATE_DATA_PTR data = gcvNULL;
++ gctINT i;
++
++ gcmkHEADER_ARG("inode=0x%08X filp=0x%08X", inode, filp);
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = kmalloc(sizeof(gcsHAL_PRIVATE_DATA), GFP_KERNEL | __GFP_NOWARN);
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ data->device = galDevice;
++ data->mappedMemory = gcvNULL;
++ data->contiguousLogical = gcvNULL;
++ gcmkONERROR(gckOS_GetProcessID(&data->pidOpen));
++
++ /* Attach the process. */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (galDevice->kernels[i] != gcvNULL)
++ {
++ gcmkONERROR(gckKERNEL_AttachProcess(galDevice->kernels[i], gcvTRUE));
++ }
++ }
++ attached = gcvTRUE;
++
++ if (!galDevice->contiguousMapped)
++ {
++ if (galDevice->contiguousPhysical != gcvNULL)
++ {
++ gcmkONERROR(gckOS_MapMemory(
++ galDevice->os,
++ galDevice->contiguousPhysical,
++ galDevice->contiguousSize,
++ &data->contiguousLogical
++ ));
++ }
++ }
++
++ filp->private_data = data;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ if (data != gcvNULL)
++ {
++ if (data->contiguousLogical != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckOS_UnmapMemory(
++ galDevice->os,
++ galDevice->contiguousPhysical,
++ galDevice->contiguousSize,
++ data->contiguousLogical
++ ));
++ }
++
++ kfree(data);
++ }
++
++ if (attached)
++ {
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (galDevice->kernels[i] != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckKERNEL_AttachProcess(galDevice->kernels[i], gcvFALSE));
++ }
++ }
++ }
++
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++int drv_release(
++ struct inode* inode,
++ struct file* filp
++ )
++{
++ gceSTATUS status;
++ gcsHAL_PRIVATE_DATA_PTR data;
++ gckGALDEVICE device;
++ gctINT i;
++
++ gcmkHEADER_ARG("inode=0x%08X filp=0x%08X", inode, filp);
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = filp->private_data;
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ device = data->device;
++
++ if (device == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): device is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if (!device->contiguousMapped)
++ {
++ if (data->contiguousLogical != gcvNULL)
++ {
++ gcmkONERROR(gckOS_UnmapMemoryEx(
++ galDevice->os,
++ galDevice->contiguousPhysical,
++ galDevice->contiguousSize,
++ data->contiguousLogical,
++ data->pidOpen
++ ));
++
++ data->contiguousLogical = gcvNULL;
++ }
++ }
++
++ /* Detach the process. */
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (galDevice->kernels[i] != gcvNULL)
++ {
++ gcmkONERROR(gckKERNEL_AttachProcessEx(galDevice->kernels[i], gcvFALSE, data->pidOpen));
++ }
++ }
++
++ kfree(data);
++ filp->private_data = NULL;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++long drv_ioctl(
++ struct file* filp,
++ unsigned int ioctlCode,
++ unsigned long arg
++ )
++{
++ gceSTATUS status;
++ gcsHAL_INTERFACE iface;
++ gctUINT32 copyLen;
++ DRIVER_ARGS drvArgs;
++ gckGALDEVICE device;
++ gcsHAL_PRIVATE_DATA_PTR data;
++ gctINT32 i, count;
++ gckVIDMEM_NODE nodeObject;
++
++ gcmkHEADER_ARG(
++ "filp=0x%08X ioctlCode=0x%08X arg=0x%08X",
++ filp, ioctlCode, arg
++ );
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = filp->private_data;
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ device = data->device;
++
++ if (device == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): device is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if ((ioctlCode != IOCTL_GCHAL_INTERFACE)
++ && (ioctlCode != IOCTL_GCHAL_KERNEL_INTERFACE)
++ )
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): unknown command %d\n",
++ __FUNCTION__, __LINE__,
++ ioctlCode
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Get the drvArgs. */
++ copyLen = copy_from_user(
++ &drvArgs, (void *) arg, sizeof(DRIVER_ARGS)
++ );
++
++ if (copyLen != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): error copying the input arguments.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Now bring in the gcsHAL_INTERFACE structure. */
++ if ((drvArgs.InputBufferSize != sizeof(gcsHAL_INTERFACE))
++ || (drvArgs.OutputBufferSize != sizeof(gcsHAL_INTERFACE))
++ )
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): input and/or output structures are invalid.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ copyLen = copy_from_user(
++ &iface, gcmUINT64_TO_PTR(drvArgs.InputBuffer), sizeof(gcsHAL_INTERFACE)
++ );
++
++ if (copyLen != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): error copying the input HAL interface.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ if (iface.command == gcvHAL_CHIP_INFO)
++ {
++ count = 0;
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ iface.u.ChipInfo.types[count] = gcvHARDWARE_VG;
++ }
++ else
++#endif
++ {
++ gcmkVERIFY_OK(gckHARDWARE_GetType(device->kernels[i]->hardware,
++ &iface.u.ChipInfo.types[count]));
++ }
++ count++;
++ }
++ }
++
++ iface.u.ChipInfo.count = count;
++ iface.status = status = gcvSTATUS_OK;
++ }
++ else
++ {
++ if (iface.hardwareType > 7)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): unknown hardwareType %d\n",
++ __FUNCTION__, __LINE__,
++ iface.hardwareType
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++#if gcdENABLE_VG
++ if (device->coreMapping[iface.hardwareType] == gcvCORE_VG)
++ {
++ status = gckVGKERNEL_Dispatch(device->kernels[gcvCORE_VG],
++ (ioctlCode == IOCTL_GCHAL_INTERFACE),
++ &iface);
++ }
++ else
++#endif
++ {
++ status = gckKERNEL_Dispatch(device->kernels[device->coreMapping[iface.hardwareType]],
++ (ioctlCode == IOCTL_GCHAL_INTERFACE),
++ &iface);
++ }
++ }
++
++ /* Redo the system call after the pending signal is handled. */
++ if (status == gcvSTATUS_INTERRUPTED)
++ {
++ gcmkFOOTER();
++ return -ERESTARTSYS;
++ }
++
++ if (gcmIS_SUCCESS(status) && (iface.command == gcvHAL_LOCK_VIDEO_MEMORY))
++ {
++ gcuVIDMEM_NODE_PTR node;
++ gctUINT32 processID;
++
++ gckOS_GetProcessID(&processID);
++
++ gcmkONERROR(gckVIDMEM_HANDLE_Lookup(device->kernels[device->coreMapping[iface.hardwareType]],
++ processID,
++ (gctUINT32)iface.u.LockVideoMemory.node,
++ &nodeObject));
++ node = nodeObject->node;
++
++ /* Special case for mapped memory. */
++ if ((data->mappedMemory != gcvNULL)
++ && (node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
++ )
++ {
++ /* Compute offset into mapped memory. */
++ gctUINT32 offset
++ = (gctUINT8 *) gcmUINT64_TO_PTR(iface.u.LockVideoMemory.memory)
++ - (gctUINT8 *) device->contiguousBase;
++
++ /* Compute offset into user-mapped region. */
++ iface.u.LockVideoMemory.memory =
++ gcmPTR_TO_UINT64((gctUINT8 *) data->mappedMemory + offset);
++ }
++ }
++
++ /* Copy data back to the user. */
++ copyLen = copy_to_user(
++ gcmUINT64_TO_PTR(drvArgs.OutputBuffer), &iface, sizeof(gcsHAL_INTERFACE)
++ );
++
++ if (copyLen != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): error copying the output HAL interface.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ gcmkFOOTER();
++ return -ENOTTY;
++}
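++
++/* Caller sketch (illustrative only): user space wraps a
++** gcsHAL_INTERFACE in DRIVER_ARGS; both buffer sizes must equal
++** sizeof(gcsHAL_INTERFACE) or the ioctl is rejected above.
++**
++**     gcsHAL_INTERFACE iface = { 0 };
++**     DRIVER_ARGS args;
++**
++**     iface.command         = gcvHAL_CHIP_INFO;
++**     args.InputBuffer      = gcmPTR_TO_UINT64(&iface);
++**     args.InputBufferSize  = sizeof(iface);
++**     args.OutputBuffer     = gcmPTR_TO_UINT64(&iface);
++**     args.OutputBufferSize = sizeof(iface);
++**     ioctl(fd, IOCTL_GCHAL_INTERFACE, &args);
++*/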
++
++static int drv_mmap(
++ struct file* filp,
++ struct vm_area_struct* vma
++ )
++{
++ gceSTATUS status = gcvSTATUS_OK;
++ gcsHAL_PRIVATE_DATA_PTR data;
++ gckGALDEVICE device;
++
++ gcmkHEADER_ARG("filp=0x%08X vma=0x%08X", filp, vma);
++
++ if (filp == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): filp is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ data = filp->private_data;
++
++ if (data == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): private_data is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ device = data->device;
++
++ if (device == gcvNULL)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): device is NULL\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++#if !gcdPAGED_MEMORY_CACHEABLE
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++ vma->vm_flags |= gcdVM_FLAGS;
++#endif
++ vma->vm_pgoff = 0;
++
++ if (device->contiguousMapped)
++ {
++ unsigned long size = vma->vm_end - vma->vm_start;
++ int ret = 0;
++
++ if (size > device->contiguousSize)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Invalid mapping size.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
++ }
++
++ ret = io_remap_pfn_range(
++ vma,
++ vma->vm_start,
++ device->requestedContiguousBase >> PAGE_SHIFT,
++ size,
++ vma->vm_page_prot
++ );
++
++ if (ret != 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): io_remap_pfn_range failed %d\n",
++ __FUNCTION__, __LINE__,
++ ret
++ );
++
++ data->mappedMemory = gcvNULL;
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ data->mappedMemory = (gctPOINTER) vma->vm_start;
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++ }
++
++OnError:
++ gcmkFOOTER();
++ return -ENOTTY;
++}
++
++
++#if !USE_PLATFORM_DRIVER
++static int __init drv_init(void)
++#else
++static int drv_init(void)
++#endif
++{
++ int ret;
++ int result = -EINVAL;
++ gceSTATUS status;
++ gckGALDEVICE device = gcvNULL;
++ struct class* device_class = gcvNULL;
++
++ gcsDEVICE_CONSTRUCT_ARGS args = {
++ .recovery = recovery,
++ .stuckDump = stuckDump,
++ .gpu3DMinClock = gpu3DMinClock,
++ .contiguousRequested = contiguousRequested,
++ .platform = &platform,
++ .mmu = mmu,
++ };
++
++ gcmkHEADER();
++
++ printk(KERN_INFO "Galcore version %d.%d.%d.%d\n",
++ gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH, gcvVERSION_BUILD);
++
++#if !VIVANTE_PROFILER_PM
++ /* When the GPU profiler is enabled, GPU power management must be turned off. */
++ if (gpuProfiler)
++ {
++ powerManagement = 0;
++ }
++#endif
++
++ if (showArgs)
++ {
++ gckOS_DumpParam();
++ }
++
++ if (logFileSize != 0)
++ {
++ gckDEBUGFS_Initialize();
++ }
++
++ /* Create the GAL device. */
++ status = gckGALDEVICE_Construct(
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++ irqLine3D0,
++ registerMemBase3D0, registerMemSize3D0,
++ irqLine3D1,
++ registerMemBase3D1, registerMemSize3D1,
++#else
++ irqLine,
++ registerMemBase, registerMemSize,
++#endif
++ irqLine2D,
++ registerMemBase2D, registerMemSize2D,
++ irqLineVG,
++ registerMemBaseVG, registerMemSizeVG,
++ contiguousBase, contiguousSize,
++ bankSize, fastClear, compression, baseAddress, physSize, signal,
++ logFileSize,
++ powerManagement,
++ gpuProfiler,
++ &args,
++ &device
++ );
++
++ if (gcmIS_ERROR(status))
++ {
++ gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to create the GAL device: status=%d\n",
++ __FUNCTION__, __LINE__, status);
++
++ goto OnError;
++ }
++
++ /* Start the GAL device. */
++ gcmkONERROR(gckGALDEVICE_Start(device));
++
++ if ((physSize != 0)
++ && (device->kernels[gcvCORE_MAJOR] != gcvNULL)
++ && (device->kernels[gcvCORE_MAJOR]->hardware->mmuVersion != 0))
++ {
++ /* Reset the base address */
++ device->baseAddress = 0;
++ }
++
++ /* Register the character device. */
++ ret = register_chrdev(major, DEVICE_NAME, &driver_fops);
++
++ if (ret < 0)
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Could not allocate major number for mmap.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ if (major == 0)
++ {
++ major = ret;
++ }
++
++ /* Create the device class. */
++ device_class = class_create(THIS_MODULE, "graphics_class");
++
++ if (IS_ERR(device_class))
++ {
++ gcmkTRACE_ZONE(
++ gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to create the class.\n",
++ __FUNCTION__, __LINE__
++ );
++
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
++ device_create(device_class, NULL, MKDEV(major, 0), NULL, DEVICE_NAME);
++#else
++ device_create(device_class, NULL, MKDEV(major, 0), DEVICE_NAME);
++#endif
++
++ galDevice = device;
++ gpuClass = device_class;
++
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "%s(%d): irqLine3D0=%d, contiguousSize=%lu, memBase3D0=0x%lX\n",
++ __FUNCTION__, __LINE__,
++ irqLine3D0, contiguousSize, registerMemBase3D0
++ );
++#else
++ gcmkTRACE_ZONE(
++ gcvLEVEL_INFO, gcvZONE_DRIVER,
++ "%s(%d): irqLine=%d, contiguousSize=%lu, memBase=0x%lX\n",
++ __FUNCTION__, __LINE__,
++ irqLine, contiguousSize, registerMemBase
++ );
++#endif
++
++ /* Success. */
++ gcmkFOOTER_NO();
++ return 0;
++
++OnError:
++ /* Roll back. */
++ if (device_class != gcvNULL)
++ {
++ device_destroy(device_class, MKDEV(major, 0));
++ class_destroy(device_class);
++ }
++
++ if (device != gcvNULL)
++ {
++ gcmkVERIFY_OK(gckGALDEVICE_Stop(device));
++ gcmkVERIFY_OK(gckGALDEVICE_Destroy(device));
++ }
++
++ gcmkFOOTER();
++ return result;
++}
++
++#if !USE_PLATFORM_DRIVER
++static void __exit drv_exit(void)
++#else
++static void drv_exit(void)
++#endif
++{
++ gcmkHEADER();
++
++ gcmkASSERT(gpuClass != gcvNULL);
++ device_destroy(gpuClass, MKDEV(major, 0));
++ class_destroy(gpuClass);
++
++ unregister_chrdev(major, DEVICE_NAME);
++
++ gcmkVERIFY_OK(gckGALDEVICE_Stop(galDevice));
++ gcmkVERIFY_OK(gckGALDEVICE_Destroy(galDevice));
++
++ if (gckDEBUGFS_IsEnabled())
++ {
++ gckDEBUGFS_Terminate();
++ }
++
++ gcmkFOOTER_NO();
++}
++
++#if !USE_PLATFORM_DRIVER
++ module_init(drv_init);
++ module_exit(drv_exit);
++#else
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++static int gpu_probe(struct platform_device *pdev)
++#else
++static int __devinit gpu_probe(struct platform_device *pdev)
++#endif
++{
++ int ret = -ENODEV;
++ gcsMODULE_PARAMETERS moduleParam = {
++#if gcdMULTI_GPU || gcdMULTI_GPU_AFFINITY
++#else
++ .irqLine = irqLine,
++ .registerMemBase = registerMemBase,
++ .registerMemSize = registerMemSize,
++#endif
++ .irqLine2D = irqLine2D,
++ .registerMemBase2D = registerMemBase2D,
++ .registerMemSize2D = registerMemSize2D,
++ .irqLineVG = irqLineVG,
++ .registerMemBaseVG = registerMemBaseVG,
++ .registerMemSizeVG = registerMemSizeVG,
++ .contiguousSize = contiguousSize,
++ .contiguousBase = contiguousBase,
++ .bankSize = bankSize,
++ .fastClear = fastClear,
++ .compression = compression,
++ .powerManagement = powerManagement,
++ .gpuProfiler = gpuProfiler,
++ .signal = signal,
++ .baseAddress = baseAddress,
++ .physSize = physSize,
++ .logFileSize = logFileSize,
++ .recovery = recovery,
++ .stuckDump = stuckDump,
++ .showArgs = showArgs,
++ .gpu3DMinClock = gpu3DMinClock,
++ };
++
++ gcmkHEADER();
++
++ platform.device = pdev;
++
++ if (platform.ops->getPower)
++ {
++ if (gcmIS_ERROR(platform.ops->getPower(&platform)))
++ {
++ gcmkFOOTER_NO();
++ return ret;
++ }
++ }
++
++ if (platform.ops->adjustParam)
++ {
++ /* Override default module param. */
++ platform.ops->adjustParam(&platform, &moduleParam);
++
++ /* Update module param because drv_init() uses them directly. */
++ _UpdateModuleParam(&moduleParam);
++ }
++
++ ret = drv_init();
++
++ if (!ret)
++ {
++ platform_set_drvdata(pdev, galDevice);
++
++ gcmkFOOTER_NO();
++ return ret;
++ }
++
++ gcmkFOOTER_ARG(KERN_INFO "Failed to register gpu driver: %d\n", ret);
++ return ret;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++static int gpu_remove(struct platform_device *pdev)
++#else
++static int __devexit gpu_remove(struct platform_device *pdev)
++#endif
++{
++ gcmkHEADER();
++
++ drv_exit();
++
++ if (platform.ops->putPower)
++ {
++ platform.ops->putPower(&platform);
++ }
++
++ gcmkFOOTER_NO();
++ return 0;
++}
++
++static int gpu_suspend(struct platform_device *dev, pm_message_t state)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++ gctINT i;
++
++ device = platform_get_drvdata(dev);
++
++ if (!device)
++ {
++ return -1;
++ }
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL)
++ {
++ /* Store states. */
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_QueryPowerManagementState(device->kernels[i]->vg->hardware, &device->statesStored[i]);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_QueryPowerManagementState(device->kernels[i]->hardware, &device->statesStored[i]);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(device->kernels[i]->vg->hardware, gcvPOWER_OFF);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, gcvPOWER_OFF);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++
++ }
++ }
++
++ return 0;
++}
++
++static int gpu_resume(struct platform_device *dev)
++{
++ gceSTATUS status;
++ gckGALDEVICE device;
++ gctINT i;
++ gceCHIPPOWERSTATE statesStored;
++
++ device = platform_get_drvdata(dev);
++
++ if (!device)
++ {
++ return -1;
++ }
++
++ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
++ {
++ if (device->kernels[i] != gcvNULL)
++ {
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(device->kernels[i]->vg->hardware, gcvPOWER_ON);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, gcvPOWER_ON);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++
++            /* Convert the stored global state to the corresponding internal state. */
++            switch (device->statesStored[i])
++ {
++ case gcvPOWER_OFF:
++ statesStored = gcvPOWER_OFF_BROADCAST;
++ break;
++ case gcvPOWER_IDLE:
++ statesStored = gcvPOWER_IDLE_BROADCAST;
++ break;
++ case gcvPOWER_SUSPEND:
++ statesStored = gcvPOWER_SUSPEND_BROADCAST;
++ break;
++ case gcvPOWER_ON:
++ statesStored = gcvPOWER_ON_AUTO;
++ break;
++ default:
++ statesStored = device->statesStored[i];
++ break;
++ }
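++            /* The *_BROADCAST variants re-enter the saved state through the
++               power state machine's broadcast path, i.e. as a
++               driver-initiated transition rather than a client request. */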
++
++ /* Restore states. */
++#if gcdENABLE_VG
++ if (i == gcvCORE_VG)
++ {
++ status = gckVGHARDWARE_SetPowerManagementState(device->kernels[i]->vg->hardware, statesStored);
++ }
++ else
++#endif
++ {
++ status = gckHARDWARE_SetPowerManagementState(device->kernels[i]->hardware, statesStored);
++ }
++
++ if (gcmIS_ERROR(status))
++ {
++ return -1;
++ }
++ }
++ }
++
++ return 0;
++}
++
++#if defined(CONFIG_PM) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
++#ifdef CONFIG_PM_SLEEP
++static int gpu_system_suspend(struct device *dev)
++{
++    pm_message_t state = { 0 };
++ return gpu_suspend(to_platform_device(dev), state);
++}
++
++static int gpu_system_resume(struct device *dev)
++{
++ return gpu_resume(to_platform_device(dev));
++}
++#endif
++
++static const struct dev_pm_ops gpu_pm_ops = {
++ SET_SYSTEM_SLEEP_PM_OPS(gpu_system_suspend, gpu_system_resume)
++};
++#endif
++
++static struct platform_driver gpu_driver = {
++ .probe = gpu_probe,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
++ .remove = gpu_remove,
++#else
++ .remove = __devexit_p(gpu_remove),
++#endif
++
++ .suspend = gpu_suspend,
++ .resume = gpu_resume,
++
++ .driver = {
++ .name = DEVICE_NAME,
++#if defined(CONFIG_PM) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
++ .pm = &gpu_pm_ops,
++#endif
++ }
++};
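++/* Both the legacy .suspend/.resume hooks and dev_pm_ops are populated;
++   on kernels where gpu_pm_ops is registered, the PM core uses the
++   dev_pm_ops callbacks in preference to the legacy pair. */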
++
++static int __init gpu_init(void)
++{
++ int ret = 0;
++
++ memset(&platform, 0, sizeof(gcsPLATFORM));
++
++ gckPLATFORM_QueryOperations(&platform.ops);
++
++ if (platform.ops == gcvNULL)
++ {
++ printk(KERN_ERR "galcore: No platform specific operations.\n");
++ ret = -ENODEV;
++ goto out;
++ }
++
++ if (platform.ops->allocPriv)
++ {
++ /* Allocate platform private data. */
++ if (gcmIS_ERROR(platform.ops->allocPriv(&platform)))
++ {
++ ret = -ENOMEM;
++ goto out;
++ }
++ }
++
++ if (platform.ops->needAddDevice
++ && platform.ops->needAddDevice(&platform))
++ {
++ /* Allocate device */
++ platform.device = platform_device_alloc(DEVICE_NAME, -1);
++ if (!platform.device)
++ {
++ printk(KERN_ERR "galcore: platform_device_alloc failed.\n");
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ /* Add device */
++ ret = platform_device_add(platform.device);
++ if (ret)
++ {
++ printk(KERN_ERR "galcore: platform_device_add failed.\n");
++ goto put_dev;
++ }
++ }
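++    /* Device-tree platforms provide their own platform_device through the
++       OF match table; needAddDevice() lets legacy boards create one here. */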
++
++ platform.driver = &gpu_driver;
++
++ if (platform.ops->adjustDriver)
++ {
++ /* Override default platform_driver struct. */
++ platform.ops->adjustDriver(&platform);
++ }
++
++ ret = platform_driver_register(&gpu_driver);
++ if (!ret)
++ {
++ goto out;
++ }
++
++ platform_device_del(platform.device);
++put_dev:
++ platform_device_put(platform.device);
++
++out:
++ return ret;
++}
++
++static void __exit gpu_exit(void)
++{
++ platform_driver_unregister(&gpu_driver);
++
++ if (platform.ops->needAddDevice
++ && platform.ops->needAddDevice(&platform))
++ {
++ platform_device_unregister(platform.device);
++ }
++
++ if (platform.priv)
++ {
++ /* Free platform private data. */
++ platform.ops->freePriv(&platform);
++ }
++}
++
++module_init(gpu_init);
++module_exit(gpu_exit);
++
++#endif
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_security_channel.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_security_channel.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_security_channel.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_security_channel.c 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,385 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++#include <linux/slab.h>
++
++#include "tee_client_api.h"
++
++#define _GC_OBJ_ZONE gcvZONE_OS
++
++#define GPU3D_UUID { 0xcc9f80ea, 0xa836, 0x11e3, { 0x9b, 0x07, 0x78, 0x2b, 0xcb, 0x5c, 0xf3, 0xe3 } }
++
++static const TEEC_UUID gpu3d_uuid = GPU3D_UUID;
++TEEC_Context teecContext;
++
++typedef struct _gcsSecurityChannel {
++ gckOS os;
++ TEEC_Session session;
++ int * virtual;
++ TEEC_SharedMemory inputBuffer;
++ gctUINT32 bytes;
++ gctPOINTER mutex;
++} gcsSecurityChannel;
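++/* One channel per GPU core: "virtual" backs the shared command buffer
++   registered with the TEE, and "mutex" serializes dispatches over it. */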
++
++TEEC_SharedMemory *
++gpu3d_allocate_secure_mem(
++ gckOS Os,
++ unsigned int size
++ )
++{
++ TEEC_Result result;
++ TEEC_Context *context = &teecContext;
++ TEEC_SharedMemory *shm = NULL;
++ void *handle = NULL;
++ unsigned int phyAddr = 0xFFFFFFFF;
++ gceSTATUS status;
++ gctSIZE_T bytes = size;
++
++ shm = kmalloc(sizeof(TEEC_SharedMemory), GFP_KERNEL);
++
++ if (NULL == shm)
++ {
++ return NULL;
++ }
++
++ memset(shm, 0, sizeof(TEEC_SharedMemory));
++
++ status = gckOS_AllocatePagedMemoryEx(
++ Os,
++ gcvALLOC_FLAG_SECURITY,
++ bytes,
++ gcvNULL,
++ (gctPHYS_ADDR *)&handle);
++
++ if (gcmIS_ERROR(status))
++ {
++ kfree(shm);
++ return NULL;
++ }
++
++ status = gckOS_PhysicalToPhysicalAddress(
++ Os,
++ handle,
++ &phyAddr);
++
++ if (gcmIS_ERROR(status))
++ {
++ kfree(shm);
++ return NULL;
++ }
++
++    /* Record the allocation handle in shm->userdata for later release. */
++    shm->userdata = handle;
++
++    /* Bulk input buffer. */
++ shm->size = size;
++ shm->flags = TEEC_MEM_INPUT;
++
++ /* Use TEE Client API to register the underlying memory buffer. */
++ shm->phyAddr = (void *)phyAddr;
++
++ result = TEEC_RegisterSharedMemory(
++ context,
++ shm);
++
++ if (result != TEEC_SUCCESS)
++ {
++ gckOS_FreePagedMemory(Os, (gctPHYS_ADDR)handle, shm->size);
++ kfree(shm);
++ return NULL;
++ }
++
++ return shm;
++}
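++/* The returned TEEC_SharedMemory owns the paged GPU allocation; it is
++   released again through gpu3d_release_secure_mem(). */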
++
++void gpu3d_release_secure_mem(
++ gckOS Os,
++ void *shm_handle
++ )
++{
++ TEEC_SharedMemory *shm = shm_handle;
++ void * handle;
++
++ if (!shm)
++ {
++ return;
++ }
++
++ handle = shm->userdata;
++
++ TEEC_ReleaseSharedMemory(shm);
++ gckOS_FreePagedMemory(Os, (gctPHYS_ADDR)handle, shm->size);
++
++ kfree(shm);
++
++ return;
++}
++
++static TEEC_Result gpu3d_session_callback(
++ TEEC_Session* session,
++ uint32_t commandID,
++ TEEC_Operation* operation,
++ void* userdata
++ )
++{
++ gcsSecurityChannel *channel = userdata;
++
++ if (channel == gcvNULL)
++ {
++ return TEEC_ERROR_BAD_PARAMETERS;
++ }
++
++ switch(commandID)
++ {
++ case gcvTA_CALLBACK_ALLOC_SECURE_MEM:
++ {
++ uint32_t size = operation->params[0].value.a;
++ TEEC_SharedMemory *shm = NULL;
++
++            shm = gpu3d_allocate_secure_mem(channel->os, size);
++
++            if (shm == NULL)
++            {
++                return TEEC_ERROR_OUT_OF_MEMORY;
++            }
++
++            /* Pass the handle and its physical address back to the client side. */
++            operation->params[0].value.a = (uint32_t)shm;
++            operation->params[0].value.b = (uint32_t)shm->phyAddr;
++
++ break;
++ }
++ case gcvTA_CALLBACK_FREE_SECURE_MEM:
++ {
++ TEEC_SharedMemory *shm = (TEEC_SharedMemory *)operation->params[0].value.a;
++
++ gpu3d_release_secure_mem(channel->os, shm);
++ break;
++ }
++ default:
++ break;
++ }
++
++ return TEEC_SUCCESS;
++}
++
++gceSTATUS
++gckOS_OpenSecurityChannel(
++ IN gckOS Os,
++ IN gceCORE GPU,
++ OUT gctUINT32 *Channel
++ )
++{
++ gceSTATUS status;
++ TEEC_Result result;
++ static bool initialized = gcvFALSE;
++ gcsSecurityChannel *channel = gcvNULL;
++
++ TEEC_Operation operation = {0};
++
++ /* Connect to TEE. */
++ if (initialized == gcvFALSE)
++ {
++ result = TEEC_InitializeContext(NULL, &teecContext);
++
++ if (result != TEEC_SUCCESS) {
++ gcmkONERROR(gcvSTATUS_CHIP_NOT_READY);
++ }
++
++ initialized = gcvTRUE;
++ }
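++    /* The TEE context is created once and shared by every channel; channel
++       creation is assumed to be serialized by the caller. */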
++
++ /* Construct channel. */
++ gcmkONERROR(
++ gckOS_Allocate(Os, gcmSIZEOF(*channel), (gctPOINTER *)&channel));
++
++ gckOS_ZeroMemory(channel, gcmSIZEOF(gcsSecurityChannel));
++
++ channel->os = Os;
++
++ gcmkONERROR(gckOS_CreateMutex(Os, &channel->mutex));
++
++ /* Allocate shared memory for passing gcTA_INTERFACE. */
++ channel->bytes = gcmSIZEOF(gcsTA_INTERFACE);
++ channel->virtual = kmalloc(channel->bytes, GFP_KERNEL | __GFP_NOWARN);
++
++ if (!channel->virtual)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
++ }
++
++ channel->inputBuffer.size = channel->bytes;
++ channel->inputBuffer.flags = TEEC_MEM_INPUT | TEEC_MEM_OUTPUT;
++ channel->inputBuffer.phyAddr = (void *)virt_to_phys(channel->virtual);
++
++ result = TEEC_RegisterSharedMemory(&teecContext, &channel->inputBuffer);
++
++ if (result != TEEC_SUCCESS)
++ {
++ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
++ }
++
++ operation.paramTypes = TEEC_PARAM_TYPES(
++ TEEC_VALUE_INPUT,
++ TEEC_NONE,
++ TEEC_NONE,
++ TEEC_NONE);
++
++ operation.params[0].value.a = GPU;
++
++ /* Open session with TEE application. */
++ result = TEEC_OpenSession(
++ &teecContext,
++ &channel->session,
++ &gpu3d_uuid,
++ TEEC_LOGIN_USER,
++ NULL,
++ &operation,
++        NULL);
++
++    if (result != TEEC_SUCCESS)
++    {
++        gcmkONERROR(gcvSTATUS_GENERIC_IO);
++    }
++
++ /* Prepare callback. */
++ TEEC_RegisterCallback(&channel->session, gpu3d_session_callback, channel);
++
++ *Channel = (gctUINT32)channel;
++
++ return gcvSTATUS_OK;
++
++OnError:
++ if (channel)
++ {
++        if (channel->virtual)
++        {
++            kfree(channel->virtual);
++        }
++
++ if (channel->mutex)
++ {
++ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, channel->mutex));
++ }
++
++ gcmkVERIFY_OK(gckOS_Free(Os, channel));
++ }
++
++ return status;
++}
++
++gceSTATUS
++gckOS_CloseSecurityChannel(
++ IN gctUINT32 Channel
++ )
++{
++    /* TODO: tear down the TEE session and release the channel. */
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++gckOS_CallSecurityService(
++ IN gctUINT32 Channel,
++ IN gcsTA_INTERFACE *Interface
++ )
++{
++ gceSTATUS status;
++ TEEC_Result result;
++ gcsSecurityChannel *channel = (gcsSecurityChannel *)Channel;
++ TEEC_Operation operation = {0};
++
++ gcmkHEADER();
++ gcmkVERIFY_ARGUMENT(Channel != 0);
++
++ gckOS_AcquireMutex(channel->os, channel->mutex, gcvINFINITE);
++
++ gckOS_MemCopy(channel->virtual, Interface, channel->bytes);
++
++ operation.paramTypes = TEEC_PARAM_TYPES(
++ TEEC_MEMREF_PARTIAL_INPUT,
++ TEEC_NONE,
++ TEEC_NONE,
++ TEEC_NONE);
++
++ /* Note: we use the updated size in the MemRef output by the encryption. */
++ operation.params[0].memref.parent = &channel->inputBuffer;
++ operation.params[0].memref.offset = 0;
++ operation.params[0].memref.size = sizeof(gcsTA_INTERFACE);
++ operation.started = true;
++
++ /* Start the commit command within the TEE application. */
++ result = TEEC_InvokeCommand(
++ &channel->session,
++ gcvTA_COMMAND_DISPATCH,
++ &operation,
++ NULL);
++
++ gckOS_MemCopy(Interface, channel->virtual, channel->bytes);
++
++ gckOS_ReleaseMutex(channel->os, channel->mutex);
++
++ if (result != TEEC_SUCCESS)
++ {
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
++
++gceSTATUS
++gckOS_InitSecurityChannel(
++ IN gctUINT32 Channel
++ )
++{
++ gceSTATUS status;
++ TEEC_Result result;
++ gcsSecurityChannel *channel = (gcsSecurityChannel *)Channel;
++ TEEC_Operation operation = {0};
++
++ gcmkHEADER();
++ gcmkVERIFY_ARGUMENT(Channel != 0);
++
++ operation.paramTypes = TEEC_PARAM_TYPES(
++ TEEC_MEMREF_PARTIAL_INPUT,
++ TEEC_NONE,
++ TEEC_NONE,
++ TEEC_NONE);
++
++ /* Note: we use the updated size in the MemRef output by the encryption. */
++ operation.params[0].memref.parent = &channel->inputBuffer;
++ operation.params[0].memref.offset = 0;
++ operation.params[0].memref.size = gcmSIZEOF(gcsTA_INTERFACE);
++ operation.started = true;
++
++ /* Start the commit command within the TEE application. */
++ result = TEEC_InvokeCommand(
++ &channel->session,
++ gcvTA_COMMAND_INIT,
++ &operation,
++ NULL);
++
++ if (result != TEEC_SUCCESS)
++ {
++ gcmkONERROR(gcvSTATUS_GENERIC_IO);
++ }
++
++ gcmkFOOTER_NO();
++ return gcvSTATUS_OK;
++
++OnError:
++ gcmkFOOTER();
++ return status;
++}
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_sync.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_sync.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_sync.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_sync.c 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,174 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include <linux/kernel.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/miscdevice.h>
++#include <linux/module.h>
++#include <linux/syscalls.h>
++#include <linux/uaccess.h>
++
++#include "gc_hal_kernel_sync.h"
++
++#if gcdANDROID_NATIVE_FENCE_SYNC
++
++static struct sync_pt *
++viv_sync_pt_dup(
++ struct sync_pt * sync_pt
++ )
++{
++ gceSTATUS status;
++ struct viv_sync_pt *pt;
++ struct viv_sync_pt *src;
++ struct viv_sync_timeline *obj;
++
++ src = (struct viv_sync_pt *) sync_pt;
++ obj = (struct viv_sync_timeline *) sync_pt->parent;
++
++ /* Create the new sync_pt. */
++ pt = (struct viv_sync_pt *)
++ sync_pt_create(&obj->obj, sizeof(struct viv_sync_pt));
++
++ pt->stamp = src->stamp;
++ pt->sync = src->sync;
++
++ /* Reference sync point. */
++ status = gckOS_ReferenceSyncPoint(obj->os, pt->sync);
++
++ if (gcmIS_ERROR(status))
++ {
++ sync_pt_free((struct sync_pt *)pt);
++ return NULL;
++ }
++
++ return (struct sync_pt *)pt;
++}
++
++static int
++viv_sync_pt_has_signaled(
++ struct sync_pt * sync_pt
++ )
++{
++ gceSTATUS status;
++ gctBOOL state;
++ struct viv_sync_pt * pt;
++ struct viv_sync_timeline * obj;
++
++ pt = (struct viv_sync_pt *)sync_pt;
++ obj = (struct viv_sync_timeline *)sync_pt->parent;
++
++ status = gckOS_QuerySyncPoint(obj->os, pt->sync, &state);
++
++ if (gcmIS_ERROR(status))
++ {
++ /* Error. */
++ return -1;
++ }
++
++ return state;
++}
++
++static int
++viv_sync_pt_compare(
++ struct sync_pt * a,
++ struct sync_pt * b
++ )
++{
++ int ret;
++ struct viv_sync_pt * pt1 = (struct viv_sync_pt *) a;
++ struct viv_sync_pt * pt2 = (struct viv_sync_pt *) b;
++
++ ret = (pt1->stamp < pt2->stamp) ? -1
++ : (pt1->stamp == pt2->stamp) ? 0
++ : 1;
++
++ return ret;
++}
++
++static void
++viv_sync_pt_free(
++ struct sync_pt * sync_pt
++ )
++{
++ struct viv_sync_pt * pt;
++ struct viv_sync_timeline * obj;
++
++ pt = (struct viv_sync_pt *) sync_pt;
++ obj = (struct viv_sync_timeline *) sync_pt->parent;
++
++ gckOS_DestroySyncPoint(obj->os, pt->sync);
++}
++
++static struct sync_timeline_ops viv_timeline_ops =
++{
++ .driver_name = "viv_sync",
++ .dup = viv_sync_pt_dup,
++ .has_signaled = viv_sync_pt_has_signaled,
++ .compare = viv_sync_pt_compare,
++ .free_pt = viv_sync_pt_free,
++};
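++/* The Android sync framework drives these callbacks: has_signaled polls
++   the GPU sync point, compare orders fences by timeline stamp, and
++   free_pt drops the kernel sync point reference. */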
++
++struct viv_sync_timeline *
++viv_sync_timeline_create(
++ const char * name,
++ gckOS os
++ )
++{
++ struct viv_sync_timeline * obj;
++
++ obj = (struct viv_sync_timeline *)
++ sync_timeline_create(&viv_timeline_ops, sizeof(struct viv_sync_timeline), name);
++
++ obj->os = os;
++ obj->stamp = 0;
++
++ return obj;
++}
++
++struct sync_pt *
++viv_sync_pt_create(
++ struct viv_sync_timeline * obj,
++ gctSYNC_POINT SyncPoint
++ )
++{
++ gceSTATUS status;
++ struct viv_sync_pt * pt;
++
++ pt = (struct viv_sync_pt *)
++ sync_pt_create(&obj->obj, sizeof(struct viv_sync_pt));
++
++ pt->stamp = obj->stamp++;
++ pt->sync = SyncPoint;
++
++ /* Dup signal. */
++ status = gckOS_ReferenceSyncPoint(obj->os, SyncPoint);
++
++ if (gcmIS_ERROR(status))
++ {
++ sync_pt_free((struct sync_pt *)pt);
++ return NULL;
++ }
++
++ return (struct sync_pt *) pt;
++}
++
++#endif
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_sync.h linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_sync.h
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_sync.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/gc_hal_kernel_sync.h 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,71 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2013 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#ifndef __gc_hal_kernel_sync_h_
++#define __gc_hal_kernel_sync_h_
++
++#include <linux/types.h>
++
++#include <uapi/sync.h>
++
++#include <gc_hal.h>
++#include <gc_hal_base.h>
++
++struct viv_sync_timeline
++{
++ /* Parent object. */
++ struct sync_timeline obj;
++
++ /* Timestamp when sync_pt is created. */
++ gctUINT stamp;
++
++ /* Pointer to os struct. */
++ gckOS os;
++};
++
++
++struct viv_sync_pt
++{
++ /* Parent object. */
++ struct sync_pt pt;
++
++    /* Reference sync point. */
++ gctSYNC_POINT sync;
++
++ /* Timestamp when sync_pt is created. */
++ gctUINT stamp;
++};
++
++/* Create viv_sync_timeline object. */
++struct viv_sync_timeline *
++viv_sync_timeline_create(
++ const char * Name,
++ gckOS Os
++ );
++
++/* Create viv_sync_pt object. */
++struct sync_pt *
++viv_sync_pt_create(
++ struct viv_sync_timeline * Obj,
++ gctSYNC_POINT SyncPoint
++ );
++
++#endif /* __gc_hal_kernel_sync_h_ */
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.c linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.c
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.c 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,880 @@
++/****************************************************************************
++*
++* Copyright (C) 2005 - 2014 by Vivante Corp.
++*
++* This program is free software; you can redistribute it and/or modify
++* it under the terms of the GNU General Public License as published by
++* the Free Software Foundation; either version 2 of the license, or
++* (at your option) any later version.
++*
++* This program is distributed in the hope that it will be useful,
++* but WITHOUT ANY WARRANTY; without even the implied warranty of
++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++* GNU General Public License for more details.
++*
++* You should have received a copy of the GNU General Public License
++* along with this program; if not write to the Free Software
++* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++*
++*****************************************************************************/
++
++
++#include "gc_hal_kernel_linux.h"
++#include "gc_hal_kernel_platform.h"
++#include "gc_hal_kernel_device.h"
++#include "gc_hal_driver.h"
++#include <linux/slab.h>
++
++#if USE_PLATFORM_DRIVER
++# include <linux/platform_device.h>
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++#include <mach/viv_gpu.h>
++#else
++#include <linux/pm_runtime.h>
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
++#include <mach/busfreq.h>
++#else
++#include <linux/busfreq-imx6.h>
++#include <linux/reset.h>
++#endif
++#endif
++
++#include <linux/clk.h>
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
++#include <mach/hardware.h>
++#endif
++#include <linux/pm_runtime.h>
++
++#include <linux/regulator/consumer.h>
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++#include <linux/device_cooling.h>
++#define REG_THERMAL_NOTIFIER(a) register_devfreq_cooling_notifier(a);
++#define UNREG_THERMAL_NOTIFIER(a) unregister_devfreq_cooling_notifier(a);
++#else
++extern int register_thermal_notifier(struct notifier_block *nb);
++extern int unregister_thermal_notifier(struct notifier_block *nb);
++#define REG_THERMAL_NOTIFIER(a) register_thermal_notifier(a);
++#define UNREG_THERMAL_NOTIFIER(a) unregister_thermal_notifier(a);
++#endif
++
++static int initgpu3DMinClock = 1;
++module_param(initgpu3DMinClock, int, 0644);
++
++struct platform_device *pdevice;
++
++#ifdef CONFIG_GPU_LOW_MEMORY_KILLER
++# include <linux/kernel.h>
++# include <linux/mm.h>
++# include <linux/oom.h>
++# include <linux/sched.h>
++
++struct task_struct *lowmem_deathpending;
++
++static int
++task_notify_func(struct notifier_block *self, unsigned long val, void *data);
++
++static struct notifier_block task_nb = {
++ .notifier_call = task_notify_func,
++};
++
++static int
++task_notify_func(struct notifier_block *self, unsigned long val, void *data)
++{
++ struct task_struct *task = data;
++
++ if (task == lowmem_deathpending)
++ lowmem_deathpending = NULL;
++
++ return NOTIFY_OK;
++}
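++/* The task-free notifier clears lowmem_deathpending once the chosen
++   victim has actually exited, re-arming the shrinker. */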
++
++extern struct task_struct *lowmem_deathpending;
++static unsigned long lowmem_deathpending_timeout;
++
++static int force_contiguous_lowmem_shrink(IN gckKERNEL Kernel)
++{
++ struct task_struct *p;
++ struct task_struct *selected = NULL;
++ int tasksize;
++ int ret = -1;
++ int min_adj = 0;
++ int selected_tasksize = 0;
++ int selected_oom_adj;
++    /*
++     * If we already have a death outstanding, bail out right away,
++     * indicating to vmscan that we have nothing further to offer
++     * on this pass.
++     */
++ if (lowmem_deathpending &&
++ time_before_eq(jiffies, lowmem_deathpending_timeout))
++ return 0;
++ selected_oom_adj = min_adj;
++
++ rcu_read_lock();
++ for_each_process(p) {
++ struct mm_struct *mm;
++ struct signal_struct *sig;
++ gcuDATABASE_INFO info;
++ int oom_adj;
++
++ task_lock(p);
++ mm = p->mm;
++ sig = p->signal;
++ if (!mm || !sig) {
++ task_unlock(p);
++ continue;
++ }
++ oom_adj = sig->oom_score_adj;
++ if (oom_adj < min_adj) {
++ task_unlock(p);
++ continue;
++ }
++
++ tasksize = 0;
++ task_unlock(p);
++ rcu_read_unlock();
++
++ if (gckKERNEL_QueryProcessDB(Kernel, p->pid, gcvFALSE, gcvDB_VIDEO_MEMORY, &info) == gcvSTATUS_OK){
++ tasksize += info.counters.bytes / PAGE_SIZE;
++ }
++ if (gckKERNEL_QueryProcessDB(Kernel, p->pid, gcvFALSE, gcvDB_CONTIGUOUS, &info) == gcvSTATUS_OK){
++ tasksize += info.counters.bytes / PAGE_SIZE;
++ }
++
++ rcu_read_lock();
++
++ if (tasksize <= 0)
++ continue;
++
++ gckOS_Print("<gpu> pid %d (%s), adj %d, size %d \n", p->pid, p->comm, oom_adj, tasksize);
++
++ if (selected) {
++ if (oom_adj < selected_oom_adj)
++ continue;
++ if (oom_adj == selected_oom_adj &&
++ tasksize <= selected_tasksize)
++ continue;
++ }
++ selected = p;
++ selected_tasksize = tasksize;
++ selected_oom_adj = oom_adj;
++ }
++ if (selected) {
++ gckOS_Print("<gpu> send sigkill to %d (%s), adj %d, size %d\n",
++ selected->pid, selected->comm,
++ selected_oom_adj, selected_tasksize);
++ lowmem_deathpending = selected;
++ lowmem_deathpending_timeout = jiffies + HZ;
++ force_sig(SIGKILL, selected);
++ ret = 0;
++ }
++ rcu_read_unlock();
++ return ret;
++}
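++/* Victim selection mirrors the Android low-memory killer: pick the task
++   with the highest oom_score_adj, breaking ties by GPU memory footprint
++   (video-memory plus contiguous pages). */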
++
++
++gceSTATUS
++_ShrinkMemory(
++ IN gckPLATFORM Platform
++ )
++{
++ struct platform_device *pdev;
++ gckGALDEVICE galDevice;
++ gckKERNEL kernel;
++
++ pdev = Platform->device;
++
++ galDevice = platform_get_drvdata(pdev);
++
++ kernel = galDevice->kernels[gcvCORE_MAJOR];
++
++ if (kernel != gcvNULL)
++ {
++ force_contiguous_lowmem_shrink(kernel);
++ }
++ else
++ {
++ gcmkPRINT("%s(%d) can't find kernel! ", __FUNCTION__, __LINE__);
++ }
++
++ return gcvSTATUS_OK;
++}
++#endif
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++static int thermal_hot_pm_notify(struct notifier_block *nb, unsigned long event,
++ void *dummy)
++{
++ static gctUINT orgFscale, minFscale, maxFscale;
++ static gctBOOL bAlreadyTooHot = gcvFALSE;
++ gckHARDWARE hardware;
++ gckGALDEVICE galDevice;
++
++ galDevice = platform_get_drvdata(pdevice);
++ if (!galDevice)
++ {
++ /* GPU is not ready, so it is meaningless to change GPU freq. */
++ return NOTIFY_OK;
++ }
++
++ if (!galDevice->kernels[gcvCORE_MAJOR])
++ {
++ return NOTIFY_OK;
++ }
++
++ hardware = galDevice->kernels[gcvCORE_MAJOR]->hardware;
++
++ if (!hardware)
++ {
++ return NOTIFY_OK;
++ }
++
++ if (event && !bAlreadyTooHot) {
++ gckHARDWARE_GetFscaleValue(hardware,&orgFscale,&minFscale, &maxFscale);
++ gckHARDWARE_SetFscaleValue(hardware, minFscale);
++ bAlreadyTooHot = gcvTRUE;
++ gckOS_Print("System is too hot. GPU3D will work at %d/64 clock.\n", minFscale);
++ } else if (!event && bAlreadyTooHot) {
++ gckHARDWARE_SetFscaleValue(hardware, orgFscale);
++ gckOS_Print("Hot alarm is canceled. GPU3D clock will return to %d/64\n", orgFscale);
++ bAlreadyTooHot = gcvFALSE;
++ }
++ return NOTIFY_OK;
++}
++
++static struct notifier_block thermal_hot_pm_notifier = {
++ .notifier_call = thermal_hot_pm_notify,
++ };
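++/* On a hot trip event the 3D core is clamped to its minimum Fscale
++   divider (N/64 of the nominal clock) and restored once the alarm
++   clears. */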
++
++static ssize_t show_gpu3DMinClock(struct device_driver *dev, char *buf)
++{
++    gctUINT currentf = 0, minf = 0, maxf = 0;
++    gckGALDEVICE galDevice;
++
++    galDevice = platform_get_drvdata(pdevice);
++    if (galDevice->kernels[gcvCORE_MAJOR])
++    {
++        gckHARDWARE_GetFscaleValue(galDevice->kernels[gcvCORE_MAJOR]->hardware,
++            &currentf, &minf, &maxf);
++    }
++    snprintf(buf, PAGE_SIZE, "%u\n", minf);
++ return strlen(buf);
++}
++
++static ssize_t update_gpu3DMinClock(struct device_driver *dev, const char *buf, size_t count)
++{
++
++ gctINT fields;
++ gctUINT MinFscaleValue;
++ gckGALDEVICE galDevice;
++
++ galDevice = platform_get_drvdata(pdevice);
++ if(galDevice->kernels[gcvCORE_MAJOR])
++ {
++        fields = sscanf(buf, "%u", &MinFscaleValue);
++ if (fields < 1)
++ return -EINVAL;
++
++ gckHARDWARE_SetMinFscaleValue(galDevice->kernels[gcvCORE_MAJOR]->hardware,MinFscaleValue);
++ }
++
++ return count;
++}
++
++static DRIVER_ATTR(gpu3DMinClock, S_IRUGO | S_IWUSR, show_gpu3DMinClock, update_gpu3DMinClock);
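++/* Exposes a gpu3DMinClock driver attribute (typically under
++   /sys/bus/platform/drivers/galcore/): reads report the current minimum
++   Fscale value, writes set a new floor. */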
++#endif
++
++
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++static const struct of_device_id mxs_gpu_dt_ids[] = {
++ { .compatible = "fsl,imx6q-gpu", },
++ {/* sentinel */}
++};
++MODULE_DEVICE_TABLE(of, mxs_gpu_dt_ids);
++#endif
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++struct contiguous_mem_pool {
++ struct dma_attrs attrs;
++ dma_addr_t phys;
++ void *virt;
++ size_t size;
++};
++#endif
++
++struct imx_priv {
++ /* Clock management.*/
++ struct clk *clk_3d_core;
++ struct clk *clk_3d_shader;
++ struct clk *clk_3d_axi;
++ struct clk *clk_2d_core;
++ struct clk *clk_2d_axi;
++ struct clk *clk_vg_axi;
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ /*Power management.*/
++ struct regulator *gpu_regulator;
++#endif
++#endif
++ /*Run time pm*/
++ struct device *pmdev;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct contiguous_mem_pool *pool;
++ struct reset_control *rstc[gcdMAX_GPU_COUNT];
++#endif
++};
++
++static struct imx_priv imxPriv;
++
++gceSTATUS
++gckPLATFORM_AdjustParam(
++ IN gckPLATFORM Platform,
++ OUT gcsMODULE_PARAMETERS *Args
++ )
++{
++ struct resource* res;
++ struct platform_device* pdev = Platform->device;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++    struct device_node *dn = pdev->dev.of_node;
++ const u32 *prop;
++#else
++ struct viv_gpu_platform_data *pdata;
++#endif
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phys_baseaddr");
++ if (res)
++ Args->baseAddress = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_3d");
++ if (res)
++ Args->irqLine = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_3d");
++ if (res)
++ {
++ Args->registerMemBase = res->start;
++ Args->registerMemSize = res->end - res->start + 1;
++ }
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_2d");
++ if (res)
++ Args->irqLine2D = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_2d");
++ if (res)
++ {
++ Args->registerMemBase2D = res->start;
++ Args->registerMemSize2D = res->end - res->start + 1;
++ }
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_vg");
++ if (res)
++ Args->irqLineVG = res->start;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_vg");
++ if (res)
++ {
++ Args->registerMemBaseVG = res->start;
++ Args->registerMemSizeVG = res->end - res->start + 1;
++ }
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ Args->contiguousBase = 0;
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ prop = of_get_property(dn, "contiguousbase", NULL);
++    if (prop)
++        Args->contiguousBase = *prop;
++    of_property_read_u32(dn, "contiguoussize", (u32 *)&Args->contiguousSize);
++#else
++ pdata = pdev->dev.platform_data;
++ if (pdata) {
++ Args->contiguousBase = pdata->reserved_mem_base;
++ Args->contiguousSize = pdata->reserved_mem_size;
++ }
++#endif
++ if (Args->contiguousSize == 0)
++        gckOS_Print("Warning: no contiguous memory is reserved for the GPU!\n");
++
++ Args->gpu3DMinClock = initgpu3DMinClock;
++
++ return gcvSTATUS_OK;
++}
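++/* Register windows and IRQs are discovered through named platform
++   resources; on device-tree kernels (>= 3.10) the contiguous pool is
++   expected to come from CMA, so contiguousBase is left at 0 here. */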
++
++gceSTATUS
++_AllocPriv(
++ IN gckPLATFORM Platform
++ )
++{
++ Platform->priv = &imxPriv;
++
++#ifdef CONFIG_GPU_LOW_MEMORY_KILLER
++ task_free_register(&task_nb);
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_FreePriv(
++ IN gckPLATFORM Platform
++ )
++{
++#ifdef CONFIG_GPU_LOW_MEMORY_KILLER
++ task_free_unregister(&task_nb);
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_GetPower(
++ IN gckPLATFORM Platform
++ )
++{
++ struct device* pdev = &Platform->device->dev;
++ struct imx_priv *priv = Platform->priv;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct reset_control *rstc;
++#endif
++
++#ifdef CONFIG_PM
++ /*Init runtime pm for gpu*/
++ pm_runtime_enable(pdev);
++ priv->pmdev = pdev;
++#endif
++
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ rstc = devm_reset_control_get(pdev, "gpu3d");
++ priv->rstc[gcvCORE_MAJOR] = IS_ERR(rstc) ? NULL : rstc;
++ rstc = devm_reset_control_get(pdev, "gpu2d");
++ priv->rstc[gcvCORE_2D] = IS_ERR(rstc) ? NULL : rstc;
++ rstc = devm_reset_control_get(pdev, "gpuvg");
++ priv->rstc[gcvCORE_VG] = IS_ERR(rstc) ? NULL : rstc;
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ /*get gpu regulator*/
++ priv->gpu_regulator = regulator_get(pdev, "cpu_vddgpu");
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ priv->gpu_regulator = devm_regulator_get(pdev, "pu");
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ if (IS_ERR(priv->gpu_regulator)) {
++ gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_DRIVER,
++ "%s(%d): Failed to get gpu regulator \n",
++ __FUNCTION__, __LINE__);
++ return gcvSTATUS_NOT_FOUND;
++ }
++#endif
++#endif
++
++ /*Initialize the clock structure*/
++ priv->clk_3d_core = clk_get(pdev, "gpu3d_clk");
++ if (!IS_ERR(priv->clk_3d_core)) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ if (cpu_is_mx6q()) {
++ priv->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk");
++ if (IS_ERR(priv->clk_3d_shader)) {
++ clk_put(priv->clk_3d_core);
++ priv->clk_3d_core = NULL;
++ priv->clk_3d_shader = NULL;
++ gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n");
++ }
++ }
++#else
++ priv->clk_3d_axi = clk_get(pdev, "gpu3d_axi_clk");
++ priv->clk_3d_shader = clk_get(pdev, "gpu3d_shader_clk");
++ if (IS_ERR(priv->clk_3d_shader)) {
++ clk_put(priv->clk_3d_core);
++ priv->clk_3d_core = NULL;
++ priv->clk_3d_shader = NULL;
++ gckOS_Print("galcore: clk_get gpu3d_shader_clk failed, disable 3d!\n");
++ }
++#endif
++ } else {
++ priv->clk_3d_core = NULL;
++ gckOS_Print("galcore: clk_get gpu3d_clk failed, disable 3d!\n");
++ }
++
++ priv->clk_2d_core = clk_get(pdev, "gpu2d_clk");
++ if (IS_ERR(priv->clk_2d_core)) {
++ priv->clk_2d_core = NULL;
++ gckOS_Print("galcore: clk_get 2d core clock failed, disable 2d/vg!\n");
++ } else {
++ priv->clk_2d_axi = clk_get(pdev, "gpu2d_axi_clk");
++ if (IS_ERR(priv->clk_2d_axi)) {
++ priv->clk_2d_axi = NULL;
++ gckOS_Print("galcore: clk_get 2d axi clock failed, disable 2d\n");
++ }
++
++ priv->clk_vg_axi = clk_get(pdev, "openvg_axi_clk");
++ if (IS_ERR(priv->clk_vg_axi)) {
++ priv->clk_vg_axi = NULL;
++ gckOS_Print("galcore: clk_get vg clock failed, disable vg!\n");
++ }
++ }
++
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ pdevice = Platform->device;
++ REG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
++ {
++ int ret = 0;
++ ret = driver_create_file(pdevice->dev.driver, &driver_attr_gpu3DMinClock);
++ if(ret)
++ dev_err(&pdevice->dev, "create gpu3DMinClock attr failed (%d)\n", ret);
++ }
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_PutPower(
++ IN gckPLATFORM Platform
++ )
++{
++ struct imx_priv *priv = Platform->priv;
++
++ /*Disable clock*/
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ if (priv->clk_3d_axi) {
++ clk_put(priv->clk_3d_axi);
++ priv->clk_3d_axi = NULL;
++ }
++#endif
++ if (priv->clk_3d_core) {
++ clk_put(priv->clk_3d_core);
++ priv->clk_3d_core = NULL;
++ }
++ if (priv->clk_3d_shader) {
++ clk_put(priv->clk_3d_shader);
++ priv->clk_3d_shader = NULL;
++ }
++ if (priv->clk_2d_core) {
++ clk_put(priv->clk_2d_core);
++ priv->clk_2d_core = NULL;
++ }
++ if (priv->clk_2d_axi) {
++ clk_put(priv->clk_2d_axi);
++ priv->clk_2d_axi = NULL;
++ }
++ if (priv->clk_vg_axi) {
++ clk_put(priv->clk_vg_axi);
++ priv->clk_vg_axi = NULL;
++ }
++
++#ifdef CONFIG_PM
++ if(priv->pmdev)
++ pm_runtime_disable(priv->pmdev);
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ if (priv->gpu_regulator) {
++ regulator_put(priv->gpu_regulator);
++ priv->gpu_regulator = NULL;
++ }
++#endif
++
++#if gcdENABLE_FSCALE_VAL_ADJUST
++ UNREG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
++
++ driver_remove_file(pdevice->dev.driver, &driver_attr_gpu3DMinClock);
++#endif
++
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_SetPower(
++ IN gckPLATFORM Platform,
++ IN gceCORE GPU,
++ IN gctBOOL Enable
++ )
++{
++ struct imx_priv* priv = Platform->priv;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ int ret;
++#endif
++#endif
++
++ if (Enable)
++ {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ if(!IS_ERR(priv->gpu_regulator)) {
++ ret = regulator_enable(priv->gpu_regulator);
++ if (ret != 0)
++ gckOS_Print("%s(%d): fail to enable pu regulator %d!\n",
++ __FUNCTION__, __LINE__, ret);
++ }
++#else
++ imx_gpc_power_up_pu(true);
++#endif
++#endif
++
++#ifdef CONFIG_PM
++ pm_runtime_get_sync(priv->pmdev);
++#endif
++ }
++ else
++ {
++#ifdef CONFIG_PM
++ pm_runtime_put_sync(priv->pmdev);
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ if(!IS_ERR(priv->gpu_regulator))
++ regulator_disable(priv->gpu_regulator);
++#else
++ imx_gpc_power_up_pu(false);
++#endif
++#endif
++
++ }
++
++ return gcvSTATUS_OK;
++}
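++/* Powering a core up takes a runtime PM reference (which raises the bus
++   frequency via gpu_runtime_resume); powering it down releases the
++   reference again. */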
++
++gceSTATUS
++_SetClock(
++ IN gckPLATFORM Platform,
++ IN gceCORE GPU,
++ IN gctBOOL Enable
++ )
++{
++ struct imx_priv* priv = Platform->priv;
++ struct clk *clk_3dcore = priv->clk_3d_core;
++ struct clk *clk_3dshader = priv->clk_3d_shader;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ struct clk *clk_3d_axi = priv->clk_3d_axi;
++#endif
++ struct clk *clk_2dcore = priv->clk_2d_core;
++ struct clk *clk_2d_axi = priv->clk_2d_axi;
++ struct clk *clk_vg_axi = priv->clk_vg_axi;
++
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++ if (Enable) {
++ switch (GPU) {
++ case gcvCORE_MAJOR:
++ clk_enable(clk_3dcore);
++ if (cpu_is_mx6q())
++ clk_enable(clk_3dshader);
++ break;
++ case gcvCORE_2D:
++ clk_enable(clk_2dcore);
++ clk_enable(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_enable(clk_2dcore);
++ clk_enable(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ } else {
++ switch (GPU) {
++ case gcvCORE_MAJOR:
++ if (cpu_is_mx6q())
++ clk_disable(clk_3dshader);
++ clk_disable(clk_3dcore);
++ break;
++ case gcvCORE_2D:
++ clk_disable(clk_2dcore);
++ clk_disable(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_disable(clk_2dcore);
++ clk_disable(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ }
++#else
++ if (Enable) {
++ switch (GPU) {
++ case gcvCORE_MAJOR:
++ clk_prepare(clk_3dcore);
++ clk_enable(clk_3dcore);
++ clk_prepare(clk_3dshader);
++ clk_enable(clk_3dshader);
++ clk_prepare(clk_3d_axi);
++ clk_enable(clk_3d_axi);
++ break;
++ case gcvCORE_2D:
++ clk_prepare(clk_2dcore);
++ clk_enable(clk_2dcore);
++ clk_prepare(clk_2d_axi);
++ clk_enable(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_prepare(clk_2dcore);
++ clk_enable(clk_2dcore);
++ clk_prepare(clk_vg_axi);
++ clk_enable(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ } else {
++ switch (GPU) {
++ case gcvCORE_MAJOR:
++ clk_disable(clk_3dshader);
++ clk_unprepare(clk_3dshader);
++ clk_disable(clk_3dcore);
++ clk_unprepare(clk_3dcore);
++ clk_disable(clk_3d_axi);
++ clk_unprepare(clk_3d_axi);
++ break;
++ case gcvCORE_2D:
++ clk_disable(clk_2dcore);
++ clk_unprepare(clk_2dcore);
++ clk_disable(clk_2d_axi);
++ clk_unprepare(clk_2d_axi);
++ break;
++ case gcvCORE_VG:
++ clk_disable(clk_2dcore);
++ clk_unprepare(clk_2dcore);
++ clk_disable(clk_vg_axi);
++ clk_unprepare(clk_vg_axi);
++ break;
++ default:
++ break;
++ }
++ }
++#endif
++
++ return gcvSTATUS_OK;
++}
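++/* On kernels >= 3.5 the common clock framework requires clocks to be
++   prepared before enabling and unprepared after disabling, hence the
++   paired calls above. */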
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++#ifdef CONFIG_PM
++static int gpu_runtime_suspend(struct device *dev)
++{
++ release_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++
++static int gpu_runtime_resume(struct device *dev)
++{
++ request_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++
++static struct dev_pm_ops gpu_pm_ops;
++#endif
++#endif
++
++gceSTATUS
++_AdjustDriver(
++ IN gckPLATFORM Platform
++ )
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ struct platform_driver * driver = Platform->driver;
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ driver->driver.of_match_table = mxs_gpu_dt_ids;
++#endif
++
++ /* Override PM callbacks to add runtime PM callbacks. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
++ /* Fill local structure with original value. */
++ memcpy(&gpu_pm_ops, driver->driver.pm, sizeof(struct dev_pm_ops));
++
++ /* Add runtime PM callback. */
++#ifdef CONFIG_PM
++ gpu_pm_ops.runtime_suspend = gpu_runtime_suspend;
++ gpu_pm_ops.runtime_resume = gpu_runtime_resume;
++ gpu_pm_ops.runtime_idle = NULL;
++#endif
++
++ /* Replace callbacks. */
++ driver->driver.pm = &gpu_pm_ops;
++#endif
++ return gcvSTATUS_OK;
++}
++
++gceSTATUS
++_Reset(
++ IN gckPLATFORM Platform,
++ gceCORE GPU
++ )
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
++#define SRC_SCR_OFFSET 0
++#define BP_SRC_SCR_GPU3D_RST 1
++#define BP_SRC_SCR_GPU2D_RST 4
++ void __iomem *src_base = IO_ADDRESS(SRC_BASE_ADDR);
++ gctUINT32 bit_offset,val;
++
++ if(GPU == gcvCORE_MAJOR) {
++ bit_offset = BP_SRC_SCR_GPU3D_RST;
++ } else if((GPU == gcvCORE_VG)
++ ||(GPU == gcvCORE_2D)) {
++ bit_offset = BP_SRC_SCR_GPU2D_RST;
++ } else {
++ return gcvSTATUS_INVALID_CONFIG;
++ }
++ val = __raw_readl(src_base + SRC_SCR_OFFSET);
++ val &= ~(1 << (bit_offset));
++ val |= (1 << (bit_offset));
++ __raw_writel(val, src_base + SRC_SCR_OFFSET);
++
++ while ((__raw_readl(src_base + SRC_SCR_OFFSET) &
++ (1 << (bit_offset))) != 0) {
++ }
++
++ return gcvSTATUS_NOT_SUPPORTED;
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
++ struct imx_priv* priv = Platform->priv;
++ struct reset_control *rstc = priv->rstc[GPU];
++ if (rstc)
++ reset_control_reset(rstc);
++#else
++ imx_src_reset_gpu((int)GPU);
++#endif
++ return gcvSTATUS_OK;
++}
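++/* Three version-dependent reset paths: direct SRC register writes
++   (< 3.5), imx_src_reset_gpu() (3.5 to 3.10), and the reset controller
++   API (>= 3.10). */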
++
++gcsPLATFORM_OPERATIONS platformOperations = {
++ .adjustParam = gckPLATFORM_AdjustParam,
++ .allocPriv = _AllocPriv,
++ .freePriv = _FreePriv,
++ .getPower = _GetPower,
++ .putPower = _PutPower,
++ .setPower = _SetPower,
++ .setClock = _SetClock,
++ .adjustDriver = _AdjustDriver,
++ .reset = _Reset,
++#ifdef CONFIG_GPU_LOW_MEMORY_KILLER
++ .shrinkMemory = _ShrinkMemory,
++#endif
++};
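++/* This vtable is handed out by gckPLATFORM_QueryOperations() below and
++   consumed by the generic driver core in gpu_init()/gpu_probe(). */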
++
++void
++gckPLATFORM_QueryOperations(
++ IN gcsPLATFORM_OPERATIONS ** Operations
++ )
++{
++ *Operations = &platformOperations;
++}
++
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.config linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.config
+--- linux-4.1.3/drivers/mxc/gpu-viv/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.config 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/hal/os/linux/kernel/platform/freescale/gc_hal_kernel_platform_imx6q14.config 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,15 @@
++EXTRA_CFLAGS += -DgcdDEFAULT_CONTIGUOUS_SIZE=134217728
++
++ifneq ($(CONFIG_ANDROID),)
++# build for android
++EXTRA_CFLAGS += -DgcdANDROID_NATIVE_FENCE_SYNC=3
++
++ifeq ($(CONFIG_SYNC),)
++$(warning CONFIG_SYNC is not set in kernel config)
++$(warning Android native fence sync needs CONFIG_SYNC)
++endif
++endif
++
++EXTRA_CFLAGS += -DLINUX_CMA_FSL=1
++ALLOCATOR_ARRAY_H_LOCATION := $(OS_KERNEL_DIR)/allocator/freescale
++CUSTOMER_ALLOCATOR_OBJS := $(ALLOCATOR_ARRAY_H_LOCATION)/gc_hal_kernel_allocator_cma.o
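++# LINUX_CMA_FSL pairs with the CMA-backed allocator object above,
++# replacing the driver's default contiguous allocator.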
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/Kbuild linux-xbian-imx6/drivers/mxc/gpu-viv/Kbuild
+--- linux-4.1.3/drivers/mxc/gpu-viv/Kbuild 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/Kbuild 2015-07-27 23:13:06.166979215 +0200
+@@ -0,0 +1,236 @@
++##############################################################################
++#
++# Copyright (C) 2005 - 2013 by Vivante Corp.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of the license, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not write to the Free Software
++# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++#
++##############################################################################
++
++
++#
++# Linux build file for kernel HAL driver.
++#
++
++AQROOT := $(srctree)/drivers/mxc/gpu-viv
++AQARCH := $(AQROOT)/arch/XAQ2
++AQVGARCH := $(AQROOT)/arch/GC350
++
++include $(AQROOT)/config
++
++KERNEL_DIR ?= $(TOOL_DIR)/kernel
++
++OS_KERNEL_DIR := hal/os/linux/kernel
++ARCH_KERNEL_DIR := arch/$(notdir $(AQARCH))/hal/kernel
++ARCH_VG_KERNEL_DIR := arch/$(notdir $(AQVGARCH))/hal/kernel
++HAL_KERNEL_DIR := hal/kernel
++
++# EXTRA_CFLAGS += -Werror
++
++OBJS := $(OS_KERNEL_DIR)/gc_hal_kernel_device.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_driver.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_linux.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_math.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_os.o \
++ $(OS_KERNEL_DIR)/gc_hal_kernel_debugfs.o
++
++OBJS += $(HAL_KERNEL_DIR)/gc_hal_kernel.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_command.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_db.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_debug.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_event.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_heap.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_mmu.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_video_memory.o \
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_power.o
++
++OBJS += $(ARCH_KERNEL_DIR)/gc_hal_kernel_context.o \
++ $(ARCH_KERNEL_DIR)/gc_hal_kernel_hardware.o
++
++ifeq ($(VIVANTE_ENABLE_VG), 1)
++OBJS +=\
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_vg.o\
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_command_vg.o\
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_interrupt_vg.o\
++ $(HAL_KERNEL_DIR)/gc_hal_kernel_mmu_vg.o\
++ $(ARCH_VG_KERNEL_DIR)/gc_hal_kernel_hardware_command_vg.o\
++ $(ARCH_VG_KERNEL_DIR)/gc_hal_kernel_hardware_vg.o
++endif
++
++ifneq ($(CONFIG_SYNC),)
++OBJS += $(OS_KERNEL_DIR)/gc_hal_kernel_sync.o
++endif
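++# Native fence support is built only when the kernel provides the
++# Android sync framework (CONFIG_SYNC).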
++
++ifeq ($(KERNELRELEASE), )
++
++.PHONY: all clean install
++
++# Define targets.
++all:
++	@$(MAKE) V=$(V) ARCH=$(ARCH_TYPE) -C $(KERNEL_DIR) SUBDIRS=`pwd` modules
++
++clean:
++ @rm -rf $(OBJS)
++ @rm -rf modules.order Module.symvers
++ @find $(AQROOT) -name ".gc_*.cmd" | xargs rm -f
++
++install: all
++ @mkdir -p $(SDK_DIR)/drivers
++
++else
++
++
++EXTRA_CFLAGS += -DLINUX -DDRIVER
++
++ifeq ($(ENUM_WORKAROUND), 1)
++EXTRA_CFLAGS += -DENUM_WORKAROUND=1
++else
++EXTRA_CFLAGS += -DENUM_WORKAROUND=0
++endif
++
++ifeq ($(FLAREON),1)
++EXTRA_CFLAGS += -DFLAREON
++endif
++
++ifeq ($(DEBUG), 1)
++EXTRA_CFLAGS += -DDBG=1 -DDEBUG -D_DEBUG
++else
++EXTRA_CFLAGS += -DDBG=0
++endif
++
++ifeq ($(NO_DMA_COHERENT), 1)
++EXTRA_CFLAGS += -DNO_DMA_COHERENT
++endif
++
++ifeq ($(CONFIG_DOVE_GPU), 1)
++EXTRA_CFLAGS += -DCONFIG_DOVE_GPU=1
++endif
++
++ifneq ($(USE_PLATFORM_DRIVER), 0)
++EXTRA_CFLAGS += -DUSE_PLATFORM_DRIVER=1
++else
++EXTRA_CFLAGS += -DUSE_PLATFORM_DRIVER=0
++endif
++
++
++EXTRA_CFLAGS += -DVIVANTE_PROFILER=1
++EXTRA_CFLAGS += -DVIVANTE_PROFILER_CONTEXT=1
++
++
++ifeq ($(ANDROID), 1)
++EXTRA_CFLAGS += -DANDROID=1
++endif
++
++ifeq ($(ENABLE_GPU_CLOCK_BY_DRIVER), 1)
++EXTRA_CFLAGS += -DENABLE_GPU_CLOCK_BY_DRIVER=1
++else
++EXTRA_CFLAGS += -DENABLE_GPU_CLOCK_BY_DRIVER=0
++endif
++
++ifeq ($(USE_NEW_LINUX_SIGNAL), 1)
++EXTRA_CFLAGS += -DUSE_NEW_LINUX_SIGNAL=1
++else
++EXTRA_CFLAGS += -DUSE_NEW_LINUX_SIGNAL=0
++endif
++
++ifeq ($(NO_USER_DIRECT_ACCESS_FROM_KERNEL), 1)
++EXTRA_CFLAGS += -DNO_USER_DIRECT_ACCESS_FROM_KERNEL=1
++else
++EXTRA_CFLAGS += -DNO_USER_DIRECT_ACCESS_FROM_KERNEL=0
++endif
++
++ifeq ($(FORCE_ALL_VIDEO_MEMORY_CACHED), 1)
++EXTRA_CFLAGS += -DgcdPAGED_MEMORY_CACHEABLE=1
++else
++EXTRA_CFLAGS += -DgcdPAGED_MEMORY_CACHEABLE=0
++endif
++
++ifeq ($(NONPAGED_MEMORY_CACHEABLE), 1)
++EXTRA_CFLAGS += -DgcdNONPAGED_MEMORY_CACHEABLE=1
++else
++EXTRA_CFLAGS += -DgcdNONPAGED_MEMORY_CACHEABLE=0
++endif
++
++ifeq ($(NONPAGED_MEMORY_BUFFERABLE), 1)
++EXTRA_CFLAGS += -DgcdNONPAGED_MEMORY_BUFFERABLE=1
++else
++EXTRA_CFLAGS += -DgcdNONPAGED_MEMORY_BUFFERABLE=0
++endif
++
++ifeq ($(CACHE_FUNCTION_UNIMPLEMENTED), 1)
++EXTRA_CFLAGS += -DgcdCACHE_FUNCTION_UNIMPLEMENTED=1
++else
++EXTRA_CFLAGS += -DgcdCACHE_FUNCTION_UNIMPLEMENTED=0
++endif
++
++ifeq ($(SUPPORT_SWAP_RECTANGLE), 1)
++EXTRA_CFLAGS += -DgcdSUPPORT_SWAP_RECTANGLE=1
++else
++EXTRA_CFLAGS += -DgcdSUPPORT_SWAP_RECTANGLE=0
++endif
++
++ifeq ($(VIVANTE_ENABLE_VG), 1)
++EXTRA_CFLAGS += -DgcdENABLE_VG=1
++else
++EXTRA_CFLAGS += -DgcdENABLE_VG=0
++endif
++
++ifeq ($(CONFIG_SMP), y)
++EXTRA_CFLAGS += -DgcdSMP=1
++else
++EXTRA_CFLAGS += -DgcdSMP=0
++endif
++
++ifeq ($(VIVANTE_NO_3D),1)
++EXTRA_CFLAGS += -DVIVANTE_NO_3D
++endif
++
++ifeq ($(ENABLE_OUTER_CACHE_PATCH), 1)
++EXTRA_CFLAGS += -DgcdENABLE_OUTER_CACHE_PATCH=1
++else
++EXTRA_CFLAGS += -DgcdENABLE_OUTER_CACHE_PATCH=0
++endif
++
++ifeq ($(USE_BANK_ALIGNMENT), 1)
++ EXTRA_CFLAGS += -DgcdENABLE_BANK_ALIGNMENT=1
++ ifneq ($(BANK_BIT_START), 0)
++ ifneq ($(BANK_BIT_END), 0)
++ EXTRA_CFLAGS += -DgcdBANK_BIT_START=$(BANK_BIT_START)
++ EXTRA_CFLAGS += -DgcdBANK_BIT_END=$(BANK_BIT_END)
++ endif
++ endif
++
++ ifneq ($(BANK_CHANNEL_BIT), 0)
++ EXTRA_CFLAGS += -DgcdBANK_CHANNEL_BIT=$(BANK_CHANNEL_BIT)
++ endif
++endif
++
++ifneq ($(CONFIG_SYNC),)
++EXTRA_CFLAGS += -DgcdANDROID_NATIVE_FENCE_SYNC=1
++endif
++
++EXTRA_CFLAGS += -I$(AQROOT)/hal/kernel/inc
++EXTRA_CFLAGS += -I$(AQROOT)/hal/kernel
++EXTRA_CFLAGS += -I$(AQARCH)/hal/kernel
++EXTRA_CFLAGS += -I$(AQROOT)/hal/os/linux/kernel
++
++ifeq ($(VIVANTE_ENABLE_VG), 1)
++EXTRA_CFLAGS += -I$(AQVGARCH)/hal/kernel
++endif
++
++obj-$(CONFIG_MXC_GPU_VIV) += galcore.o
++
++galcore-objs := $(OBJS)
++
++endif
+diff -Nur linux-4.1.3/drivers/mxc/gpu-viv/Kconfig linux-xbian-imx6/drivers/mxc/gpu-viv/Kconfig
+--- linux-4.1.3/drivers/mxc/gpu-viv/Kconfig 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/gpu-viv/Kconfig 2015-07-27 23:13:06.166979215 +0200
+@@ -0,0 +1,9 @@
++menu "MXC Vivante GPU support"
++ depends on SOC_IMX6Q
++
++config MXC_GPU_VIV
++ tristate "MXC Vivante GPU support"
++ ---help---
++ Say Y to get the GPU driver support.
++
++endmenu
+diff -Nur linux-4.1.3/drivers/mxc/hdmi-cec/Kconfig linux-xbian-imx6/drivers/mxc/hdmi-cec/Kconfig
+--- linux-4.1.3/drivers/mxc/hdmi-cec/Kconfig 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/hdmi-cec/Kconfig 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,11 @@
++
++menu "MXC HDMI CEC (Consumer Electronics Control) support"
++
++config MXC_HDMI_CEC
++ tristate "Support for MXC HDMI CEC (Consumer Electronics Control)"
++ depends on MFD_MXC_HDMI
++ depends on FB_MXC_HDMI || DRM_IMX_HDMI
++ help
++	  The HDMI CEC device implements the low-level CEC protocol on i.MX6x platforms.
++
++endmenu
+diff -Nur linux-4.1.3/drivers/mxc/hdmi-cec/Makefile linux-xbian-imx6/drivers/mxc/hdmi-cec/Makefile
+--- linux-4.1.3/drivers/mxc/hdmi-cec/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/hdmi-cec/Makefile 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1 @@
++obj-$(CONFIG_MXC_HDMI_CEC) += mxc_hdmi-cec.o
+diff -Nur linux-4.1.3/drivers/mxc/hdmi-cec/mxc_hdmi-cec.c linux-xbian-imx6/drivers/mxc/hdmi-cec/mxc_hdmi-cec.c
+--- linux-4.1.3/drivers/mxc/hdmi-cec/mxc_hdmi-cec.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/hdmi-cec/mxc_hdmi-cec.c 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,786 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file mxc_hdmi-cec.c
++ *
++ * @brief HDMI CEC system initialization and file operation implementation
++ *
++ * @ingroup HDMI
++ */
++
++//#define DEBUG
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/fs.h>
++#include <linux/stat.h>
++#include <linux/platform_device.h>
++#include <linux/poll.h>
++#include <linux/wait.h>
++#include <linux/list.h>
++#include <linux/delay.h>
++#include <linux/fsl_devices.h>
++#include <linux/uaccess.h>
++#include <linux/io.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/workqueue.h>
++#include <linux/sizes.h>
++
++#include <linux/console.h>
++#include <linux/types.h>
++#include <linux/mfd/mxc-hdmi-core.h>
++#include <linux/pinctrl/consumer.h>
++
++#include <video/mxc_hdmi.h>
++
++#include "mxc_hdmi-cec.h"
++
++#define MAXCLIENTS 15
++
++struct hdmi_cec_priv {
++ u8 la;
++ struct list_head msg_head;
++ struct list_head client_node;
++ struct list_head wakeup_node;
++ wait_queue_head_t hdmi_cec_qm;
++ spinlock_t i_lock_cl;
++};
++
++struct hdmi_cec_event {
++ u8 event_type;
++ u8 msg_len;
++ u8 msg[MAX_MESSAGE_LEN];
++};
++
++struct hdmi_cec_event_list {
++ struct hdmi_cec_event data;
++ u8 libcec_la;
++ struct list_head msg_node;
++};
++
++struct hdmi_cec_shared {
++ bool write_busy;
++ int receive_error;
++ int send_error;
++ struct delayed_work hdmi_cec_work;
++ spinlock_t i_lock;
++ spinlock_t buffer_lock;
++ struct mutex m_lock;
++ struct list_head client_head;
++ struct list_head buffer_head;
++ int nr_ff;
++ u16 addresses;
++ u8 latest_cec_stat;
++ u32 physical_address;
++};
++
++static struct hdmi_cec_shared hdmi_cec_root;
++
++static bool hdmi_cec_state;
++static int hdmi_cec_ready = 0;
++static int hdmi_cec_major;
++static struct class *hdmi_cec_class;
++static u8 open_count = 0;
++
++static int in_worker = 0;
++
++static wait_queue_head_t hdmi_cec_qs, hdmi_cec_qw;
++
++static int SIGNAL_FREE_ARB = 0;
++
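++/* A logical address is "local" when one of this driver's clients has claimed
++ * it in the shared address bitmask; 0xf (broadcast) is never local. */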
++static inline bool la_is_local(u8 la)
++{
++ return (la == 0xf) ? false : hdmi_cec_root.addresses & BIT(la);
++}
++
++static inline u8 get_o(u8 la)
++{
++ return (la >> 4);
++}
++
++static inline u8 get_o_e(struct hdmi_cec_event_list *event)
++{
++ return (event->data.msg[0] >> 4);
++}
++
++static inline u8 get_d(u8 la)
++{
++ return (la & 0x0f);
++}
++
++static inline u8 get_d_e(struct hdmi_cec_event_list *event)
++{
++ return (event->data.msg[0] & 0x0f);
++}
++
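++/* Copy a CEC frame out of the HDMI register file (TX or RX data registers,
++ * selected by @offset) into a freshly allocated event and queue it on the
++ * shared buffer list for the worker to dispatch; may run in IRQ context,
++ * hence GFP_ATOMIC. */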
++int mxc_hdmi_cec_buffer(uint16_t offset, int len, u8 cec_type, u8 llla)
++{
++ struct hdmi_cec_event_list *event = NULL;
++ unsigned long flags;
++ u8 i;
++
++ event = kzalloc(sizeof(struct hdmi_cec_event_list), GFP_ATOMIC);
++ if (NULL == event) {
++ pr_err("%s: Not enough memory!\n", __func__);
++ return -ENOMEM;
++ }
++ event->data.msg_len = len;
++ if (!event->data.msg_len || event->data.msg_len > MAX_MESSAGE_LEN) {
++ pr_err("%s: Bad message size %d!\n", __func__, event->data.msg_len);
++ kfree(event);
++ return -E2BIG;
++ }
++
++ for (i = 0; i < event->data.msg_len; i++)
++ event->data.msg[i] = hdmi_readb(offset+i);
++
++ event->data.event_type = cec_type;
++ event->libcec_la = llla ? llla : get_o(event->data.msg[0]);
++
++ spin_lock_irqsave(&hdmi_cec_root.buffer_lock, flags);
++ list_add_tail(&event->msg_node, &hdmi_cec_root.buffer_head);
++ spin_unlock_irqrestore(&hdmi_cec_root.buffer_lock, flags);
++ return 0;
++}
++
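++/* Acknowledge the latched CEC status bits, queue the completed TX frame or
++ * the received RX frame on the shared buffer, and kick the delayed worker
++ * that fans the events out to the clients. */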
++static irqreturn_t mxc_hdmi_cec_isr(int irq, void *data)
++{
++ u8 cec_stat = 0;
++ u8 val;
++ unsigned long flags;
++ irqreturn_t ret = IRQ_HANDLED;
++
++ cec_stat = hdmi_readb(HDMI_IH_CEC_STAT0);
++ if (!cec_stat || !hdmi_cec_state) {
++ ret = IRQ_NONE;
++ goto done;
++ }
++
++ pr_debug("%s: HDMI CEC interrupt handler\n", __func__);
++
++ spin_lock_irqsave(&hdmi_cec_root.i_lock, flags);
++
++ hdmi_writeb(0x7f, HDMI_IH_MUTE_CEC_STAT0);
++ hdmi_writeb(cec_stat, HDMI_IH_CEC_STAT0);
++
++ if (cec_stat & (HDMI_IH_CEC_STAT0_ERROR_INIT | HDMI_IH_CEC_STAT0_ARB_LOST)) {
++ hdmi_cec_root.send_error++;
++ SIGNAL_FREE_ARB = cec_stat & HDMI_IH_CEC_STAT0_ERROR_INIT ? SIGNAL_FREE_TIME_RESEND : SIGNAL_FREE_LOST;
++ pr_debug("%s: error %d\n", __func__, hdmi_cec_root.send_error);
++ wake_up(&hdmi_cec_qs);
++ }
++ if (cec_stat & (HDMI_IH_CEC_STAT0_NACK | HDMI_IH_CEC_STAT0_DONE)) {
++ mxc_hdmi_cec_buffer(HDMI_CEC_TX_DATA0, hdmi_readb(HDMI_CEC_TX_CNT),
++ cec_stat & HDMI_IH_CEC_STAT0_NACK ? MESSAGE_TYPE_NOACK : MESSAGE_TYPE_SEND_SUCCESS, 0);
++ hdmi_cec_root.send_error = 0;
++ hdmi_cec_root.write_busy = false;
++ wake_up(&hdmi_cec_qs);
++ hdmi_writeb(0, HDMI_CEC_TX_CNT);
++ }
++ if (cec_stat & HDMI_IH_CEC_STAT0_EOM) {
++ mxc_hdmi_cec_buffer(HDMI_CEC_RX_DATA0, hdmi_readb(HDMI_CEC_RX_CNT), MESSAGE_TYPE_RECEIVE_SUCCESS, 0);
++ hdmi_writeb(0, HDMI_CEC_LOCK);
++ }
++
++ pr_debug("%s: HDMI CEC interrupt received %#x\n", __func__, cec_stat);
++ if (!in_worker) {
++ in_worker = 1;
++ schedule_delayed_work(&hdmi_cec_root.hdmi_cec_work, msecs_to_jiffies(20));
++ }
++
++ spin_unlock_irqrestore(&hdmi_cec_root.i_lock, flags);
++ val = HDMI_IH_CEC_STAT0_WAKEUP | HDMI_IH_CEC_STAT0_ERROR_FOLL;
++ hdmi_writeb(val, HDMI_IH_MUTE_CEC_STAT0);
++
++done:
++ return ret;
++}
++
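++/* Duplicate @event for one client, queue the copy on that client's message
++ * list and, when @wakeup is given, collect the client for a later wake_up(). */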
++static void __mxc_hdmi_cec_msg(struct hdmi_cec_event_list *event, struct hdmi_cec_priv *client, struct list_head *wakeup, u8 event_type)
++{
++ struct hdmi_cec_event_list *tevent = kzalloc(sizeof(struct hdmi_cec_event_list), GFP_KERNEL);
++ unsigned long flags;
++
++ if (!tevent) {
++ pr_err("%s: Not enough memory!\n", __func__);
++ return;
++ }
++ memcpy(&tevent->data, &event->data, min(sizeof(struct hdmi_cec_event), (size_t)(event->data.msg_len + 2)));
++ if (event_type)
++ tevent->data.event_type = event_type;
++
++ spin_lock_irqsave(&client->i_lock_cl, flags);
++ list_add_tail(&tevent->msg_node, &client->msg_head);
++ spin_unlock_irqrestore(&client->i_lock_cl, flags);
++
++ if (wakeup)
++ list_add_tail(&client->wakeup_node, wakeup);
++}
++
++void mxc_hdmi_cec_handle(u32 cec_stat)
++{
++ struct hdmi_cec_event_list *event = NULL;
++ struct hdmi_cec_priv *client = NULL;
++
++ if (cec_stat)
++ hdmi_cec_root.physical_address = cec_stat;
++
++ /* HDMI cable connected / HDMI cable disconnected */
++ if (!hdmi_cec_ready)
++ return;
++ pr_debug("%s: enter\n", __func__);
++
++ event = kzalloc(sizeof(struct hdmi_cec_event_list), GFP_ATOMIC);
++ if (!event) {
++ pr_err("%s: Not enough memory!\n", __func__);
++ return;
++ }
++ event->data.event_type = cec_stat ?
++ MESSAGE_TYPE_CONNECTED : MESSAGE_TYPE_DISCONNECTED;
++
++ list_for_each_entry(client, &hdmi_cec_root.client_head, client_node) {
++ mutex_lock(&hdmi_cec_root.m_lock);
++ __mxc_hdmi_cec_msg(event, client, NULL, 0);
++ mutex_unlock(&hdmi_cec_root.m_lock);
++ wake_up(&client->hdmi_cec_qm);
++ }
++	kfree(event);
++	pr_debug("%s: exit\n", __func__);
++}
++EXPORT_SYMBOL(mxc_hdmi_cec_handle);
++
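++/* Take one event off the shared buffer and deliver it to every interested
++ * client: received frames go to the addressed client (or to all of them for
++ * broadcasts), transmit results go back to the sender, and frames between
++ * locally registered LAs are looped back as received messages. */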
++void mxc_hdmi_cec_msg(void)
++{
++ struct hdmi_cec_event_list *event = NULL;
++ struct hdmi_cec_priv *client = NULL;
++ unsigned long flags;
++ LIST_HEAD(wakeup);
++
++ spin_lock_irqsave(&hdmi_cec_root.buffer_lock, flags);
++ event = list_first_entry_or_null(&hdmi_cec_root.buffer_head, struct hdmi_cec_event_list, msg_node);
++ if (!event) {
++ spin_unlock_irqrestore(&hdmi_cec_root.buffer_lock, flags);
++ return;
++ }
++ list_del_init(&event->msg_node);
++ spin_unlock_irqrestore(&hdmi_cec_root.buffer_lock, flags);
++
++ event->data.event_type = (event->data.event_type == MESSAGE_TYPE_NOACK && la_is_local(event->data.msg[0] & 0x0f)) ?
++ MESSAGE_TYPE_SEND_SUCCESS : event->data.event_type;
++
++ list_for_each_entry(client, &hdmi_cec_root.client_head, client_node) {
++ if (client->la == 0xff) continue;
++ pr_debug("client %d: MSG from %d to %d, %x. (clla: %d) ET: %d\n", client->la, get_o(event->data.msg[0]), get_d(event->data.msg[0]), event->data.msg[1], event->libcec_la, event->data.event_type);
++
++ if ((event->data.event_type == MESSAGE_TYPE_RECEIVE_SUCCESS &&
++ ((get_d_e(event) == 15 && event->libcec_la != client->la) || (event->data.msg[0] & 0x0f) == client->la))
++ ||
++ (event->data.event_type != MESSAGE_TYPE_RECEIVE_SUCCESS &&
++ ((get_o_e(event) == get_d_e(event) && client->la == 15) || client->la == get_o_e(event))) )
++ {
++ pr_debug("writing to %d\n", client->la);
++ __mxc_hdmi_cec_msg(event, client, &wakeup, 0);
++ }
++
++ if ((event->data.event_type == MESSAGE_TYPE_SEND_SUCCESS) &&
++ ((la_is_local(event->data.msg[0] & 0x0f) && (event->data.msg[0] & 0x0f) == client->la)
++ ||
++ ((event->data.msg[0] & 0x0f) == 15 && !event->libcec_la)) )
++ {
++ pr_debug("writing to %d\n", client->la);
++ __mxc_hdmi_cec_msg(event, client, &wakeup, MESSAGE_TYPE_RECEIVE_SUCCESS);
++ }
++ }
++ list_for_each_entry(client, &wakeup, wakeup_node)
++ wake_up(&(client->hdmi_cec_qm));
++ kfree(event);
++}
++
++static void mxc_hdmi_cec_worker(struct work_struct *work)
++{
++ pr_debug("%s: \n", __func__);
++ while (!list_empty_careful(&hdmi_cec_root.buffer_head))
++ mxc_hdmi_cec_msg();
++ in_worker = 0;
++ pr_debug("%s: exit\n", __func__);
++}
++
++static long hdmi_cec_set_address(u8 arg, struct hdmi_cec_priv *hdmi_cec);
++/*!
++ * @brief open function for cec file operation
++ *
++ * @return 0 on success or negative error code on error
++ */
++static int hdmi_cec_open(struct inode *inode, struct file *filp)
++{
++ struct hdmi_cec_priv *hdmi_cec = NULL;
++
++ mutex_lock(&hdmi_cec_root.m_lock);
++ if (open_count == MAXCLIENTS || !hdmi_cec_ready) {
++ mutex_unlock(&hdmi_cec_root.m_lock);
++ return -EBUSY;
++ }
++ open_count++;
++
++	hdmi_cec = kzalloc(sizeof(struct hdmi_cec_priv), GFP_KERNEL);
++	if (!hdmi_cec) {
++		open_count--;
++		mutex_unlock(&hdmi_cec_root.m_lock);
++		return -ENOMEM;
++	}
++	hdmi_cec->la = -1;
++ init_waitqueue_head(&(hdmi_cec->hdmi_cec_qm));
++ spin_lock_init(&(hdmi_cec->i_lock_cl));
++ INIT_LIST_HEAD(&hdmi_cec->msg_head);
++
++ filp->private_data = (void *)(hdmi_cec);
++
++ list_add_tail(&hdmi_cec->client_node, &hdmi_cec_root.client_head);
++ hdmi_cec_set_address(15, hdmi_cec);
++ mutex_unlock(&hdmi_cec_root.m_lock);
++
++ return 0;
++}
++
++static ssize_t hdmi_cec_read(struct file *file, char __user *buf, size_t count,
++ loff_t *ppos)
++{
++ struct hdmi_cec_priv *hdmi_cec = file->private_data;
++ int ret = 0;
++
++ if (!open_count || hdmi_cec->la == 0xff)
++ return -ENODEV;
++
++ pr_debug("%s: client la %x, (addr %x)\n", __func__, hdmi_cec->la, (unsigned int)hdmi_cec);
++
++ count = min(count, sizeof(struct hdmi_cec_event));
++ do {
++ unsigned long flags;
++ struct hdmi_cec_event_list *event = NULL;
++
++ spin_lock_irqsave(&(hdmi_cec->i_lock_cl), flags);
++ if (!list_empty(&hdmi_cec->msg_head)) {
++ event = list_first_entry_or_null(&hdmi_cec->msg_head, struct hdmi_cec_event_list, msg_node);
++ list_del(&event->msg_node);
++ }
++ spin_unlock_irqrestore(&(hdmi_cec->i_lock_cl), flags);
++
++ if (event) {
++ ret = copy_to_user(buf, &event->data, count) ? -EFAULT : count;
++ kfree(event);
++ }
++ else if (file->f_flags & O_NONBLOCK) {
++ ret = -EAGAIN;
++ }
++ else if (wait_event_interruptible(hdmi_cec->hdmi_cec_qm, (!list_empty(&hdmi_cec->msg_head)))) {
++ ret = -ERESTARTSYS;
++ }
++ } while(!ret);
++
++ pr_debug("%s: exit %d\n", __func__, ret);
++ return ret;
++}
++
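++/* Serialize writers on write_busy, load the frame into the TX data registers
++ * and start transmission; on an initiator error or arbitration loss the frame
++ * is retried (with the signal-free time chosen by the ISR) until it is sent
++ * or has failed more than five times. Broadcast frames (destination 15) are
++ * additionally looped back to the local clients. */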
++static ssize_t hdmi_cec_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ struct hdmi_cec_priv *hdmi_cec = file->private_data;
++ int ret = 0 , i = 0;
++ u8 msg[MAX_MESSAGE_LEN];
++ u8 val = 0;
++ int timeout = 1500;
++
++ if (!open_count || hdmi_cec->la == 0xff)
++ return -ENODEV;
++
++ if (count > MAX_MESSAGE_LEN)
++ return -E2BIG;
++
++ memset(&msg, 0, MAX_MESSAGE_LEN);
++ if (copy_from_user(&msg, buf, count))
++ return -EFAULT;
++
++ if (file->f_flags & O_NONBLOCK && hdmi_cec_root.write_busy)
++ return -EAGAIN;
++ else if (wait_event_interruptible(hdmi_cec_qw, (!hdmi_cec_root.write_busy)))
++ return -ERESTARTSYS;
++
++ mutex_lock(&hdmi_cec_root.m_lock);
++ pr_debug("%s: \n", __func__);
++ hdmi_cec_root.write_busy = true;
++
++ hdmi_writeb(count, HDMI_CEC_TX_CNT);
++ for (i = 0; i < count; i++)
++ hdmi_writeb(msg[i], HDMI_CEC_TX_DATA0+i);
++
++ if (get_d(msg[0]) == 15) {
++ ret = count;
++ mxc_hdmi_cec_buffer(HDMI_CEC_TX_DATA0, count, MESSAGE_TYPE_RECEIVE_SUCCESS, hdmi_cec->la);
++ pr_debug("%s: wait_event la_is_local\n", __func__);
++ hdmi_cec_root.write_busy = false;
++ }
++
++ do {
++		val = hdmi_readb(HDMI_CEC_CTRL);
++		val |= 0x01;
++		val &= ~0x6;
++ val |= SIGNAL_FREE_ARB;
++ hdmi_writeb(val, HDMI_CEC_CTRL);
++ SIGNAL_FREE_ARB = SIGNAL_FREE_TIME_NORMAL;
++
++ ret = wait_event_timeout(hdmi_cec_qs, !((val = hdmi_readb(HDMI_CEC_CTRL)) & 0x01), msecs_to_jiffies(timeout));
++ pr_debug("%s: wait_event ret %d\n", __func__, ret);
++ if (hdmi_cec_root.send_error > 5 || ret < 2) {
++ hdmi_writeb(0, HDMI_CEC_TX_CNT);
++ hdmi_cec_root.write_busy = false;
++ ret = -EIO;
++ } else if (hdmi_cec_root.send_error && ret > 1) {
++ pr_debug("%s: --- resending msg\n", __func__);
++ timeout = jiffies_to_msecs(ret);
++ ret = 0;
++ } else if (ret > 1) {
++ ret = count;
++ }
++ } while(!ret);
++
++ mutex_unlock(&hdmi_cec_root.m_lock);
++ wake_up(&hdmi_cec_qw);
++ return ret;
++}
++
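++/* Ungate the CEC clock, program the interrupt polarity and mask, and release
++ * the RX buffer lock so the controller can send and receive again. */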
++static void hdmi_cec_hwenable(void)
++{
++ u8 val;
++
++ pr_debug("%s: \n", __func__);
++ hdmi_cec_state = true;
++
++ val = hdmi_readb(HDMI_MC_CLKDIS);
++ val &= ~HDMI_MC_CLKDIS_CECCLK_DISABLE;
++ hdmi_writeb(val, HDMI_MC_CLKDIS);
++
++ val = HDMI_IH_CEC_STAT0_ERROR_INIT | HDMI_IH_CEC_STAT0_NACK |
++ HDMI_IH_CEC_STAT0_EOM | HDMI_IH_CEC_STAT0_DONE |
++ HDMI_IH_CEC_STAT0_ARB_LOST;
++ hdmi_writeb(val, HDMI_CEC_POLARITY);
++
++ val = HDMI_IH_CEC_STAT0_WAKEUP | HDMI_IH_CEC_STAT0_ERROR_FOLL;
++ hdmi_writeb(val, HDMI_CEC_MASK);
++ hdmi_writeb(val, HDMI_IH_MUTE_CEC_STAT0);
++ hdmi_writeb(0x0, HDMI_CEC_LOCK);
++ hdmi_writeb(0x2, HDMI_CEC_CTRL);
++}
++
++static void hdmi_cec_hwdisable(void)
++{
++ u8 val;
++
++ pr_debug("%s: \n", __func__);
++ hdmi_cec_state = false;
++
++ hdmi_writeb(0x10, HDMI_CEC_CTRL);
++
++ val = HDMI_IH_CEC_STAT0_WAKEUP | HDMI_IH_CEC_STAT0_ERROR_FOLL |
++ HDMI_IH_CEC_STAT0_ERROR_INIT | HDMI_IH_CEC_STAT0_ARB_LOST |
++ HDMI_IH_CEC_STAT0_NACK | HDMI_IH_CEC_STAT0_EOM |
++ HDMI_IH_CEC_STAT0_DONE;
++ hdmi_writeb(val, HDMI_CEC_MASK);
++ hdmi_writeb(val, HDMI_IH_MUTE_CEC_STAT0);
++
++ hdmi_writeb(0x0, HDMI_CEC_POLARITY);
++
++ val = hdmi_readb(HDMI_MC_CLKDIS);
++ val |= HDMI_MC_CLKDIS_CECCLK_DISABLE;
++ hdmi_writeb(val, HDMI_MC_CLKDIS);
++}
++
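++/* Track the logical addresses claimed by the clients in a shared bitmask and
++ * mirror it into the HDMI_CEC_ADDR_L/H registers; LA 15 is reference-counted
++ * through nr_ff, and 0xff (from arg -1) means the client holds no address. */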
++static long hdmi_cec_set_address(u8 arg, struct hdmi_cec_priv *hdmi_cec)
++{
++
++ if (hdmi_cec->la == arg)
++ return 0;
++
++ pr_debug("%s: to %d\n", __func__, arg);
++
++ if (arg == 15)
++ hdmi_cec_root.nr_ff++;
++ if (hdmi_cec->la == 15)
++ hdmi_cec_root.nr_ff--;
++ else
++ hdmi_cec_root.addresses &= ~BIT(hdmi_cec->la);
++
++ hdmi_cec->la = arg;
++ if ((u8)arg != 0xff)
++ hdmi_cec_root.addresses |= BIT(arg);
++ else
++ wake_up(&hdmi_cec->hdmi_cec_qm);
++
++ if (!hdmi_cec_root.nr_ff)
++ hdmi_cec_root.addresses &= ~BIT(15);
++
++ hdmi_writeb(hdmi_cec_root.addresses & 0xff, HDMI_CEC_ADDR_L);
++	/*
++	 * Don't register LA 15 with the hardware: with it set, broadcast
++	 * messages are never sent (the CEC controller considers them local).
++	 */
++ hdmi_writeb((hdmi_cec_root.addresses & 0x7f00) >> 8, HDMI_CEC_ADDR_H);
++ return 0;
++}
++
++/*!
++ * @brief ioctl function for CEC file operation
++ * @param cmd IO ctrl command
++ * @return 0 on success or negative error code on error
++ */
++static long hdmi_cec_ioctl(struct file *filp, u_int cmd,
++ u_long arg)
++{
++ int ret = 0, i;
++ struct hdmi_cec_priv *hdmi_cec = filp->private_data;
++
++ pr_debug("%s: \n", __func__);
++
++ if (!open_count)
++ return -ENODEV;
++
++ switch (cmd) {
++ case HDMICEC_IOC_SETLOGICALADDRESS:
++ mutex_lock(&hdmi_cec_root.m_lock);
++ ret = hdmi_cec_set_address(arg, hdmi_cec);
++ mutex_unlock(&hdmi_cec_root.m_lock);
++		/*
++		 * In case we have more clients, inform them about the PA change.
++		 * (If libCEC is not in monitoring mode, it won't allow more
++		 * clients with the same PA - it changes all previous holders of
++		 * that 'taken' PA to 1000.)
++		 * To avoid that, we expand the PA further by replacing the first
++		 * empty dimension with the LA: for instance, if our PA is
++		 * 2.2.0.0, we change all concurrent clients to 2.2.X.0, where X
++		 * is the actual LA.
++		 */
++ if (open_count > 1 && arg != 15)
++ mxc_hdmi_cec_handle(hdmi_cec_root.physical_address);
++ break;
++
++ case HDMICEC_IOC_STARTDEVICE:
++ if (!hdmi_cec_state)
++ hdmi_cec_hwenable();
++ break;
++
++ case HDMICEC_IOC_STOPDEVICE:
++ hdmi_cec_set_address(-1, hdmi_cec);
++ if (hdmi_cec_state && open_count < 2)
++ hdmi_cec_hwdisable();
++ break;
++
++	case HDMICEC_IOC_GETPHYADDRESS:
++		ret = copy_to_user((void __user *)arg, &hdmi_cec_root.physical_address,
++				4*sizeof(u8)) ? -EFAULT : 0;
++		if (!ret && open_count > 1) {
++			u8 pa[4];
++
++			memcpy(pa, &hdmi_cec_root.physical_address, sizeof(pa));
++			/* patch the first empty PA dimension with our LA, bounds-checked
++			 * and via put_user() instead of dereferencing the user pointer */
++			for (i = 0; i < 4 && pa[i] != 0x0; i++)
++				;
++			if (i < 4 && put_user(hdmi_cec->la, (u8 __user *)arg + i))
++				ret = -EFAULT;
++		}
++		break;
++
++ default:
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++void hdmi_cec_start_device(void)
++{
++	if (open_count && hdmi_cec_ready && !hdmi_cec_state)
++ hdmi_cec_hwenable();
++}
++EXPORT_SYMBOL(hdmi_cec_start_device);
++
++void hdmi_cec_stop_device(void)
++{
++	if (hdmi_cec_ready && hdmi_cec_state)
++ hdmi_cec_hwdisable();
++}
++EXPORT_SYMBOL(hdmi_cec_stop_device);
++
++/*!
++* @brief Release function for CEC file operation
++* @return 0 on success or negative error code on error
++*/
++static int hdmi_cec_release(struct inode *inode, struct file *filp)
++{
++ struct hdmi_cec_priv *hdmi_cec = filp->private_data;
++ struct hdmi_cec_priv *client, *t;
++ unsigned long flags;
++
++ pr_debug("%s: \n", __func__);
++
++ mutex_lock(&hdmi_cec_root.m_lock);
++ spin_lock_irqsave(&hdmi_cec->i_lock_cl, flags);
++
++ hdmi_cec_set_address(-1, hdmi_cec);
++ if (!--open_count)
++ hdmi_cec_hwdisable();
++ while (!list_empty(&hdmi_cec->msg_head)) {
++ struct hdmi_cec_event_list *event = NULL;
++
++ event = list_first_entry(&hdmi_cec->msg_head, struct hdmi_cec_event_list, msg_node);
++ list_del(&event->msg_node);
++ kfree(event);
++ }
++ spin_unlock_irqrestore(&hdmi_cec->i_lock_cl, flags);
++
++ list_for_each_entry_safe(client, t, &hdmi_cec_root.client_head, client_node) {
++ if (client == hdmi_cec) {
++ list_del(&client->client_node);
++ filp->private_data = NULL;
++ kfree(client);
++ }
++ }
++
++ mutex_unlock(&hdmi_cec_root.m_lock);
++ return 0;
++}
++
++static unsigned int hdmi_cec_poll(struct file *file, poll_table *wait)
++{
++ unsigned int mask = 0;
++ struct hdmi_cec_priv *hdmi_cec = file->private_data;
++
++ pr_debug("%s: poll client %lx, la %d\n", __func__, (unsigned long)hdmi_cec, hdmi_cec->la);
++
++ if (hdmi_cec->la == 0xff)
++ return POLLHUP;
++
++ poll_wait(file, &hdmi_cec->hdmi_cec_qm, wait);
++ poll_wait(file, &hdmi_cec_qw, wait);
++
++ if (!hdmi_cec_root.write_busy)
++ mask = (POLLOUT | POLLWRNORM);
++ if (!list_empty(&hdmi_cec->msg_head))
++ mask |= (POLLIN | POLLRDNORM);
++
++ return mask;
++}
++
++static const struct file_operations hdmi_cec_fops = {
++ .owner = THIS_MODULE,
++ .read = hdmi_cec_read,
++ .write = hdmi_cec_write,
++ .open = hdmi_cec_open,
++ .unlocked_ioctl = hdmi_cec_ioctl,
++ .release = hdmi_cec_release,
++ .poll = hdmi_cec_poll,
++};
++
++static int hdmi_cec_dev_probe(struct platform_device *pdev)
++{
++ int err = 0;
++ struct device *temp_class;
++ struct resource *res;
++ struct pinctrl *pinctrl;
++ int irq = platform_get_irq(pdev, 0);
++
++ hdmi_cec_major = register_chrdev(hdmi_cec_major, "mxc_hdmi_cec", &hdmi_cec_fops);
++ if (hdmi_cec_major < 0) {
++ dev_err(&pdev->dev, "%s: unable to get a major for HDMI CEC\n", __func__);
++ err = -EBUSY;
++ goto out;
++ }
++
++ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++ if (unlikely(res == NULL)) {
++ dev_err(&pdev->dev, "%s: No HDMI irq line provided\n", __func__);
++		err = -ENOENT;
++		goto err_out_chrdev;
++ }
++ spin_lock_init(&hdmi_cec_root.i_lock);
++
++ err = devm_request_irq(&pdev->dev, irq, mxc_hdmi_cec_isr, IRQF_SHARED,
++ dev_name(&pdev->dev), &hdmi_cec_root);
++ if (err < 0) {
++ dev_err(&pdev->dev, "%s: Unable to request irq: %d\n", __func__, err);
++ goto err_out_chrdev;
++ }
++
++ hdmi_cec_class = class_create(THIS_MODULE, "mxc_hdmi_cec");
++ if (IS_ERR(hdmi_cec_class)) {
++ err = PTR_ERR(hdmi_cec_class);
++ goto err_out_chrdev;
++ }
++
++ temp_class = device_create(hdmi_cec_class, NULL,
++ MKDEV(hdmi_cec_major, 0), NULL, "mxc_hdmi_cec");
++ if (IS_ERR(temp_class)) {
++ err = PTR_ERR(temp_class);
++ goto err_out_class;
++ }
++
++ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
++ if (IS_ERR(pinctrl)) {
++ dev_err(&pdev->dev, "%s: can't get/select CEC pinctrl\n", __func__);
++		err = PTR_ERR(pinctrl);
++		goto err_out_class;
++ }
++
++ init_waitqueue_head(&hdmi_cec_qs);
++ init_waitqueue_head(&hdmi_cec_qw);
++
++ INIT_LIST_HEAD(&hdmi_cec_root.client_head);
++ INIT_LIST_HEAD(&hdmi_cec_root.buffer_head);
++
++ mutex_init(&hdmi_cec_root.m_lock);
++ hdmi_cec_root.addresses = 0;
++ platform_set_drvdata(pdev, &hdmi_cec_root);
++ INIT_DELAYED_WORK(&hdmi_cec_root.hdmi_cec_work, mxc_hdmi_cec_worker);
++
++ dev_info(&pdev->dev, "%s: HDMI CEC initialized\n", __func__);
++ hdmi_cec_ready = 1;
++ goto out;
++
++err_out_class:
++ device_destroy(hdmi_cec_class, MKDEV(hdmi_cec_major, 0));
++ class_destroy(hdmi_cec_class);
++err_out_chrdev:
++ unregister_chrdev(hdmi_cec_major, "mxc_hdmi_cec");
++out:
++ return err;
++}
++
++static int hdmi_cec_dev_remove(struct platform_device *pdev)
++{
++ if (hdmi_cec_major > 0) {
++ flush_scheduled_work();
++ device_destroy(hdmi_cec_class, MKDEV(hdmi_cec_major, 0));
++ class_destroy(hdmi_cec_class);
++ unregister_chrdev(hdmi_cec_major, "mxc_hdmi_cec");
++ hdmi_cec_major = 0;
++ }
++ return 0;
++}
++
++static const struct of_device_id imx_hdmi_cec_match[] = {
++ { .compatible = "fsl,imx6q-hdmi-cec", },
++ { .compatible = "fsl,imx6dl-hdmi-cec", },
++ { /* sentinel */ }
++};
++
++static struct platform_driver mxc_hdmi_cec_driver = {
++ .probe = hdmi_cec_dev_probe,
++ .remove = hdmi_cec_dev_remove,
++ .driver = {
++ .name = "mxc_hdmi_cec",
++ .of_match_table = imx_hdmi_cec_match,
++ },
++};
++
++module_platform_driver(mxc_hdmi_cec_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("Linux HDMI CEC driver for Freescale i.MX/MXC");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:mxc_hdmi_cec");
++
+diff -Nur linux-4.1.3/drivers/mxc/hdmi-cec/mxc_hdmi-cec.debug.c linux-xbian-imx6/drivers/mxc/hdmi-cec/mxc_hdmi-cec.debug.c
+--- linux-4.1.3/drivers/mxc/hdmi-cec/mxc_hdmi-cec.debug.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/hdmi-cec/mxc_hdmi-cec.debug.c 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,765 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file mxc_hdmi-cec.debug.c
++ *
++ * @brief HDMI CEC system initialization and file operation implementation
++ *
++ * @ingroup HDMI
++ */
++
++//#define DEBUG
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/fs.h>
++#include <linux/stat.h>
++#include <linux/platform_device.h>
++#include <linux/poll.h>
++#include <linux/wait.h>
++#include <linux/list.h>
++#include <linux/delay.h>
++#include <linux/fsl_devices.h>
++#include <linux/uaccess.h>
++#include <linux/io.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/workqueue.h>
++#include <linux/sizes.h>
++
++#include <linux/console.h>
++#include <linux/types.h>
++#include <linux/mfd/mxc-hdmi-core.h>
++#include <linux/pinctrl/consumer.h>
++
++#include <video/mxc_hdmi.h>
++
++#include "mxc_hdmi-cec.h"
++
++#define MAXCLIENTS 15
++
++struct hdmi_cec_priv {
++ u8 la;
++ u8 libcec_la;
++ struct list_head msg_head;
++ struct list_head client_node;
++ struct list_head wakeup_node;
++ wait_queue_head_t hdmi_cec_qm;
++ spinlock_t i_lock_cl;
++};
++
++struct hdmi_cec_event {
++ u8 event_type;
++ u8 msg_len;
++ u8 msg[MAX_MESSAGE_LEN];
++};
++
++struct hdmi_cec_event_list {
++ struct hdmi_cec_event data;
++ struct list_head msg_node;
++};
++
++struct hdmi_cec_shared {
++ bool write_busy;
++ int receive_error;
++ int send_error;
++ struct delayed_work hdmi_cec_work, hdmi_msg_trigger;
++ spinlock_t i_lock;
++ struct mutex m_lock_cl;
++ struct list_head client_head;
++ int nr_ff;
++ u16 addresses;
++ u8 latest_cec_stat;
++ u32 physical_address;
++};
++
++static struct hdmi_cec_shared hdmi_cec_root;
++
++static bool hdmi_cec_state;
++static int hdmi_cec_ready = 0;
++static int hdmi_cec_major;
++static struct class *hdmi_cec_class;
++static u8 open_count = 0;
++
++static wait_queue_head_t hdmi_cec_qs, hdmi_cec_qw;
++
++static inline bool la_is_local(u8 la)
++{
++ return (la == 0xf) ? false : hdmi_cec_root.addresses & BIT(la);
++}
++
++static irqreturn_t mxc_hdmi_cec_isr(int irq, void *data)
++{
++ u8 cec_stat = 0;
++ unsigned long flags;
++ irqreturn_t ret = IRQ_HANDLED;
++
++ spin_lock_irqsave(&hdmi_cec_root.i_lock, flags);
++
++ cec_stat = hdmi_readb(HDMI_IH_CEC_STAT0);
++ if (!cec_stat) {
++ ret = IRQ_NONE;
++ goto irqnone;
++ }
++ hdmi_writeb(0x7f, HDMI_IH_MUTE_CEC_STAT0);
++ hdmi_writeb(cec_stat, HDMI_IH_CEC_STAT0);
++
++ if (cec_stat & HDMI_IH_CEC_STAT0_ERROR_INIT) {
++ hdmi_cec_root.send_error++;
++ pr_debug("%s: error %d\n", __func__, hdmi_cec_root.send_error);
++ wake_up(&hdmi_cec_qs);
++ }
++ if (cec_stat & (HDMI_IH_CEC_STAT0_NACK | HDMI_IH_CEC_STAT0_DONE)) {
++ hdmi_cec_root.send_error = 0;
++ wake_up(&hdmi_cec_qs);
++ }
++
++ hdmi_cec_root.latest_cec_stat = cec_stat;
++ pr_debug("%s: HDMI CEC interrupt received\n", __func__);
++ schedule_delayed_work(&(hdmi_cec_root.hdmi_cec_work), msecs_to_jiffies(5));
++
++irqnone:
++ spin_unlock_irqrestore(&hdmi_cec_root.i_lock, flags);
++ return ret;
++}
++
++static void __mxc_hdmi_cec_msg(struct hdmi_cec_event_list *event, struct hdmi_cec_priv *client, struct list_head *wakeup, u8 event_type)
++{
++ struct hdmi_cec_event_list *tevent = NULL;
++ unsigned long flags;
++
++ if (!(tevent = kzalloc(sizeof(struct hdmi_cec_event_list), GFP_KERNEL))) {
++ pr_err("%s: Not enough memory!\n", __func__);
++ return;
++ }
++	memcpy(&tevent->data, &event->data, sizeof(tevent->data));
++ if (event_type)
++ tevent->data.event_type = event_type;
++ spin_lock_irqsave(&client->i_lock_cl, flags);
++ list_add_tail(&tevent->msg_node, &client->msg_head);
++ spin_unlock_irqrestore(&client->i_lock_cl, flags);
++
++ if (wakeup)
++ list_add_tail(&client->wakeup_node, wakeup);
++ pr_debug("%s: -- event to client %x\n", __func__, (int)client);
++}
++
++void mxc_hdmi_cec_handle(u32 cec_stat)
++{
++ struct hdmi_cec_event_list *event = NULL;
++ struct hdmi_cec_priv *client = NULL;
++
++ if (cec_stat)
++ hdmi_cec_root.physical_address = cec_stat;
++
++ /* HDMI cable connected / HDMI cable disconnected */
++ if (!hdmi_cec_ready)
++ return;
++ pr_debug("%s: enter\n", __func__);
++
++ event = kzalloc(sizeof(struct hdmi_cec_event_list), GFP_KERNEL);
++ if (!event) {
++ pr_err("%s: Not enough memory!\n", __func__);
++ return;
++ }
++ event->data.event_type = cec_stat ?
++ MESSAGE_TYPE_CONNECTED : MESSAGE_TYPE_DISCONNECTED;
++
++ list_for_each_entry(client, &hdmi_cec_root.client_head, client_node) {
++ __mxc_hdmi_cec_msg(event, client, NULL, 0);
++ wake_up(&client->hdmi_cec_qm);
++ }
++	kfree(event);
++	pr_debug("%s: exit\n", __func__);
++}
++EXPORT_SYMBOL(mxc_hdmi_cec_handle);
++
++void mxc_hdmi_cec_msg(u8 event_type)
++{
++ struct hdmi_cec_event_list *event = NULL;
++ struct hdmi_cec_priv *client = NULL;
++ u8 i;
++ LIST_HEAD(wakeup);
++
++ event = kzalloc(sizeof(struct hdmi_cec_event_list), GFP_KERNEL);
++ if (NULL == event) {
++ pr_err("%s: Not enough memory!\n", __func__);
++ goto error2;
++ }
++ event->data.msg_len = (event_type == MESSAGE_TYPE_RECEIVE_SUCCESS) ?
++ hdmi_readb(HDMI_CEC_RX_CNT) : hdmi_readb(HDMI_CEC_TX_CNT);
++
++ if (!event->data.msg_len || event->data.msg_len > MAX_MESSAGE_LEN) {
++ pr_err("%s: Bad message size %d!\n", __func__, event->data.msg_len);
++ goto error1;
++ }
++
++ for (i = 0; i < event->data.msg_len; i++)
++ event->data.msg[i] = (event_type == MESSAGE_TYPE_RECEIVE_SUCCESS) ?
++ hdmi_readb(HDMI_CEC_RX_DATA0+i) : hdmi_readb(HDMI_CEC_TX_DATA0+i);
++
++ if (event_type == MESSAGE_TYPE_RECEIVE_SUCCESS)
++ hdmi_writeb(0x0, HDMI_CEC_LOCK);
++
++ event->data.event_type = (event_type == MESSAGE_TYPE_NOACK && la_is_local(event->data.msg[0] & 0x0f)) ?
++ MESSAGE_TYPE_SEND_SUCCESS : event_type;
++
++ list_for_each_entry(client, &hdmi_cec_root.client_head, client_node) {
++ if (client->la == 0xff) continue;
++ pr_debug("%s: -- l:%x, r:%x, ch:%x LA: %x\n", __func__, (event->data.msg[0] & 0xf0) >> 4, event->data.msg[0] & 0x0f, client->libcec_la, client->la);
++
++ if ((event_type == MESSAGE_TYPE_RECEIVE_SUCCESS &&
++ (((event->data.msg[0] & 0x0f) == 15 && client->libcec_la != client->la) || (event->data.msg[0] & 0x0f) == client->la))
++ ||
++ (event_type != MESSAGE_TYPE_RECEIVE_SUCCESS &&
++ (client->libcec_la == (int)client->la)) )
++ {
++ pr_debug("%s: -- adding msg %x->%x to client %x (event: %x)\n", __func__, (event->data.msg[0] & 0xf0) >> 4, event->data.msg[0] & 0x0f, client->la, (int)event);
++ __mxc_hdmi_cec_msg(event, client, &wakeup, 0);
++ }
++
++ if ((event->data.event_type == MESSAGE_TYPE_SEND_SUCCESS) &&
++ ((la_is_local(event->data.msg[0] & 0x0f) && (event->data.msg[0] & 0x0f) == client->la)
++ ||
++ ((event->data.msg[0] & 0x0f) == 15 && !client->libcec_la)) )
++ {
++ pr_debug("%s: -- COPYing event to client %x\n", __func__, (int)client);
++ __mxc_hdmi_cec_msg(event, client, &wakeup, MESSAGE_TYPE_RECEIVE_SUCCESS);
++ }
++ client->libcec_la = 0;
++ }
++ list_for_each_entry(client, &wakeup, wakeup_node)
++ wake_up(&(client->hdmi_cec_qm));
++
++error1:
++ kfree(event);
++error2:
++ if (event_type != MESSAGE_TYPE_RECEIVE_SUCCESS) {
++ mutex_lock(&hdmi_cec_root.m_lock_cl);
++ hdmi_cec_root.write_busy = false;
++ hdmi_writeb(0, HDMI_CEC_TX_CNT);
++ mutex_unlock(&hdmi_cec_root.m_lock_cl);
++ wake_up(&hdmi_cec_qw);
++ }
++}
++
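++/* Bottom half of the debug variant: dispatch whatever status bits the ISR
++ * latched in latest_cec_stat, then unmute the CEC interrupt again. */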
++static void mxc_hdmi_cec_worker(struct work_struct *work)
++{
++ unsigned long flags;
++ u8 val;
++
++ pr_debug("%s: \n", __func__);
++ if (hdmi_cec_root.latest_cec_stat && open_count) {
++ /* The current transmission is successful (for initiator only).*/
++ if (hdmi_cec_root.latest_cec_stat & HDMI_IH_CEC_STAT0_DONE) {
++ mxc_hdmi_cec_msg(MESSAGE_TYPE_SEND_SUCCESS);
++ }
++ /*A frame is not acknowledged in a directly addressed message. Or a frame is negatively acknowledged in
++ a broadcast message (for initiator only).*/
++ if (hdmi_cec_root.latest_cec_stat & HDMI_IH_CEC_STAT0_NACK) {
++ mxc_hdmi_cec_msg(MESSAGE_TYPE_NOACK);
++ }
++ /*EOM is detected so that the received data is ready in the receiver data buffer*/
++ if (hdmi_cec_root.latest_cec_stat & HDMI_IH_CEC_STAT0_EOM) {
++ mxc_hdmi_cec_msg(MESSAGE_TYPE_RECEIVE_SUCCESS);
++ }
++ hdmi_cec_root.latest_cec_stat = 0;
++ }
++
++ val = HDMI_IH_CEC_STAT0_WAKEUP | HDMI_IH_CEC_STAT0_ERROR_FOLL | HDMI_IH_CEC_STAT0_ARB_LOST;
++// spin_lock_irqsave(&hdmi_cec_root.i_lock, flags);
++ hdmi_writeb(val, HDMI_IH_MUTE_CEC_STAT0);
++// spin_unlock_irqrestore(&hdmi_cec_root.i_lock, flags);
++ pr_debug("%s: exit\n", __func__);
++}
++
++static long hdmi_cec_set_address(u8 arg, struct hdmi_cec_priv *hdmi_cec);
++/*!
++ * @brief open function for cec file operation
++ *
++ * @return 0 on success or negative error code on error
++ */
++static int hdmi_cec_open(struct inode *inode, struct file *filp)
++{
++ struct hdmi_cec_priv *hdmi_cec = NULL;
++
++ mutex_lock(&hdmi_cec_root.m_lock_cl);
++ if (open_count == MAXCLIENTS || !hdmi_cec_ready) {
++ mutex_unlock(&hdmi_cec_root.m_lock_cl);
++ return -EBUSY;
++ }
++ open_count++;
++
++	hdmi_cec = kzalloc(sizeof(struct hdmi_cec_priv), GFP_KERNEL);
++	if (!hdmi_cec) {
++		open_count--;
++		mutex_unlock(&hdmi_cec_root.m_lock_cl);
++		return -ENOMEM;
++	}
++	hdmi_cec->la = -1;
++ init_waitqueue_head(&(hdmi_cec->hdmi_cec_qm));
++ spin_lock_init(&(hdmi_cec->i_lock_cl));
++ INIT_LIST_HEAD(&hdmi_cec->msg_head);
++
++ filp->private_data = (void *)(hdmi_cec);
++
++ list_add_tail(&hdmi_cec->client_node, &hdmi_cec_root.client_head);
++ hdmi_cec_set_address(15, hdmi_cec);
++ mutex_unlock(&hdmi_cec_root.m_lock_cl);
++
++ return 0;
++}
++
++/*
++ * Run a delayed mxc_hdmi_cec_msg() to deal with messages addressed to LAs
++ * registered on the local CEC: the i.MX CEC controller does not transport
++ * such messages through the (local) interface (messages whose destination
++ * matches any registered LA).
++ */
++static void mxc_hdmi_cec_msg_trigger(struct work_struct *work)
++{
++ mxc_hdmi_cec_msg(MESSAGE_TYPE_SEND_SUCCESS);
++}
++
++static ssize_t hdmi_cec_read(struct file *file, char __user *buf, size_t count,
++ loff_t *ppos)
++{
++ struct hdmi_cec_priv *hdmi_cec = file->private_data;
++ int ret = 0;
++
++ if (!open_count || hdmi_cec->la == 0xff)
++ return -ENODEV;
++
++ pr_debug("%s: client la %x, (addr %x)\n", __func__, hdmi_cec->la, (unsigned int)hdmi_cec);
++
++ count = min(count, sizeof(struct hdmi_cec_event));
++ do {
++ unsigned long flags;
++ struct hdmi_cec_event_list *event = NULL;
++
++ spin_lock_irqsave(&(hdmi_cec->i_lock_cl), flags);
++ if (!list_empty(&hdmi_cec->msg_head)) {
++ event = list_first_entry_or_null(&hdmi_cec->msg_head, struct hdmi_cec_event_list, msg_node);
++ list_del(&event->msg_node);
++ }
++ spin_unlock_irqrestore(&(hdmi_cec->i_lock_cl), flags);
++
++ if (event) {
++ ret = copy_to_user(buf, &event->data, count) ? -EFAULT : count;
++ kfree(event);
++ }
++ else if (file->f_flags & O_NONBLOCK) {
++ ret = -EAGAIN;
++ }
++ else if (wait_event_interruptible(hdmi_cec->hdmi_cec_qm, (!list_empty(&hdmi_cec->msg_head)))) {
++ ret = -ERESTARTSYS;
++ }
++ } while(!ret);
++
++ pr_debug("%s: exit %d\n", __func__, ret);
++ return ret;
++}
++
++static ssize_t hdmi_cec_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ struct hdmi_cec_priv *hdmi_cec = file->private_data;
++ int ret = 0 , i = 0;
++ u8 msg[MAX_MESSAGE_LEN];
++ u8 val = 0;
++ int timeout = 1500;
++
++ if (!open_count || hdmi_cec->la == 0xff)
++ return -ENODEV;
++
++ if (count > MAX_MESSAGE_LEN)
++ return -E2BIG;
++
++ memset(&msg, 0, MAX_MESSAGE_LEN);
++ if (copy_from_user(&msg, buf, count))
++ return -EFAULT;
++
++ if (file->f_flags & O_NONBLOCK && hdmi_cec_root.write_busy)
++ return -EAGAIN;
++ else if (wait_event_interruptible(hdmi_cec_qw, (!hdmi_cec_root.write_busy)))
++ return -ERESTARTSYS;
++
++ mutex_lock(&hdmi_cec_root.m_lock_cl);
++ pr_debug("%s: \n", __func__);
++ hdmi_cec_root.write_busy = true;
++
++ hdmi_writeb(count, HDMI_CEC_TX_CNT);
++ for (i = 0; i < count; i++)
++ hdmi_writeb(msg[i], HDMI_CEC_TX_DATA0+i);
++
++ hdmi_cec->libcec_la = hdmi_cec->la;
++ if (la_is_local(msg[0] & 0x0f)) {
++ ret = count;
++ schedule_delayed_work(&(hdmi_cec_root.hdmi_msg_trigger), msecs_to_jiffies(20));
++ pr_debug("%s: wait_event la_is_local\n", __func__);
++	} else do {
++ val = hdmi_readb(HDMI_CEC_CTRL) | 0x01;
++ val |= hdmi_cec_root.send_error ? SIGNAL_FREE_TIME_RESEND : SIGNAL_FREE_TIME_NORMAL;
++ hdmi_writeb(val, HDMI_CEC_CTRL);
++
++ ret = wait_event_timeout(hdmi_cec_qs, !((val = hdmi_readb(HDMI_CEC_CTRL)) & 0x01), msecs_to_jiffies(timeout));
++ pr_debug("%s: wait_event ret %d\n", __func__, ret);
++ if (hdmi_cec_root.send_error > 5 || ret < 2) {
++ hdmi_writeb(0, HDMI_CEC_TX_CNT);
++ hdmi_cec_root.write_busy = false;
++ wake_up(&hdmi_cec_qw);
++ ret = -EIO;
++ } else if (hdmi_cec_root.send_error && ret > 1) {
++ pr_debug("%s: --- resending msg\n", __func__);
++ timeout = jiffies_to_msecs(ret);
++ ret = 0;
++ } else if (ret > 1) {
++ ret = count;
++ }
++ } while(!ret);
++
++ mutex_unlock(&hdmi_cec_root.m_lock_cl);
++ return ret;
++}
++
++static void hdmi_cec_hwenable(void)
++{
++ u8 val;
++
++ pr_debug("%s: \n", __func__);
++ hdmi_cec_state = true;
++
++ val = hdmi_readb(HDMI_MC_CLKDIS);
++ val &= ~HDMI_MC_CLKDIS_CECCLK_DISABLE;
++ hdmi_writeb(val, HDMI_MC_CLKDIS);
++
++ val = HDMI_IH_CEC_STAT0_ERROR_INIT | HDMI_IH_CEC_STAT0_NACK |
++ HDMI_IH_CEC_STAT0_EOM | HDMI_IH_CEC_STAT0_DONE;
++ hdmi_writeb(val, HDMI_CEC_POLARITY);
++
++ val = HDMI_IH_CEC_STAT0_WAKEUP | HDMI_IH_CEC_STAT0_ERROR_FOLL |
++ HDMI_IH_CEC_STAT0_ARB_LOST;
++ hdmi_writeb(val, HDMI_CEC_MASK);
++ hdmi_writeb(val, HDMI_IH_MUTE_CEC_STAT0);
++ hdmi_writeb(0x0, HDMI_CEC_LOCK);
++ hdmi_writeb(0x02, HDMI_CEC_CTRL);
++}
++
++static void hdmi_cec_hwdisable(void)
++{
++ u8 val;
++
++ pr_debug("%s: \n", __func__);
++ hdmi_cec_state = false;
++
++ hdmi_writeb(0x10, HDMI_CEC_CTRL);
++
++ val = HDMI_IH_CEC_STAT0_WAKEUP | HDMI_IH_CEC_STAT0_ERROR_FOLL |
++ HDMI_IH_CEC_STAT0_ERROR_INIT | HDMI_IH_CEC_STAT0_ARB_LOST |
++ HDMI_IH_CEC_STAT0_NACK | HDMI_IH_CEC_STAT0_EOM |
++ HDMI_IH_CEC_STAT0_DONE;
++ hdmi_writeb(val, HDMI_CEC_MASK);
++ hdmi_writeb(val, HDMI_IH_MUTE_CEC_STAT0);
++
++ hdmi_writeb(0x0, HDMI_CEC_POLARITY);
++
++ val = hdmi_readb(HDMI_MC_CLKDIS);
++ val |= HDMI_MC_CLKDIS_CECCLK_DISABLE;
++ hdmi_writeb(val, HDMI_MC_CLKDIS);
++}
++
++static long hdmi_cec_set_address(u8 arg, struct hdmi_cec_priv *hdmi_cec)
++{
++
++ if (hdmi_cec->la == arg)
++ return 0;
++
++ pr_debug("%s: to %d\n", __func__, arg);
++
++ if (arg == 15)
++ hdmi_cec_root.nr_ff++;
++ if (hdmi_cec->la == 15)
++ hdmi_cec_root.nr_ff--;
++ else
++ hdmi_cec_root.addresses &= ~BIT(hdmi_cec->la);
++
++ hdmi_cec->la = arg;
++ if ((u8)arg != 0xff)
++ hdmi_cec_root.addresses |= BIT(arg);
++ else
++ wake_up(&hdmi_cec->hdmi_cec_qm);
++
++ if (!hdmi_cec_root.nr_ff)
++ hdmi_cec_root.addresses &= ~BIT(15);
++
++ hdmi_writeb(hdmi_cec_root.addresses & 0xff, HDMI_CEC_ADDR_L);
++	/*
++	 * Don't register LA 15 with the hardware: with it set, broadcast
++	 * messages are never sent (the CEC controller considers them local).
++	 */
++ hdmi_writeb((hdmi_cec_root.addresses & 0x7f00) >> 8, HDMI_CEC_ADDR_H);
++ return 0;
++}
++
++/*!
++ * @brief ioctl function for CEC file operation
++ * @param cmd IO ctrl command
++ * @return 0 on success or negative error code on error
++ */
++static long hdmi_cec_ioctl(struct file *filp, u_int cmd,
++ u_long arg)
++{
++ int ret = 0, i;
++ struct hdmi_cec_priv *hdmi_cec = filp->private_data;
++
++ pr_debug("%s: \n", __func__);
++
++ if (!open_count)
++ return -ENODEV;
++
++ switch (cmd) {
++ case HDMICEC_IOC_SETLOGICALADDRESS:
++ mutex_lock(&hdmi_cec_root.m_lock_cl);
++ ret = hdmi_cec_set_address(arg, hdmi_cec);
++ mutex_unlock(&hdmi_cec_root.m_lock_cl);
++		/*
++		 * In case we have more clients, inform them about the PA change.
++		 * (If libCEC is not in monitoring mode, it won't allow more
++		 * clients with the same PA - it changes all previous holders of
++		 * that 'taken' PA to 1000.)
++		 * To avoid that, we expand the PA further by replacing the first
++		 * empty dimension with the LA: for instance, if our PA is
++		 * 2.2.0.0, we change all concurrent clients to 2.2.X.0, where X
++		 * is the actual LA.
++		 */
++ if (open_count > 1 && arg != 15)
++ mxc_hdmi_cec_handle(hdmi_cec_root.physical_address);
++ break;
++
++ case HDMICEC_IOC_STARTDEVICE:
++ if (!hdmi_cec_state)
++ hdmi_cec_hwenable();
++ break;
++
++ case HDMICEC_IOC_STOPDEVICE:
++ hdmi_cec_set_address(-1, hdmi_cec);
++ if (hdmi_cec_state && open_count < 2)
++ hdmi_cec_hwdisable();
++ break;
++
++	case HDMICEC_IOC_GETPHYADDRESS:
++		ret = copy_to_user((void __user *)arg, &hdmi_cec_root.physical_address,
++				4*sizeof(u8)) ? -EFAULT : 0;
++		if (!ret && open_count > 1) {
++			u8 pa[4];
++
++			memcpy(pa, &hdmi_cec_root.physical_address, sizeof(pa));
++			/* patch the first empty PA dimension with our LA, bounds-checked
++			 * and via put_user() instead of dereferencing the user pointer */
++			for (i = 0; i < 4 && pa[i] != 0x0; i++)
++				;
++			if (i < 4 && put_user(hdmi_cec->la, (u8 __user *)arg + i))
++				ret = -EFAULT;
++		}
++		break;
++
++ default:
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++void hdmi_cec_start_device(void)
++{
++	if (open_count && hdmi_cec_ready && !hdmi_cec_state)
++ hdmi_cec_hwenable();
++}
++EXPORT_SYMBOL(hdmi_cec_start_device);
++
++void hdmi_cec_stop_device(void)
++{
++	if (hdmi_cec_ready && hdmi_cec_state)
++ hdmi_cec_hwdisable();
++}
++EXPORT_SYMBOL(hdmi_cec_stop_device);
++
++/*!
++* @brief Release function for CEC file operation
++* @return 0 on success or negative error code on error
++*/
++static int hdmi_cec_release(struct inode *inode, struct file *filp)
++{
++ struct hdmi_cec_priv *hdmi_cec = filp->private_data;
++ struct hdmi_cec_priv *client, *t;
++ unsigned long flags;
++
++ pr_debug("%s: \n", __func__);
++
++ mutex_lock(&hdmi_cec_root.m_lock_cl);
++ spin_lock_irqsave(&hdmi_cec->i_lock_cl, flags);
++
++ hdmi_cec_set_address(-1, hdmi_cec);
++ if (open_count > 0)
++ open_count--;
++ if (!open_count)
++ hdmi_cec_hwdisable();
++ while (!list_empty(&hdmi_cec->msg_head)) {
++ struct hdmi_cec_event_list *event = NULL;
++
++ event = list_first_entry(&hdmi_cec->msg_head, struct hdmi_cec_event_list, msg_node);
++ list_del(&event->msg_node);
++ kfree(event);
++ }
++
++ spin_unlock_irqrestore(&hdmi_cec->i_lock_cl, flags);
++
++ list_for_each_entry_safe(client, t, &hdmi_cec_root.client_head, client_node) {
++ if (client == hdmi_cec) {
++ list_del(&client->client_node);
++ filp->private_data = NULL;
++ kfree(client);
++ }
++ }
++
++ mutex_unlock(&hdmi_cec_root.m_lock_cl);
++ return 0;
++}
++
++static unsigned int hdmi_cec_poll(struct file *file, poll_table *wait)
++{
++ unsigned int mask = 0;
++ struct hdmi_cec_priv *hdmi_cec = file->private_data;
++
++ pr_debug("%s: poll client %lx, la %d\n", __func__, (unsigned long)hdmi_cec, hdmi_cec->la);
++
++ if (hdmi_cec->la == 0xff)
++ return POLLHUP;
++
++ poll_wait(file, &hdmi_cec->hdmi_cec_qm, wait);
++ poll_wait(file, &hdmi_cec_qw, wait);
++
++ if (!hdmi_cec_root.write_busy)
++ mask = (POLLOUT | POLLWRNORM);
++ if (!list_empty(&hdmi_cec->msg_head))
++ mask |= (POLLIN | POLLRDNORM);
++
++ return mask;
++}
++
++static const struct file_operations hdmi_cec_fops = {
++ .owner = THIS_MODULE,
++ .read = hdmi_cec_read,
++ .write = hdmi_cec_write,
++ .open = hdmi_cec_open,
++ .unlocked_ioctl = hdmi_cec_ioctl,
++ .release = hdmi_cec_release,
++ .poll = hdmi_cec_poll,
++};
++
++static int hdmi_cec_dev_probe(struct platform_device *pdev)
++{
++ int err = 0;
++ struct device *temp_class;
++ struct resource *res;
++ struct pinctrl *pinctrl;
++ int irq = platform_get_irq(pdev, 0);
++
++ hdmi_cec_major = register_chrdev(hdmi_cec_major, "mxc_hdmi_cec", &hdmi_cec_fops);
++ if (hdmi_cec_major < 0) {
++ dev_err(&pdev->dev, "%s: unable to get a major for HDMI CEC\n", __func__);
++ err = -EBUSY;
++ goto out;
++ }
++
++ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++ if (unlikely(res == NULL)) {
++ dev_err(&pdev->dev, "%s: No HDMI irq line provided\n", __func__);
++		err = -ENOENT;
++		goto err_out_chrdev;
++ }
++ spin_lock_init(&hdmi_cec_root.i_lock);
++
++ err = devm_request_irq(&pdev->dev, irq, mxc_hdmi_cec_isr, IRQF_SHARED,
++ dev_name(&pdev->dev), &hdmi_cec_root);
++ if (err < 0) {
++ dev_err(&pdev->dev, "%s: Unable to request irq: %d\n", __func__, err);
++ goto err_out_chrdev;
++ }
++
++ hdmi_cec_class = class_create(THIS_MODULE, "mxc_hdmi_cec");
++ if (IS_ERR(hdmi_cec_class)) {
++ err = PTR_ERR(hdmi_cec_class);
++ goto err_out_chrdev;
++ }
++
++ temp_class = device_create(hdmi_cec_class, NULL,
++ MKDEV(hdmi_cec_major, 0), NULL, "mxc_hdmi_cec");
++ if (IS_ERR(temp_class)) {
++ err = PTR_ERR(temp_class);
++ goto err_out_class;
++ }
++
++ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
++ if (IS_ERR(pinctrl)) {
++ dev_err(&pdev->dev, "%s: can't get/select CEC pinctrl\n", __func__);
++		err = PTR_ERR(pinctrl);
++		goto err_out_class;
++ }
++
++ init_waitqueue_head(&hdmi_cec_qs);
++ init_waitqueue_head(&hdmi_cec_qw);
++
++ INIT_LIST_HEAD(&hdmi_cec_root.client_head);
++
++ mutex_init(&hdmi_cec_root.m_lock_cl);
++ hdmi_cec_root.addresses = 0;
++ platform_set_drvdata(pdev, &hdmi_cec_root);
++ INIT_DELAYED_WORK(&hdmi_cec_root.hdmi_cec_work, mxc_hdmi_cec_worker);
++ INIT_DELAYED_WORK(&hdmi_cec_root.hdmi_msg_trigger, mxc_hdmi_cec_msg_trigger);
++
++ dev_info(&pdev->dev, "%s: HDMI CEC initialized\n", __func__);
++ hdmi_cec_ready = 1;
++ goto out;
++
++err_out_class:
++ device_destroy(hdmi_cec_class, MKDEV(hdmi_cec_major, 0));
++ class_destroy(hdmi_cec_class);
++err_out_chrdev:
++ unregister_chrdev(hdmi_cec_major, "mxc_hdmi_cec");
++out:
++ return err;
++}
++
++static int hdmi_cec_dev_remove(struct platform_device *pdev)
++{
++ if (hdmi_cec_major > 0) {
++ flush_scheduled_work();
++ device_destroy(hdmi_cec_class, MKDEV(hdmi_cec_major, 0));
++ class_destroy(hdmi_cec_class);
++ unregister_chrdev(hdmi_cec_major, "mxc_hdmi_cec");
++ hdmi_cec_major = 0;
++ }
++ return 0;
++}
++
++static const struct of_device_id imx_hdmi_cec_match[] = {
++ { .compatible = "fsl,imx6q-hdmi-cec", },
++ { .compatible = "fsl,imx6dl-hdmi-cec", },
++ { /* sentinel */ }
++};
++
++static struct platform_driver mxc_hdmi_cec_driver = {
++ .probe = hdmi_cec_dev_probe,
++ .remove = hdmi_cec_dev_remove,
++ .driver = {
++ .name = "mxc_hdmi_cec",
++ .of_match_table = imx_hdmi_cec_match,
++ },
++};
++
++module_platform_driver(mxc_hdmi_cec_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("Linux HDMI CEC driver for Freescale i.MX/MXC");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:mxc_hdmi_cec");
++
+diff -Nur linux-4.1.3/drivers/mxc/hdmi-cec/mxc_hdmi-cec.h linux-xbian-imx6/drivers/mxc/hdmi-cec/mxc_hdmi-cec.h
+--- linux-4.1.3/drivers/mxc/hdmi-cec/mxc_hdmi-cec.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/hdmi-cec/mxc_hdmi-cec.h 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,53 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++#ifndef _HDMICEC_H_
++#define _HDMICEC_H_
++#include <linux/ioctl.h>
++
++/*
++ * Ioctl definitions
++ */
++
++/* Use 'H' as magic number */
++#define HDMICEC_IOC_MAGIC 'H'
++/*
++ * S means "Set" through a ptr,
++ * T means "Tell" directly with the argument value
++ * G means "Get": reply by setting through a pointer
++ * Q means "Query": response is on the return value
++ * X means "eXchange": G and S atomically
++ * H means "sHift": T and Q atomically
++ */
++#define HDMICEC_IOC_SETLOGICALADDRESS \
++ _IOW(HDMICEC_IOC_MAGIC, 1, unsigned char)
++#define HDMICEC_IOC_STARTDEVICE _IO(HDMICEC_IOC_MAGIC, 2)
++#define HDMICEC_IOC_STOPDEVICE _IO(HDMICEC_IOC_MAGIC, 3)
++#define HDMICEC_IOC_GETPHYADDRESS \
++ _IOR(HDMICEC_IOC_MAGIC, 4, unsigned char[4])
++#define HDMICEC_IOC_LOG \
++ _IOW(HDMICEC_IOC_MAGIC, 5, unsigned char[255])
++
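++/*
++ * Illustrative user-space sequence (a sketch only - the device node name
++ * assumes the "mxc_hdmi_cec" char device created by the driver is exposed
++ * under /dev as-is):
++ *
++ *	int fd = open("/dev/mxc_hdmi_cec", O_RDWR);
++ *	ioctl(fd, HDMICEC_IOC_STARTDEVICE);
++ *	ioctl(fd, HDMICEC_IOC_SETLOGICALADDRESS, 4);	// 4 = playback device 1
++ *	unsigned char msg[] = { (4 << 4) | 0, 0x04 };	// <Image View On> to the TV
++ *	write(fd, msg, sizeof(msg));	// msg[0] = (initiator << 4) | destination
++ */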
++#define MAX_MESSAGE_LEN 16
++
++#define MESSAGE_TYPE_RECEIVE_SUCCESS 1
++#define MESSAGE_TYPE_NOACK 2
++#define MESSAGE_TYPE_DISCONNECTED 3
++#define MESSAGE_TYPE_CONNECTED 4
++#define MESSAGE_TYPE_SEND_SUCCESS 5
++
++#define SIGNAL_FREE_LOST BIT(2)
++#define SIGNAL_FREE_TIME_NORMAL BIT(1)
++#define SIGNAL_FREE_TIME_RESEND 0
++
++#endif /* !_HDMICEC_H_ */
++
+diff -Nur linux-4.1.3/drivers/mxc/ipu3/ipu_calc_stripes_sizes.c linux-xbian-imx6/drivers/mxc/ipu3/ipu_calc_stripes_sizes.c
+--- linux-4.1.3/drivers/mxc/ipu3/ipu_calc_stripes_sizes.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/ipu3/ipu_calc_stripes_sizes.c 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,495 @@
++/*
++ * Copyright 2009-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*
++ * @file ipu_calc_stripes_sizes.c
++ *
++ * @brief IPU IC functions
++ *
++ * @ingroup IPU
++ */
++
++#include <linux/ipu-v3.h>
++#include <linux/module.h>
++#include <linux/math64.h>
++
++#define BPP_32 0
++#define BPP_16 3
++#define BPP_8 5
++#define BPP_24 1
++#define BPP_12 4
++#define BPP_18 2
++
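++/* Round the 32.32 fixed-point value @a down (@up == 0) or up to the nearest
++ * integer multiple of @b, returning the result as a plain integer. */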
++static u32 truncate(u32 up, /* 0: down; else: up */
++ u64 a, /* must be non-negative */
++ u32 b)
++{
++ u32 d;
++ u64 div;
++ div = div_u64(a, b);
++ d = b * (div >> 32);
++ if (up && (a > (((u64)d) << 32)))
++ return d+b;
++ else
++ return d;
++}
++
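++/* Column-alignment step implied by the pixel format: 16 for the planar YUV
++ * formats, 8 for NV12 and the packed formats, otherwise derived from the
++ * bits-per-pixel code. */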
++static unsigned int f_calc(unsigned int pfs, unsigned int bpp, unsigned int *write)
++{/* return input_f */
++ unsigned int f_calculated = 0;
++ switch (pfs) {
++ case IPU_PIX_FMT_YVU422P:
++ case IPU_PIX_FMT_YUV422P:
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YUV420P:
++ case IPU_PIX_FMT_YVU420P:
++ case IPU_PIX_FMT_YUV444P:
++ f_calculated = 16;
++ break;
++
++ case IPU_PIX_FMT_RGB565:
++ case IPU_PIX_FMT_YUYV:
++ case IPU_PIX_FMT_UYVY:
++ f_calculated = 8;
++ break;
++
++ case IPU_PIX_FMT_NV12:
++ f_calculated = 8;
++ break;
++
++ default:
++ f_calculated = 0;
++ break;
++
++ }
++ if (!f_calculated) {
++ switch (bpp) {
++ case BPP_32:
++ f_calculated = 2;
++ break;
++
++ case BPP_16:
++ f_calculated = 4;
++ break;
++
++ case BPP_8:
++ case BPP_24:
++ f_calculated = 8;
++ break;
++
++ case BPP_12:
++ f_calculated = 16;
++ break;
++
++ case BPP_18:
++ f_calculated = 32;
++ break;
++
++ default:
++ f_calculated = 0;
++ break;
++ }
++ }
++ return f_calculated;
++}
++
++
++static unsigned int m_calc(unsigned int pfs)
++{
++ unsigned int m_calculated = 0;
++ switch (pfs) {
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YUV420P:
++ case IPU_PIX_FMT_YVU422P:
++ case IPU_PIX_FMT_YUV422P:
++ case IPU_PIX_FMT_YVU420P:
++ case IPU_PIX_FMT_YUV444P:
++ m_calculated = 16;
++ break;
++
++ case IPU_PIX_FMT_NV12:
++ case IPU_PIX_FMT_YUYV:
++ case IPU_PIX_FMT_UYVY:
++ m_calculated = 8;
++ break;
++
++ default:
++ m_calculated = 8;
++ break;
++
++ }
++ return m_calculated;
++}
++
++static int calc_split_resize_coeffs(unsigned int inSize, unsigned int outSize,
++ unsigned int *resizeCoeff,
++ unsigned int *downsizeCoeff)
++{
++ uint32_t tempSize;
++ uint32_t tempDownsize;
++
++ if (inSize > 4096) {
++ pr_debug("IC input size(%d) cannot exceed 4096\n",
++ inSize);
++ return -EINVAL;
++ }
++
++ if (outSize > 1024) {
++ pr_debug("IC output size(%d) cannot exceed 1024\n",
++ outSize);
++ return -EINVAL;
++ }
++
++ if ((outSize << 3) < inSize) {
++ pr_debug("IC cannot downsize more than 8:1\n");
++ return -EINVAL;
++ }
++
++ /* Compute downsizing coefficient */
++ /* Output of downsizing unit cannot be more than 1024 */
++ tempDownsize = 0;
++ tempSize = inSize;
++ while (((tempSize > 1024) || (tempSize >= outSize * 2)) &&
++ (tempDownsize < 2)) {
++ tempSize >>= 1;
++ tempDownsize++;
++ }
++ *downsizeCoeff = tempDownsize;
++
++ /* compute resizing coefficient using the following equation:
++ resizeCoeff = M*(SI -1)/(SO - 1)
++ where M = 2^13, SI - input size, SO - output size */
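++	/* Worked example: tempSize = 960, outSize = 720 gives
++	   8192 * 959 / 719 = 10926, i.e. ~1.334 (959/719) in 1.13 fixed point. */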
++ *resizeCoeff = (8192L * (tempSize - 1)) / (outSize - 1);
++ if (*resizeCoeff >= 16384L) {
++ pr_debug("Overflow on IC resize coefficient.\n");
++ return -EINVAL;
++ }
++
++ pr_debug("resizing from %u -> %u pixels, "
++ "downsize=%u, resize=%u.%lu (reg=%u)\n", inSize, outSize,
++ *downsizeCoeff, (*resizeCoeff >= 8192L) ? 1 : 0,
++ ((*resizeCoeff & 0x1FFF) * 10000L) / 8192L, *resizeCoeff);
++
++ return 0;
++}
++
++/* Stripe parameters calculator */
++/**************************************************************************
++Notes:
++MSW = the maximal width allowed for a stripe
++ i.MX31: 720, i.MX35: 800, i.MX37/51/53: 1024
++cirr = the maximal inverse resizing ratio for which overlap in the input
++ is requested; typically cirr~2
++flags
++ bit 0 - equal_stripes
++ 0 each stripe is allowed to have independent parameters
++ for maximal image quality
++ 1 the stripes are requested to have identical parameters
++ (except the base address), for maximal performance
++ bit 1 - vertical/horizontal
++ 0 horizontal
++ 1 vertical
++
++If performance is the top priority (above image quality)
++ Avoid overlap, by setting CIRR = 0
++ This will also force effectively identical_stripes = 1
++ Choose IF & OF that corresponds to the same IOX/SX for both stripes
++ Choose IFW & OFW such that
++ IFW/IM, IFW/IF, OFW/OM, OFW/OF are even integers
++ The function returns an error status:
++ 0: no error
++ 1: invalid input parameters -> aborted without result
++ Valid parameters should satisfy the following conditions
++ IFW <= OFW, otherwise downsizing is required
++ - which is not supported yet
++ 4 <= IFW,OFW, so some interpolation may be needed even without overlap
++ IM, OM, IF, OF should not vanish
++ 2*IF <= IFW
++ so the frame can be split to two equal stripes, even without overlap
++ 2*(OF+IF/irr_opt) <= OFW
++ so a valid positive INW exists even for equal stripes
++ OF <= MSW, otherwise, the left stripe cannot be sufficiently large
++ MSW < OFW, so splitting to stripes is required
++ OFW <= 2*MSW, so two stripes are sufficient
++ (this also implies that 2<=MSW)
++ 2: OF is not a multiple of OM - not fully-supported yet
++ Output is produced but OW is not guaranteed to be a multiple of OM
++ 4: OFW reduced to be a multiple of OM
++ 8: CIRR > 1: truncated to 1
++ Overlap is not supported (and is not needed for upsizing)
++**************************************************************************/
++int ipu_calc_stripes_sizes(const unsigned int input_frame_width,
++ /* input frame width;>1 */
++ unsigned int output_frame_width, /* output frame width; >1 */
++ const unsigned int maximal_stripe_width,
++ /* the maximal width allowed for a stripe */
++ const unsigned long long cirr, /* see above */
++ const unsigned int flags, /* see above */
++ u32 input_pixelformat,/* pixel format after of read channel*/
++ u32 output_pixelformat,/* pixel format after of write channel*/
++ struct stripe_param *left,
++ struct stripe_param *right)
++{
++ const unsigned int irr_frac_bits = 13;
++ const unsigned long irr_steps = 1 << irr_frac_bits;
++ const u64 dirr = ((u64)1) << (32 - 2);
++ /* The maximum relative difference allowed between the irrs */
++ const u64 cr = ((u64)4) << 32;
++ /* The importance ratio between the two terms in the cost function below */
++
++ unsigned int status;
++ unsigned int temp;
++ unsigned int onw_min;
++ unsigned int inw = 0, onw = 0, inw_best = 0;
++ /* number of pixels in the left stripe NOT hidden by the right stripe */
++ u64 irr_opt; /* the optimal inverse resizing ratio */
++ u64 rr_opt; /* the optimal resizing ratio = 1/irr_opt*/
++ u64 dinw; /* the misalignment between the stripes */
++ /* (measured in units of input columns) */
++ u64 difwl, difwr = 0;
++ /* The number of input columns not reflected in the output */
++ /* the resizing ratio used for the right stripe is */
++ /* left->irr and right->irr respectively */
++ u64 cost, cost_min;
++ u64 div; /* result of division */
++ bool equal_stripes = (flags & 0x1) != 0;
++ bool vertical = (flags & 0x2) != 0;
++
++ unsigned int input_m, input_f, output_m, output_f; /* parameters for upsizing by stripes */
++ unsigned int resize_coeff;
++ unsigned int downsize_coeff;
++
++ status = 0;
++
++ if (vertical) {
++ input_f = 2;
++ input_m = 8;
++ output_f = 8;
++ output_m = 2;
++ } else {
++ input_f = f_calc(input_pixelformat, 0, NULL);
++ input_m = m_calc(input_pixelformat);
++ output_f = input_m;
++ output_m = m_calc(output_pixelformat);
++ }
++ if ((input_frame_width < 4) || (output_frame_width < 4))
++ return 1;
++
++ irr_opt = div_u64((((u64)(input_frame_width - 1)) << 32),
++ (output_frame_width - 1));
++ rr_opt = div_u64((((u64)(output_frame_width - 1)) << 32),
++ (input_frame_width - 1));
++
++ if ((input_m == 0) || (output_m == 0) || (input_f == 0) || (output_f == 0)
++ || (input_frame_width < (2 * input_f))
++ || ((((u64)output_frame_width) << 32) <
++ (2 * ((((u64)output_f) << 32) + (input_f * rr_opt))))
++ || (maximal_stripe_width < output_f)
++ || ((output_frame_width <= maximal_stripe_width)
++ && (equal_stripes == 0))
++ || ((2 * maximal_stripe_width) < output_frame_width))
++ return 1;
++
++ if (output_f % output_m)
++ status += 2;
++
++ temp = truncate(0, (((u64)output_frame_width) << 32), output_m);
++ if (temp < output_frame_width) {
++ output_frame_width = temp;
++ status += 4;
++ }
++
++ pr_debug("---------------->\n"
++ "if = %d\n"
++ "im = %d\n"
++ "of = %d\n"
++ "om = %d\n"
++ "irr_opt = %llu\n"
++ "rr_opt = %llu\n"
++ "cirr = %llu\n"
++ "pixel in = %08x\n"
++ "pixel out = %08x\n"
++ "ifw = %d\n"
++ "ofwidth = %d\n",
++ input_f,
++ input_m,
++ output_f,
++ output_m,
++ irr_opt,
++ rr_opt,
++ cirr,
++ input_pixelformat,
++ output_pixelformat,
++ input_frame_width,
++ output_frame_width
++ );
++
++ if (equal_stripes) {
++ if ((irr_opt > cirr) /* overlap in the input is not requested */
++ && ((input_frame_width % (input_m << 1)) == 0)
++ && ((input_frame_width % (input_f << 1)) == 0)
++ && ((output_frame_width % (output_m << 1)) == 0)
++ && ((output_frame_width % (output_f << 1)) == 0)) {
++ /* without overlap */
++ left->input_width = right->input_width = right->input_column =
++ input_frame_width >> 1;
++ left->output_width = right->output_width = right->output_column =
++ output_frame_width >> 1;
++ left->input_column = 0;
++ left->output_column = 0;
++ div = div_u64(((((u64)irr_steps) << 32) *
++ (right->input_width - 1)), (right->output_width - 1));
++ left->irr = right->irr = truncate(0, div, 1);
++ } else { /* with overlap */
++ onw = truncate(0, (((u64)output_frame_width - 1) << 32) >> 1,
++ output_f);
++ inw = truncate(0, onw * irr_opt, input_f);
++ /* this is the maximal inw which allows the same resizing ratio */
++ /* in both stripes */
++ onw = truncate(1, (inw * rr_opt), output_f);
++ div = div_u64((((u64)(irr_steps * inw)) <<
++ 32), onw);
++ left->irr = right->irr = truncate(0, div, 1);
++ left->output_width = right->output_width =
++ output_frame_width - onw;
++ /* These are valid assignments for output_width, */
++ /* assuming output_f is a multiple of output_m */
++ div = (((u64)(left->output_width-1) * (left->irr)) << 32);
++ div = (((u64)1) << 32) + div_u64(div, irr_steps);
++
++ left->input_width = right->input_width = truncate(1, div, input_m);
++
++ div = div_u64((((u64)((right->output_width - 1) * right->irr)) <<
++ 32), irr_steps);
++ difwr = (((u64)(input_frame_width - 1 - inw)) << 32) - div;
++ div = div_u64((difwr + (((u64)input_f) << 32)), 2);
++ left->input_column = truncate(0, div, input_f);
++
++
++ /* This splits the truncated input columns evenly */
++ /* between the left and right margins */
++ right->input_column = left->input_column + inw;
++ left->output_column = 0;
++ right->output_column = onw;
++ }
++ if (left->input_width > left->output_width) {
++ if (calc_split_resize_coeffs(left->input_width,
++ left->output_width,
++ &resize_coeff,
++ &downsize_coeff) < 0)
++ return -EINVAL;
++
++ if (downsize_coeff > 0) {
++ left->irr = right->irr =
++ (downsize_coeff << 14) | resize_coeff;
++ }
++ }
++ pr_debug("inw %d, onw %d, ilw %d, ilc %d, olw %d,"
++ " irw %d, irc %d, orw %d, orc %d, "
++ "difwr %llu, lirr %u\n",
++ inw, onw, left->input_width,
++ left->input_column, left->output_width,
++ right->input_width, right->input_column,
++ right->output_width,
++ right->output_column, difwr, left->irr);
++ } else { /* independent stripes */
++ onw_min = output_frame_width - maximal_stripe_width;
++ /* onw is a multiple of output_f, in the range */
++ /* [max(output_f,output_frame_width-maximal_stripe_width),*/
++ /*min(output_frame_width-2,maximal_stripe_width)] */
++ /* definitely beyond the cost of any valid setting */
++ cost_min = (((u64)input_frame_width) << 32) + cr;
++ onw = truncate(0, ((u64)maximal_stripe_width), output_f);
++ if (output_frame_width - onw == 1)
++ onw -= output_f; /* => onw and output_frame_width-1-onw are positive */
++ inw = truncate(0, onw * irr_opt, input_f);
++ /* this is the maximal inw which allows the same resizing ratio */
++ /* in both stripes */
++ onw = truncate(1, inw * rr_opt, output_f);
++ do {
++ div = div_u64((((u64)(irr_steps * inw)) << 32), onw);
++ left->irr = truncate(0, div, 1);
++ div = div_u64((((u64)(onw * left->irr)) << 32),
++ irr_steps);
++ dinw = (((u64)inw) << 32) - div;
++
++ div = div_u64((((u64)((output_frame_width - 1 - onw) * left->irr)) <<
++ 32), irr_steps);
++
++ difwl = (((u64)(input_frame_width - 1 - inw)) << 32) - div;
++
++ cost = difwl + (((u64)(cr * dinw)) >> 32);
++
++ if (cost < cost_min) {
++ inw_best = inw;
++ cost_min = cost;
++ }
++
++ inw -= input_f;
++ onw = truncate(1, inw * rr_opt, output_f);
++ /* This is the minimal onw which allows the same resizing ratio */
++ /* in both stripes */
++ } while (onw >= onw_min);
++
++ inw = inw_best;
++ onw = truncate(1, inw * rr_opt, output_f);
++ div = div_u64((((u64)(irr_steps * inw)) << 32), onw);
++ left->irr = truncate(0, div, 1);
++
++ left->output_width = onw;
++ right->output_width = output_frame_width - onw;
++ /* These are valid assignments for output_width, */
++ /* assuming output_f is a multiple of output_m */
++ left->input_width = truncate(1, ((u64)(inw + 1)) << 32, input_m);
++ right->input_width = truncate(1, ((u64)(input_frame_width - inw)) <<
++ 32, input_m);
++
++ div = div_u64((((u64)(irr_steps * (input_frame_width - 1 - inw))) <<
++ 32), (right->output_width - 1));
++ right->irr = truncate(0, div, 1);
++ temp = truncate(0, ((u64)left->irr) * ((((u64)1) << 32) + dirr), 1);
++ if (temp < right->irr)
++ right->irr = temp;
++ div = div_u64(((u64)((right->output_width - 1) * right->irr) <<
++ 32), irr_steps);
++ difwr = (u64)(input_frame_width - 1 - inw) - div;
++
++
++ div = div_u64((difwr + (((u64)input_f) << 32)), 2);
++ left->input_column = truncate(0, div, input_f);
++
++ /* This splits the truncated input columns evenly */
++ /* between the left and right margins */
++ right->input_column = left->input_column + inw;
++ left->output_column = 0;
++ right->output_column = onw;
++ if (left->input_width > left->output_width) {
++ if (calc_split_resize_coeffs(left->input_width,
++ left->output_width,
++ &resize_coeff,
++ &downsize_coeff) < 0)
++ return -EINVAL;
++ left->irr = (downsize_coeff << 14) | resize_coeff;
++ }
++ if (right->input_width > right->output_width) {
++ if (calc_split_resize_coeffs(right->input_width,
++ right->output_width,
++ &resize_coeff,
++ &downsize_coeff) < 0)
++ return -EINVAL;
++ right->irr = (downsize_coeff << 14) | resize_coeff;
++ }
++ }
++ return status;
++}
++EXPORT_SYMBOL(ipu_calc_stripes_sizes);
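++
++/*
++ * Worked example (illustrative only, with assumed parameters): in the
++ * equal_stripes path above, when no input overlap is needed the frame is
++ * simply halved. For a hypothetical 1920-pixel input downscaled to 960
++ * output pixels (with input_m/input_f/output_m/output_f all dividing the
++ * halves evenly):
++ *
++ *	left->input_width  = right->input_width  = 1920 >> 1 = 960;
++ *	left->output_width = right->output_width =  960 >> 1 = 480;
++ *	right->input_column  = 960;
++ *	right->output_column = 480;
++ *	left->irr = right->irr =
++ *		truncate(0, div_u64((((u64)irr_steps) << 32) * 959, 479), 1);
++ */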
+diff -Nur linux-4.1.3/drivers/mxc/ipu3/ipu_capture.c linux-xbian-imx6/drivers/mxc/ipu3/ipu_capture.c
+--- linux-4.1.3/drivers/mxc/ipu3/ipu_capture.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/ipu3/ipu_capture.c 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,816 @@
++/*
++ * Copyright 2008-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_capture.c
++ *
++ * @brief IPU capture base functions
++ *
++ * @ingroup IPU
++ */
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/ipu-v3.h>
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <linux/types.h>
++
++#include "ipu_prv.h"
++#include "ipu_regs.h"
++
++/*!
++ * _ipu_csi_mclk_set
++ *
++ * @param ipu ipu handler
++ * @param pixel_clk desired pixel clock frequency in Hz
++ * @param csi csi 0 or csi 1
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int _ipu_csi_mclk_set(struct ipu_soc *ipu, uint32_t pixel_clk, uint32_t csi)
++{
++ uint32_t temp;
++ uint32_t div_ratio;
++
++ div_ratio = (clk_get_rate(ipu->ipu_clk) / pixel_clk) - 1;
++
++	if (div_ratio > 0xFF) {
++		dev_dbg(ipu->dev, "value of pixel_clk exceeds normal range\n");
++ return -EINVAL;
++ }
++
++ temp = ipu_csi_read(ipu, csi, CSI_SENS_CONF);
++ temp &= ~CSI_SENS_CONF_DIVRATIO_MASK;
++ ipu_csi_write(ipu, csi, temp |
++ (div_ratio << CSI_SENS_CONF_DIVRATIO_SHIFT),
++ CSI_SENS_CONF);
++
++ return 0;
++}
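++
++/*
++ * Worked example (illustrative, with assumed clock rates): for an HSP
++ * (ipu_clk) rate of 264 MHz and a requested pixel clock of 24 MHz,
++ *
++ *	div_ratio = (264000000 / 24000000) - 1 = 10,
++ *
++ * so MCLK = 264 MHz / (10 + 1) = 24 MHz. A request that would need a
++ * divider above 0xFF is rejected with -EINVAL.
++ */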
++
++/*!
++ * ipu_csi_init_interface
++ * Sets initial values for the CSI registers.
++ * The width and height of the sensor and the actual frame size will be
++ * set to the same values.
++ * @param ipu ipu handler
++ * @param width Sensor width
++ * @param height Sensor height
++ * @param pixel_fmt pixel format
++ * @param cfg_param ipu_csi_signal_cfg_t structure
++ * @param csi csi 0 or csi 1
++ *
++ * @return 0 for success, -EINVAL for error
++ */
++int32_t
++ipu_csi_init_interface(struct ipu_soc *ipu, uint16_t width, uint16_t height,
++ uint32_t pixel_fmt, ipu_csi_signal_cfg_t cfg_param)
++{
++ uint32_t data = 0;
++ uint32_t csi = cfg_param.csi;
++
++	/*
++	 * Set SENS_DATA_FORMAT bits (8, 9 and 10).
++	 * RGB or YUV444 is 0, which is already the value in data, so it is
++	 * not set explicitly. This is also the default value if attempts
++	 * are made to set it to something invalid.
++	 */
++ switch (pixel_fmt) {
++ case IPU_PIX_FMT_YUYV:
++ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
++ break;
++ case IPU_PIX_FMT_UYVY:
++ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
++ break;
++ case IPU_PIX_FMT_RGB24:
++ case IPU_PIX_FMT_BGR24:
++ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_RGB_YUV444;
++ break;
++ case IPU_PIX_FMT_GENERIC:
++ case IPU_PIX_FMT_GENERIC_16:
++ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
++ break;
++ case IPU_PIX_FMT_RGB565:
++ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_RGB565;
++ break;
++ case IPU_PIX_FMT_RGB555:
++ cfg_param.data_fmt = CSI_SENS_CONF_DATA_FMT_RGB555;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ /* Set the CSI_SENS_CONF register remaining fields */
++ data |= cfg_param.data_width << CSI_SENS_CONF_DATA_WIDTH_SHIFT |
++ cfg_param.data_fmt << CSI_SENS_CONF_DATA_FMT_SHIFT |
++ cfg_param.data_pol << CSI_SENS_CONF_DATA_POL_SHIFT |
++ cfg_param.Vsync_pol << CSI_SENS_CONF_VSYNC_POL_SHIFT |
++ cfg_param.Hsync_pol << CSI_SENS_CONF_HSYNC_POL_SHIFT |
++ cfg_param.pixclk_pol << CSI_SENS_CONF_PIX_CLK_POL_SHIFT |
++ cfg_param.ext_vsync << CSI_SENS_CONF_EXT_VSYNC_SHIFT |
++ cfg_param.clk_mode << CSI_SENS_CONF_SENS_PRTCL_SHIFT |
++ cfg_param.pack_tight << CSI_SENS_CONF_PACK_TIGHT_SHIFT |
++ cfg_param.force_eof << CSI_SENS_CONF_FORCE_EOF_SHIFT |
++ cfg_param.data_en_pol << CSI_SENS_CONF_DATA_EN_POL_SHIFT;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ ipu_csi_write(ipu, csi, data, CSI_SENS_CONF);
++
++ /* Setup sensor frame size */
++ ipu_csi_write(ipu, csi, (width - 1) | (height - 1) << 16, CSI_SENS_FRM_SIZE);
++
++ /* Set CCIR registers */
++ if (cfg_param.clk_mode == IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE) {
++ ipu_csi_write(ipu, csi, 0x40030, CSI_CCIR_CODE_1);
++ ipu_csi_write(ipu, csi, 0xFF0000, CSI_CCIR_CODE_3);
++ } else if (cfg_param.clk_mode == IPU_CSI_CLK_MODE_CCIR656_INTERLACED) {
++ if (width == 720 && height == 625) {
++ /* PAL case */
++ /*
++ * Field0BlankEnd = 0x6, Field0BlankStart = 0x2,
++ * Field0ActiveEnd = 0x4, Field0ActiveStart = 0
++ */
++ ipu_csi_write(ipu, csi, 0x40596, CSI_CCIR_CODE_1);
++ /*
++ * Field1BlankEnd = 0x7, Field1BlankStart = 0x3,
++ * Field1ActiveEnd = 0x5, Field1ActiveStart = 0x1
++ */
++ ipu_csi_write(ipu, csi, 0xD07DF, CSI_CCIR_CODE_2);
++
++ ipu_csi_write(ipu, csi, 0xFF0000, CSI_CCIR_CODE_3);
++
++ } else if (width == 720 && height == 525) {
++ /* NTSC case */
++ /*
++ * Field0BlankEnd = 0x7, Field0BlankStart = 0x3,
++ * Field0ActiveEnd = 0x5, Field0ActiveStart = 0x1
++ */
++ ipu_csi_write(ipu, csi, 0xD07DF, CSI_CCIR_CODE_1);
++ /*
++ * Field1BlankEnd = 0x6, Field1BlankStart = 0x2,
++ * Field1ActiveEnd = 0x4, Field1ActiveStart = 0
++ */
++ ipu_csi_write(ipu, csi, 0x40596, CSI_CCIR_CODE_2);
++ ipu_csi_write(ipu, csi, 0xFF0000, CSI_CCIR_CODE_3);
++ } else {
++ dev_err(ipu->dev, "Unsupported CCIR656 interlaced "
++ "video mode\n");
++ mutex_unlock(&ipu->mutex_lock);
++ _ipu_put(ipu);
++ return -EINVAL;
++ }
++ _ipu_csi_ccir_err_detection_enable(ipu, csi);
++ } else if ((cfg_param.clk_mode ==
++ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR) ||
++ (cfg_param.clk_mode ==
++ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR) ||
++ (cfg_param.clk_mode ==
++ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR) ||
++ (cfg_param.clk_mode ==
++ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR)) {
++ ipu_csi_write(ipu, csi, 0x40030, CSI_CCIR_CODE_1);
++ ipu_csi_write(ipu, csi, 0xFF0000, CSI_CCIR_CODE_3);
++ _ipu_csi_ccir_err_detection_enable(ipu, csi);
++ } else if ((cfg_param.clk_mode == IPU_CSI_CLK_MODE_GATED_CLK) ||
++ (cfg_param.clk_mode == IPU_CSI_CLK_MODE_NONGATED_CLK)) {
++ _ipu_csi_ccir_err_detection_disable(ipu, csi);
++ }
++
++ dev_dbg(ipu->dev, "CSI_SENS_CONF = 0x%08X\n",
++ ipu_csi_read(ipu, csi, CSI_SENS_CONF));
++ dev_dbg(ipu->dev, "CSI_ACT_FRM_SIZE = 0x%08X\n",
++ ipu_csi_read(ipu, csi, CSI_ACT_FRM_SIZE));
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_csi_init_interface);
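++
++/*
++ * Minimal usage sketch (illustrative; the cfg values are assumptions,
++ * not taken from a real sensor): configuring CSI0 for a gated-clock
++ * 640x480 YUYV sensor would look roughly like
++ *
++ *	ipu_csi_signal_cfg_t cfg = {
++ *		.csi = 0,
++ *		.clk_mode = IPU_CSI_CLK_MODE_GATED_CLK,
++ *	};
++ *
++ *	ret = ipu_csi_init_interface(ipu, 640, 480, IPU_PIX_FMT_YUYV, cfg);
++ *
++ * with the data width and polarity fields filled in per the sensor's
++ * datasheet.
++ */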
++
++/*!
++ * ipu_csi_get_sensor_protocol
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ *
++ * @return Returns sensor protocol
++ */
++int32_t ipu_csi_get_sensor_protocol(struct ipu_soc *ipu, uint32_t csi)
++{
++ int ret;
++ _ipu_get(ipu);
++ ret = (ipu_csi_read(ipu, csi, CSI_SENS_CONF) &
++ CSI_SENS_CONF_SENS_PRTCL_MASK) >>
++ CSI_SENS_CONF_SENS_PRTCL_SHIFT;
++ _ipu_put(ipu);
++ return ret;
++}
++EXPORT_SYMBOL(ipu_csi_get_sensor_protocol);
++
++/*!
++ * ipu_csi_enable_mclk
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ * @param flag true to enable mclk, false to disable mclk
++ * @param	wait	true to wait 10ms for the clock to stabilize, false to skip the wait
++ *
++ * @return Returns 0 on success
++ */
++int ipu_csi_enable_mclk(struct ipu_soc *ipu, int csi, bool flag, bool wait)
++{
++ /* Return immediately if there is no csi_clk to manage */
++ if (ipu->csi_clk[csi] == NULL)
++ return 0;
++
++ if (flag) {
++ clk_enable(ipu->csi_clk[csi]);
++ if (wait == true)
++ msleep(10);
++ } else {
++ clk_disable(ipu->csi_clk[csi]);
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_csi_enable_mclk);
++
++/*!
++ * ipu_csi_get_window_size
++ *
++ * @param ipu ipu handler
++ * @param width pointer to window width
++ * @param height pointer to window height
++ * @param csi csi 0 or csi 1
++ */
++void ipu_csi_get_window_size(struct ipu_soc *ipu, uint32_t *width, uint32_t *height, uint32_t csi)
++{
++ uint32_t reg;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ reg = ipu_csi_read(ipu, csi, CSI_ACT_FRM_SIZE);
++ *width = (reg & 0xFFFF) + 1;
++ *height = (reg >> 16 & 0xFFFF) + 1;
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_csi_get_window_size);
++
++/*!
++ * ipu_csi_set_window_size
++ *
++ * @param ipu ipu handler
++ * @param width window width
++ * @param height window height
++ * @param csi csi 0 or csi 1
++ */
++void ipu_csi_set_window_size(struct ipu_soc *ipu, uint32_t width, uint32_t height, uint32_t csi)
++{
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ ipu_csi_write(ipu, csi, (width - 1) | (height - 1) << 16, CSI_ACT_FRM_SIZE);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_csi_set_window_size);
++
++/*!
++ * ipu_csi_set_window_pos
++ *
++ * @param ipu ipu handler
++ * @param left uint32 window x start
++ * @param top uint32 window y start
++ * @param csi csi 0 or csi 1
++ */
++void ipu_csi_set_window_pos(struct ipu_soc *ipu, uint32_t left, uint32_t top, uint32_t csi)
++{
++ uint32_t temp;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ temp = ipu_csi_read(ipu, csi, CSI_OUT_FRM_CTRL);
++ temp &= ~(CSI_HSC_MASK | CSI_VSC_MASK);
++ temp |= ((top << CSI_VSC_SHIFT) | (left << CSI_HSC_SHIFT));
++ ipu_csi_write(ipu, csi, temp, CSI_OUT_FRM_CTRL);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_csi_set_window_pos);
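++
++/*
++ * Cropping sketch (illustrative): to capture a centered 320x240 window
++ * from a 640x480 active frame, pair the two calls above:
++ *
++ *	ipu_csi_set_window_size(ipu, 320, 240, csi);
++ *	ipu_csi_set_window_pos(ipu, (640 - 320) / 2, (480 - 240) / 2, csi);
++ *
++ * i.e. left = 160 and top = 120 relative to the active frame origin.
++ */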
++
++/*!
++ * _ipu_csi_horizontal_downsize_enable
++ * Enable horizontal downsizing(decimation) by 2.
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_horizontal_downsize_enable(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_OUT_FRM_CTRL);
++ temp |= CSI_HORI_DOWNSIZE_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_OUT_FRM_CTRL);
++}
++
++/*!
++ * _ipu_csi_horizontal_downsize_disable
++ * Disable horizontal downsizing(decimation) by 2.
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_horizontal_downsize_disable(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_OUT_FRM_CTRL);
++ temp &= ~CSI_HORI_DOWNSIZE_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_OUT_FRM_CTRL);
++}
++
++/*!
++ * _ipu_csi_vertical_downsize_enable
++ * Enable vertical downsizing(decimation) by 2.
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_vertical_downsize_enable(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_OUT_FRM_CTRL);
++ temp |= CSI_VERT_DOWNSIZE_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_OUT_FRM_CTRL);
++}
++
++/*!
++ * _ipu_csi_vertical_downsize_disable
++ * Disable vertical downsizing(decimation) by 2.
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_vertical_downsize_disable(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_OUT_FRM_CTRL);
++ temp &= ~CSI_VERT_DOWNSIZE_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_OUT_FRM_CTRL);
++}
++
++/*!
++ * _ipu_csi_set_test_generator
++ *
++ * @param ipu ipu handler
++ * @param	active	true for active and false for inactive
++ * @param	r_value	red value for the generated pattern of even pixel
++ * @param	g_value	green value for the generated pattern of even
++ *			pixel
++ * @param	b_value	blue value for the generated pattern of even pixel
++ * @param	pix_clk	desired pixel clock frequency in Hz
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_set_test_generator(struct ipu_soc *ipu, bool active, uint32_t r_value,
++ uint32_t g_value, uint32_t b_value, uint32_t pix_clk, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_TST_CTRL);
++
++ if (active == false) {
++ temp &= ~CSI_TEST_GEN_MODE_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_TST_CTRL);
++ } else {
++ /* Set sensb_mclk div_ratio*/
++ _ipu_csi_mclk_set(ipu, pix_clk, csi);
++
++ temp &= ~(CSI_TEST_GEN_R_MASK | CSI_TEST_GEN_G_MASK |
++ CSI_TEST_GEN_B_MASK);
++ temp |= CSI_TEST_GEN_MODE_EN;
++ temp |= (r_value << CSI_TEST_GEN_R_SHIFT) |
++ (g_value << CSI_TEST_GEN_G_SHIFT) |
++ (b_value << CSI_TEST_GEN_B_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_TST_CTRL);
++ }
++}
++
++/*!
++ * _ipu_csi_ccir_err_detection_enable
++ * Enable error detection and correction for
++ * CCIR interlaced mode with protection bit.
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_ccir_err_detection_enable(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_CCIR_CODE_1);
++ temp |= CSI_CCIR_ERR_DET_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_CCIR_CODE_1);
++}
++
++/*!
++ * _ipu_csi_ccir_err_detection_disable
++ * Disable error detection and correction for
++ * CCIR interlaced mode with protection bit.
++ *
++ * @param ipu ipu handler
++ * @param csi csi 0 or csi 1
++ */
++void _ipu_csi_ccir_err_detection_disable(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_csi_read(ipu, csi, CSI_CCIR_CODE_1);
++ temp &= ~CSI_CCIR_ERR_DET_EN;
++ ipu_csi_write(ipu, csi, temp, CSI_CCIR_CODE_1);
++}
++
++/*!
++ * _ipu_csi_set_mipi_di
++ *
++ * @param ipu ipu handler
++ * @param num MIPI data identifier 0-3 handled by CSI
++ * @param di_val data identifier value
++ * @param csi csi 0 or csi 1
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int _ipu_csi_set_mipi_di(struct ipu_soc *ipu, uint32_t num, uint32_t di_val, uint32_t csi)
++{
++ uint32_t temp;
++ int retval = 0;
++
++ if (di_val > 0xFFL) {
++ retval = -EINVAL;
++ goto err;
++ }
++
++ temp = ipu_csi_read(ipu, csi, CSI_MIPI_DI);
++
++ switch (num) {
++ case IPU_CSI_MIPI_DI0:
++ temp &= ~CSI_MIPI_DI0_MASK;
++ temp |= (di_val << CSI_MIPI_DI0_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_MIPI_DI);
++ break;
++ case IPU_CSI_MIPI_DI1:
++ temp &= ~CSI_MIPI_DI1_MASK;
++ temp |= (di_val << CSI_MIPI_DI1_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_MIPI_DI);
++ break;
++ case IPU_CSI_MIPI_DI2:
++ temp &= ~CSI_MIPI_DI2_MASK;
++ temp |= (di_val << CSI_MIPI_DI2_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_MIPI_DI);
++ break;
++ case IPU_CSI_MIPI_DI3:
++ temp &= ~CSI_MIPI_DI3_MASK;
++ temp |= (di_val << CSI_MIPI_DI3_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_MIPI_DI);
++ break;
++ default:
++ retval = -EINVAL;
++ }
++
++err:
++ return retval;
++}
++
++/*!
++ * _ipu_csi_set_skip_isp
++ *
++ * @param ipu ipu handler
++ * @param	skip	bitmap of frames to be skipped; set the
++ *			corresponding bits to 1
++ * @param	max_ratio	number of frames in a skipping set; the
++ *			maximum value of max_ratio is 5
++ * @param csi csi 0 or csi 1
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int _ipu_csi_set_skip_isp(struct ipu_soc *ipu, uint32_t skip, uint32_t max_ratio, uint32_t csi)
++{
++ uint32_t temp;
++ int retval = 0;
++
++ if (max_ratio > 5) {
++ retval = -EINVAL;
++ goto err;
++ }
++
++ temp = ipu_csi_read(ipu, csi, CSI_SKIP);
++ temp &= ~(CSI_MAX_RATIO_SKIP_ISP_MASK | CSI_SKIP_ISP_MASK);
++ temp |= (max_ratio << CSI_MAX_RATIO_SKIP_ISP_SHIFT) |
++ (skip << CSI_SKIP_ISP_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_SKIP);
++
++err:
++ return retval;
++}
++
++/*!
++ * _ipu_csi_set_skip_smfc
++ *
++ * @param ipu ipu handler
++ * @param	skip	bitmap of frames to be skipped; set the
++ *			corresponding bits to 1
++ * @param	max_ratio	number of frames in a skipping set; the
++ *			maximum value of max_ratio is 5
++ * @param	id	CSI-to-SMFC skipping id
++ * @param csi csi 0 or csi 1
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int _ipu_csi_set_skip_smfc(struct ipu_soc *ipu, uint32_t skip,
++ uint32_t max_ratio, uint32_t id, uint32_t csi)
++{
++ uint32_t temp;
++ int retval = 0;
++
++ if (max_ratio > 5 || id > 3) {
++ retval = -EINVAL;
++ goto err;
++ }
++
++ temp = ipu_csi_read(ipu, csi, CSI_SKIP);
++ temp &= ~(CSI_MAX_RATIO_SKIP_SMFC_MASK | CSI_ID_2_SKIP_MASK |
++ CSI_SKIP_SMFC_MASK);
++ temp |= (max_ratio << CSI_MAX_RATIO_SKIP_SMFC_SHIFT) |
++ (id << CSI_ID_2_SKIP_SHIFT) |
++ (skip << CSI_SKIP_SMFC_SHIFT);
++ ipu_csi_write(ipu, csi, temp, CSI_SKIP);
++
++err:
++ return retval;
++}
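++
++/*
++ * Frame-skip encoding example (an illustration of one plausible pattern;
++ * the hardware manual is the authority on the exact bit-to-frame
++ * mapping): with max_ratio = 4 the skipping set is five frames long and
++ * each bit set in `skip` drops one frame of the set, so
++ *
++ *	_ipu_csi_set_skip_smfc(ipu, 0x0A, 4, id, csi);
++ *
++ * (0x0A = 0b01010) would drop two of every five captured frames.
++ */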
++
++/*!
++ * _ipu_smfc_init
++ * Map CSI frames to IDMAC channels.
++ *
++ * @param ipu ipu handler
++ * @param channel IDMAC channel 0-3
++ * @param mipi_id mipi id number 0-3
++ * @param csi csi0 or csi1
++ */
++void _ipu_smfc_init(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t mipi_id, uint32_t csi)
++{
++ uint32_t temp;
++
++ temp = ipu_smfc_read(ipu, SMFC_MAP);
++
++ switch (channel) {
++ case CSI_MEM0:
++ temp &= ~SMFC_MAP_CH0_MASK;
++ temp |= ((csi << 2) | mipi_id) << SMFC_MAP_CH0_SHIFT;
++ break;
++ case CSI_MEM1:
++ temp &= ~SMFC_MAP_CH1_MASK;
++ temp |= ((csi << 2) | mipi_id) << SMFC_MAP_CH1_SHIFT;
++ break;
++ case CSI_MEM2:
++ temp &= ~SMFC_MAP_CH2_MASK;
++ temp |= ((csi << 2) | mipi_id) << SMFC_MAP_CH2_SHIFT;
++ break;
++ case CSI_MEM3:
++ temp &= ~SMFC_MAP_CH3_MASK;
++ temp |= ((csi << 2) | mipi_id) << SMFC_MAP_CH3_SHIFT;
++ break;
++ default:
++ return;
++ }
++
++ ipu_smfc_write(ipu, temp, SMFC_MAP);
++}
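++
++/*
++ * Mapping encoding, worked through: each SMFC_MAP channel field holds
++ * (csi << 2) | mipi_id, i.e. bit 2 selects the CSI and bits 1:0 carry
++ * the MIPI ID. Mapping CSI_MEM0 to CSI1 with MIPI ID 2 therefore writes
++ * (1 << 2) | 2 = 0x6 into the CH0 field.
++ */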
++
++/*!
++ * _ipu_smfc_set_wmc
++ * Caution: the number of required channels, the enabled channels
++ * and the FIFO size per channel are subject to hardware restrictions.
++ *
++ * @param ipu ipu handler
++ * @param channel IDMAC channel 0-3
++ * @param set set 1 or clear 0
++ * @param	level	watermark level, relative to the
++ *			FIFO size
++ */
++void _ipu_smfc_set_wmc(struct ipu_soc *ipu, ipu_channel_t channel, bool set, uint32_t level)
++{
++ uint32_t temp;
++
++ temp = ipu_smfc_read(ipu, SMFC_WMC);
++
++ switch (channel) {
++ case CSI_MEM0:
++ if (set == true) {
++ temp &= ~SMFC_WM0_SET_MASK;
++ temp |= level << SMFC_WM0_SET_SHIFT;
++ } else {
++ temp &= ~SMFC_WM0_CLR_MASK;
++ temp |= level << SMFC_WM0_CLR_SHIFT;
++ }
++ break;
++ case CSI_MEM1:
++ if (set == true) {
++ temp &= ~SMFC_WM1_SET_MASK;
++ temp |= level << SMFC_WM1_SET_SHIFT;
++ } else {
++ temp &= ~SMFC_WM1_CLR_MASK;
++ temp |= level << SMFC_WM1_CLR_SHIFT;
++ }
++ break;
++ case CSI_MEM2:
++ if (set == true) {
++ temp &= ~SMFC_WM2_SET_MASK;
++ temp |= level << SMFC_WM2_SET_SHIFT;
++ } else {
++ temp &= ~SMFC_WM2_CLR_MASK;
++ temp |= level << SMFC_WM2_CLR_SHIFT;
++ }
++ break;
++ case CSI_MEM3:
++ if (set == true) {
++ temp &= ~SMFC_WM3_SET_MASK;
++ temp |= level << SMFC_WM3_SET_SHIFT;
++ } else {
++ temp &= ~SMFC_WM3_CLR_MASK;
++ temp |= level << SMFC_WM3_CLR_SHIFT;
++ }
++ break;
++ default:
++ return;
++ }
++
++ ipu_smfc_write(ipu, temp, SMFC_WMC);
++}
++
++/*!
++ * _ipu_smfc_set_burst_size
++ *
++ * @param ipu ipu handler
++ * @param channel IDMAC channel 0-3
++ * @param bs burst size of IDMAC channel,
++ *			the value programmed here should be BURST_SIZE-1
++ */
++void _ipu_smfc_set_burst_size(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t bs)
++{
++ uint32_t temp;
++
++ temp = ipu_smfc_read(ipu, SMFC_BS);
++
++ switch (channel) {
++ case CSI_MEM0:
++ temp &= ~SMFC_BS0_MASK;
++ temp |= bs << SMFC_BS0_SHIFT;
++ break;
++ case CSI_MEM1:
++ temp &= ~SMFC_BS1_MASK;
++ temp |= bs << SMFC_BS1_SHIFT;
++ break;
++ case CSI_MEM2:
++ temp &= ~SMFC_BS2_MASK;
++ temp |= bs << SMFC_BS2_SHIFT;
++ break;
++ case CSI_MEM3:
++ temp &= ~SMFC_BS3_MASK;
++ temp |= bs << SMFC_BS3_SHIFT;
++ break;
++ default:
++ return;
++ }
++
++ ipu_smfc_write(ipu, temp, SMFC_BS);
++}
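++
++/*
++ * Example of the off-by-one encoding noted above: because the register
++ * expects BURST_SIZE - 1, a 16-word burst on CSI_MEM0 is programmed as
++ *
++ *	_ipu_smfc_set_burst_size(ipu, CSI_MEM0, 16 - 1);
++ */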
++
++/*!
++ * _ipu_csi_init
++ *
++ * @param ipu ipu handler
++ * @param channel IDMAC channel
++ * @param csi csi 0 or csi 1
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int _ipu_csi_init(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t csi)
++{
++ uint32_t csi_sens_conf, csi_dest;
++ int retval = 0;
++
++ switch (channel) {
++ case CSI_MEM0:
++ case CSI_MEM1:
++ case CSI_MEM2:
++ case CSI_MEM3:
++ csi_dest = CSI_DATA_DEST_IDMAC;
++ break;
++ case CSI_PRP_ENC_MEM:
++ case CSI_PRP_VF_MEM:
++ csi_dest = CSI_DATA_DEST_IC;
++ break;
++ default:
++ retval = -EINVAL;
++ goto err;
++ }
++
++ csi_sens_conf = ipu_csi_read(ipu, csi, CSI_SENS_CONF);
++ csi_sens_conf &= ~CSI_SENS_CONF_DATA_DEST_MASK;
++ ipu_csi_write(ipu, csi, csi_sens_conf | (csi_dest <<
++ CSI_SENS_CONF_DATA_DEST_SHIFT), CSI_SENS_CONF);
++err:
++ return retval;
++}
++
++/*!
++ * csi_irq_handler
++ *
++ * @param irq interrupt id
++ * @param dev_id pointer to ipu handler
++ *
++ * @return	Returns IRQ_HANDLED
++ */
++static irqreturn_t csi_irq_handler(int irq, void *dev_id)
++{
++ struct ipu_soc *ipu = dev_id;
++ struct completion *comp = &ipu->csi_comp;
++
++ complete(comp);
++ return IRQ_HANDLED;
++}
++
++/*!
++ * _ipu_csi_wait4eof
++ *
++ * @param ipu ipu handler
++ * @param channel IDMAC channel
++ *
++ */
++void _ipu_csi_wait4eof(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ int ret;
++ int irq = 0;
++
++ if (channel == CSI_MEM0)
++ irq = IPU_IRQ_CSI0_OUT_EOF;
++ else if (channel == CSI_MEM1)
++ irq = IPU_IRQ_CSI1_OUT_EOF;
++ else if (channel == CSI_MEM2)
++ irq = IPU_IRQ_CSI2_OUT_EOF;
++ else if (channel == CSI_MEM3)
++ irq = IPU_IRQ_CSI3_OUT_EOF;
++ else if (channel == CSI_PRP_ENC_MEM)
++ irq = IPU_IRQ_PRP_ENC_OUT_EOF;
++ else if (channel == CSI_PRP_VF_MEM)
++ irq = IPU_IRQ_PRP_VF_OUT_EOF;
++	else {
++ dev_err(ipu->dev, "Not a CSI channel\n");
++ return;
++ }
++
++ init_completion(&ipu->csi_comp);
++ ret = ipu_request_irq(ipu, irq, csi_irq_handler, 0, NULL, ipu);
++ if (ret < 0) {
++ dev_err(ipu->dev, "CSI irq %d in use\n", irq);
++ return;
++ }
++ ret = wait_for_completion_timeout(&ipu->csi_comp, msecs_to_jiffies(500));
++ ipu_free_irq(ipu, irq, ipu);
++	dev_dbg(ipu->dev, "CSI stop %s (%d jiffies left)\n",
++		ret ? "completed" : "timed out", ret);
++}
+diff -Nur linux-4.1.3/drivers/mxc/ipu3/ipu_common.c linux-xbian-imx6/drivers/mxc/ipu3/ipu_common.c
+--- linux-4.1.3/drivers/mxc/ipu3/ipu_common.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/ipu3/ipu_common.c 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,3151 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_common.c
++ *
++ * @brief This file contains the IPU driver common API functions.
++ *
++ * @ingroup IPU
++ */
++
++
++#include <linux/clk.h>
++#include <linux/clk-provider.h>
++#include <linux/clk/clk-conf.h>
++
++
++#include <linux/busfreq-imx6.h>
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/ipu-v3.h>
++#include <linux/irq.h>
++#include <linux/irqdesc.h>
++#include <linux/module.h>
++#include <linux/mod_devicetable.h>
++#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include <linux/pm_runtime.h>
++#include <linux/reset.h>
++#include <linux/spinlock.h>
++#include <linux/types.h>
++
++#include <asm/cacheflush.h>
++
++#include "ipu_param_mem.h"
++#include "ipu_regs.h"
++
++static struct ipu_soc ipu_array[MXC_IPU_MAX_NUM];
++int g_ipu_hw_rev;
++
++/* Static functions */
++static irqreturn_t ipu_sync_irq_handler(int irq, void *desc);
++static irqreturn_t ipu_err_irq_handler(int irq, void *desc);
++
++static inline uint32_t channel_2_dma(ipu_channel_t ch, ipu_buffer_t type)
++{
++ return ((uint32_t) ch >> (6 * type)) & 0x3F;
++};
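++
++/*
++ * Decoding example (hypothetical encoded value, for illustration): an
++ * ipu_channel_t packs up to three 6-bit IDMAC channel numbers, selected
++ * by ipu_buffer_t (type 0/1/2 reads bits 5:0, 11:6 or 17:12). For a
++ * channel encoded as 0x1400E, channel_2_dma() yields DMA 14 (0x0E) for
++ * type 0 and DMA 20 (0x14) for type 2.
++ */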
++
++static inline int _ipu_is_ic_chan(uint32_t dma_chan)
++{
++ return (((dma_chan >= 11) && (dma_chan <= 22) && (dma_chan != 17) &&
++ (dma_chan != 18)));
++}
++
++static inline int _ipu_is_vdi_out_chan(uint32_t dma_chan)
++{
++ return (dma_chan == 5);
++}
++
++static inline int _ipu_is_ic_graphic_chan(uint32_t dma_chan)
++{
++ return (dma_chan == 14 || dma_chan == 15);
++}
++
++/* Either DP BG or DP FG can be graphic window */
++static inline int _ipu_is_dp_graphic_chan(uint32_t dma_chan)
++{
++ return (dma_chan == 23 || dma_chan == 27);
++}
++
++static inline int _ipu_is_irt_chan(uint32_t dma_chan)
++{
++ return ((dma_chan >= 45) && (dma_chan <= 50));
++		/*
++		 * onw is a multiple of output_f, in the range
++		 * [max(output_f, output_frame_width - maximal_stripe_width),
++		 *  min(output_frame_width - 2, maximal_stripe_width)].
++		 */
++		/* Initial cost: definitely beyond the cost of any valid setting. */
++ return ((dma_chan >= 23) && (dma_chan <= 29));
++}
++
++static inline int _ipu_is_smfc_chan(uint32_t dma_chan)
++{
++ return ((dma_chan >= 0) && (dma_chan <= 3));
++}
++
++static inline int _ipu_is_trb_chan(uint32_t dma_chan)
++{
++ return (((dma_chan == 8) || (dma_chan == 9) ||
++ (dma_chan == 10) || (dma_chan == 13) ||
++ (dma_chan == 21) || (dma_chan == 23) ||
++ (dma_chan == 27) || (dma_chan == 28)) &&
++ (g_ipu_hw_rev >= IPU_V3DEX));
++}
++
++/*
++ * We usually use IDMAC 23 as full plane and IDMAC 27 as partial
++ * plane.
++ * IDMAC 23/24/28/41 can drive a display respectively - primary
++ * IDMAC 27 depends on IDMAC 23 - nonprimary
++ */
++static inline int _ipu_is_primary_disp_chan(uint32_t dma_chan)
++{
++ return ((dma_chan == 23) || (dma_chan == 24) ||
++ (dma_chan == 28) || (dma_chan == 41));
++}
++
++static inline int _ipu_is_sync_irq(uint32_t irq)
++{
++ /* sync interrupt register number */
++ int reg_num = irq / 32 + 1;
++
++ return ((reg_num == 1) || (reg_num == 2) || (reg_num == 3) ||
++ (reg_num == 4) || (reg_num == 7) || (reg_num == 8) ||
++ (reg_num == 11) || (reg_num == 12) || (reg_num == 13) ||
++ (reg_num == 14) || (reg_num == 15));
++}
++
++#define idma_is_valid(ch) (ch != NO_DMA)
++#define idma_mask(ch) (idma_is_valid(ch) ? (1UL << (ch & 0x1F)) : 0)
++#define idma_is_set(ipu, reg, dma) (ipu_idmac_read(ipu, reg(dma)) & idma_mask(dma))
++#define tri_cur_buf_mask(ch) (idma_mask(ch*2) * 3)
++#define tri_cur_buf_shift(ch) (ffs(idma_mask(ch*2)) - 1)
++
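++/*
++ * Example of the masking scheme above: the IDMAC enable/ready registers
++ * are 32 bits wide, so DMA channel 41 lives in the second register at
++ * bit 41 & 0x1F = 9, i.e. idma_mask(41) == (1UL << 9), and
++ * idma_is_set(ipu, IDMAC_CHA_EN, 41) tests IDMAC_CHA_EN(41) against it.
++ */
++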
++static int ipu_clk_setup_enable(struct ipu_soc *ipu,
++ struct ipu_pltfm_data *pdata)
++{
++ char pixel_clk_0[] = "ipu1_pclk_0";
++ char pixel_clk_1[] = "ipu1_pclk_1";
++ char pixel_clk_0_sel[] = "ipu1_pclk0_sel";
++ char pixel_clk_1_sel[] = "ipu1_pclk1_sel";
++ char pixel_clk_0_div[] = "ipu1_pclk0_div";
++ char pixel_clk_1_div[] = "ipu1_pclk1_div";
++ char *ipu_pixel_clk_sel1[] = { "ipu1", "ipu1_di0", "ipu1_di1", };
++ char *ipu_pixel_clk_sel2[] = { "ipu2", "ipu2_di0", "ipu2_di1", };
++ char **ipu_pixel_clk_sel;
++ struct clk *clk;
++ int ret;
++
++ strncpy(ipu->pixel_clk_0, pixel_clk_0, sizeof(ipu->pixel_clk_0));
++ ipu->pixel_clk_0[3] += pdata->id;
++ strncpy(ipu->pixel_clk_1, pixel_clk_1, sizeof(ipu->pixel_clk_1));
++ ipu->pixel_clk_1[3] += pdata->id;
++ strncpy(ipu->pixel_clk_0_sel, pixel_clk_0_sel, sizeof(ipu->pixel_clk_0_sel));
++ ipu->pixel_clk_0_sel[3] += pdata->id;
++ strncpy(ipu->pixel_clk_1_sel, pixel_clk_1_sel, sizeof(ipu->pixel_clk_1_sel));
++ ipu->pixel_clk_1_sel[3] += pdata->id;
++ strncpy(ipu->pixel_clk_0_div, pixel_clk_0_div, sizeof(ipu->pixel_clk_0_div));
++ ipu->pixel_clk_0_div[3] += pdata->id;
++ strncpy(ipu->pixel_clk_1_div, pixel_clk_1_div, sizeof(ipu->pixel_clk_1_div));
++ ipu->pixel_clk_1_div[3] += pdata->id;
++
++ if (pdata->id == 0)
++ ipu_pixel_clk_sel = ipu_pixel_clk_sel1;
++ else
++ ipu_pixel_clk_sel = ipu_pixel_clk_sel2;
++
++ clk = clk_register_mux_pix_clk(ipu->dev, ipu->pixel_clk_0_sel,
++ (const char **)ipu_pixel_clk_sel,
++ ARRAY_SIZE(ipu_pixel_clk_sel1),
++ 0, pdata->id, 0, 0);
++ if (IS_ERR(clk)) {
++ dev_err(ipu->dev, "clk_register mux di0 failed");
++ return PTR_ERR(clk);
++ }
++ ipu->pixel_clk_sel[0] = clk;
++ clk = clk_register_mux_pix_clk(ipu->dev, ipu->pixel_clk_1_sel,
++ (const char **)ipu_pixel_clk_sel,
++ ARRAY_SIZE(ipu_pixel_clk_sel1),
++ 0, pdata->id, 1, 0);
++ if (IS_ERR(clk)) {
++ dev_err(ipu->dev, "clk_register mux di1 failed");
++ return PTR_ERR(clk);
++ }
++ ipu->pixel_clk_sel[1] = clk;
++
++ clk = clk_register_div_pix_clk(ipu->dev, ipu->pixel_clk_0_div,
++ ipu->pixel_clk_0_sel, 0, pdata->id, 0, 0);
++ if (IS_ERR(clk)) {
++ dev_err(ipu->dev, "clk register di0 div failed");
++ return PTR_ERR(clk);
++ }
++ clk = clk_register_div_pix_clk(ipu->dev, ipu->pixel_clk_1_div,
++ ipu->pixel_clk_1_sel, CLK_SET_RATE_PARENT, pdata->id, 1, 0);
++ if (IS_ERR(clk)) {
++ dev_err(ipu->dev, "clk register di1 div failed");
++ return PTR_ERR(clk);
++ }
++
++ ipu->pixel_clk[0] = clk_register_gate_pix_clk(ipu->dev, ipu->pixel_clk_0,
++ ipu->pixel_clk_0_div, CLK_SET_RATE_PARENT,
++ pdata->id, 0, 0);
++ if (IS_ERR(ipu->pixel_clk[0])) {
++ dev_err(ipu->dev, "clk register di0 gate failed");
++ return PTR_ERR(ipu->pixel_clk[0]);
++ }
++ ipu->pixel_clk[1] = clk_register_gate_pix_clk(ipu->dev, ipu->pixel_clk_1,
++ ipu->pixel_clk_1_div, CLK_SET_RATE_PARENT,
++ pdata->id, 1, 0);
++ if (IS_ERR(ipu->pixel_clk[1])) {
++ dev_err(ipu->dev, "clk register di1 gate failed");
++ return PTR_ERR(ipu->pixel_clk[1]);
++ }
++
++ ret = clk_set_parent(ipu->pixel_clk_sel[0], ipu->ipu_clk);
++ if (ret) {
++ dev_err(ipu->dev, "clk set parent failed %s, %d", __clk_get_name(ipu->ipu_clk), ret);
++ return ret;
++ }
++
++ ret = clk_set_parent(ipu->pixel_clk_sel[1], ipu->ipu_clk);
++ if (ret) {
++ dev_err(ipu->dev, "clk set parent failed %s, %d", __clk_get_name(ipu->ipu_clk), ret);
++ return ret;
++ }
++
++ ipu->di_clk[0] = devm_clk_get(ipu->dev, "di0");
++ if (IS_ERR(ipu->di_clk[0])) {
++ dev_err(ipu->dev, "clk_get di0 failed");
++ return PTR_ERR(ipu->di_clk[0]);
++ }
++ ipu->di_clk[1] = devm_clk_get(ipu->dev, "di1");
++ if (IS_ERR(ipu->di_clk[1])) {
++ dev_err(ipu->dev, "clk_get di1 failed");
++ return PTR_ERR(ipu->di_clk[1]);
++ }
++
++ ipu->di_clk_sel[0] = devm_clk_get(ipu->dev, "di0_sel");
++ if (IS_ERR(ipu->di_clk_sel[0])) {
++ dev_err(ipu->dev, "clk_get di0_sel failed");
++ return PTR_ERR(ipu->di_clk_sel[0]);
++ }
++ ipu->di_clk_sel[1] = devm_clk_get(ipu->dev, "di1_sel");
++ if (IS_ERR(ipu->di_clk_sel[1])) {
++ dev_err(ipu->dev, "clk_get di1_sel failed");
++ return PTR_ERR(ipu->di_clk_sel[1]);
++ }
++
++ return 0;
++}
++
++static int ipu_mem_reset(struct ipu_soc *ipu)
++{
++ int timeout = 1000;
++
++ ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);
++
++ while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
++ if (!timeout--)
++ return -ETIME;
++ msleep(1);
++ }
++
++ return 0;
++}
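++
++/*
++ * Note on the reset sequence above: 0x807FFFFF sets bit 31 (the bit the
++ * loop polls) to kick off the reset and the low 23 bits to select the
++ * memories to clear; the poll gives up after roughly one second
++ * (1000 iterations of msleep(1)).
++ */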
++
++struct ipu_soc *ipu_get_soc(int id)
++{
++ if (id >= MXC_IPU_MAX_NUM)
++ return ERR_PTR(-ENODEV);
++ else if (!ipu_array[id].online)
++ return ERR_PTR(-ENODEV);
++ else
++ return &(ipu_array[id]);
++}
++EXPORT_SYMBOL_GPL(ipu_get_soc);
++
++void _ipu_get(struct ipu_soc *ipu)
++{
++ int ret;
++
++ ret = clk_enable(ipu->ipu_clk);
++ if (ret < 0)
++ BUG();
++}
++
++void _ipu_put(struct ipu_soc *ipu)
++{
++ clk_disable(ipu->ipu_clk);
++}
++
++void ipu_disable_hsp_clk(struct ipu_soc *ipu)
++{
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_disable_hsp_clk);
++
++static struct platform_device_id imx_ipu_type[] = {
++ {
++ .name = "ipu-imx6q",
++ .driver_data = IPU_V3H,
++ }, {
++ /* sentinel */
++ }
++};
++MODULE_DEVICE_TABLE(platform, imx_ipu_type);
++
++static const struct of_device_id imx_ipuv3_dt_ids[] = {
++ { .compatible = "fsl,imx6q-ipu", .data = &imx_ipu_type[IMX6Q_IPU], },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, imx_ipuv3_dt_ids);
++
++/*!
++ * This function is called by the driver framework to initialize the IPU
++ * hardware.
++ *
++ * @param dev The device structure for the IPU passed in by the
++ * driver framework.
++ *
++ * @return Returns 0 on success or negative error code on error
++ */
++static int ipu_probe(struct platform_device *pdev)
++{
++ struct ipu_soc *ipu;
++ struct resource *res;
++ unsigned long ipu_base;
++ const struct of_device_id *of_id =
++ of_match_device(imx_ipuv3_dt_ids, &pdev->dev);
++ struct ipu_pltfm_data *pltfm_data;
++ int ret = 0;
++ u32 bypass_reset;
++
++ dev_dbg(&pdev->dev, "<%s>\n", __func__);
++
++ pltfm_data = devm_kzalloc(&pdev->dev, sizeof(struct ipu_pltfm_data),
++ GFP_KERNEL);
++ if (!pltfm_data)
++ return -ENOMEM;
++
++ ret = of_property_read_u32(pdev->dev.of_node,
++ "bypass_reset", &bypass_reset);
++ if (ret < 0) {
++		dev_dbg(&pdev->dev, "cannot get bypass_reset\n");
++ return ret;
++ }
++ pltfm_data->bypass_reset = (bool)bypass_reset;
++
++	pltfm_data->id = of_alias_get_id(pdev->dev.of_node, "ipu");
++	if (pltfm_data->id < 0) {
++		dev_dbg(&pdev->dev, "cannot get alias id\n");
++		return pltfm_data->id;
++	}
++	dev_dbg(&pdev->dev, "got alias id %d\n", pltfm_data->id);
++
++ if (of_id)
++ pdev->id_entry = of_id->data;
++ pltfm_data->devtype = pdev->id_entry->driver_data;
++ g_ipu_hw_rev = pltfm_data->devtype;
++
++ ipu = &ipu_array[pltfm_data->id];
++ memset(ipu, 0, sizeof(struct ipu_soc));
++ ipu->dev = &pdev->dev;
++ ipu->pdata = pltfm_data;
++ dev_dbg(ipu->dev, "IPU rev:%d\n", g_ipu_hw_rev);
++ spin_lock_init(&ipu->int_reg_spin_lock);
++ spin_lock_init(&ipu->rdy_reg_spin_lock);
++ mutex_init(&ipu->mutex_lock);
++
++ ipu->irq_sync = platform_get_irq(pdev, 0);
++ ipu->irq_err = platform_get_irq(pdev, 1);
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++ if (!res || ipu->irq_sync < 0 || ipu->irq_err < 0) {
++ dev_err(&pdev->dev, "can't get device resources\n");
++ return -ENODEV;
++ }
++
++ if (!devm_request_mem_region(&pdev->dev, res->start,
++ resource_size(res), pdev->name))
++ return -EBUSY;
++
++ ret = devm_request_irq(&pdev->dev, ipu->irq_sync,
++ ipu_sync_irq_handler, 0, pdev->name, ipu);
++ if (ret) {
++ dev_err(ipu->dev, "request SYNC interrupt failed\n");
++ return ret;
++ }
++ ret = devm_request_irq(&pdev->dev, ipu->irq_err,
++ ipu_err_irq_handler, 0, pdev->name, ipu);
++ if (ret) {
++ dev_err(ipu->dev, "request ERR interrupt failed\n");
++ return ret;
++ }
++
++ ipu_base = res->start;
++ /* base fixup */
++ if (g_ipu_hw_rev == IPU_V3H) /* IPUv3H */
++ ipu_base += IPUV3H_REG_BASE;
++ else if (g_ipu_hw_rev == IPU_V3M) /* IPUv3M */
++ ipu_base += IPUV3M_REG_BASE;
++ else /* IPUv3D, v3E, v3EX */
++ ipu_base += IPUV3DEX_REG_BASE;
++
++ ipu->cm_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_CM_REG_BASE, PAGE_SIZE);
++ ipu->ic_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_IC_REG_BASE, PAGE_SIZE);
++ ipu->idmac_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_IDMAC_REG_BASE, PAGE_SIZE);
++ /* DP Registers are accessed thru the SRM */
++ ipu->dp_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_SRM_REG_BASE, PAGE_SIZE);
++ ipu->dc_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_DC_REG_BASE, PAGE_SIZE);
++ ipu->dmfc_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_DMFC_REG_BASE, PAGE_SIZE);
++ ipu->di_reg[0] = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_DI0_REG_BASE, PAGE_SIZE);
++ ipu->di_reg[1] = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_DI1_REG_BASE, PAGE_SIZE);
++ ipu->smfc_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_SMFC_REG_BASE, PAGE_SIZE);
++ ipu->csi_reg[0] = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_CSI0_REG_BASE, PAGE_SIZE);
++ ipu->csi_reg[1] = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_CSI1_REG_BASE, PAGE_SIZE);
++ ipu->cpmem_base = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_CPMEM_REG_BASE, SZ_128K);
++ ipu->tpmem_base = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_TPM_REG_BASE, SZ_64K);
++ ipu->dc_tmpl_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_DC_TMPL_REG_BASE, SZ_128K);
++ ipu->vdi_reg = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_VDI_REG_BASE, PAGE_SIZE);
++ ipu->disp_base[1] = devm_ioremap(&pdev->dev,
++ ipu_base + IPU_DISP1_BASE, SZ_4K);
++ if (!ipu->cm_reg || !ipu->ic_reg || !ipu->idmac_reg ||
++ !ipu->dp_reg || !ipu->dc_reg || !ipu->dmfc_reg ||
++ !ipu->di_reg[0] || !ipu->di_reg[1] || !ipu->smfc_reg ||
++ !ipu->csi_reg[0] || !ipu->csi_reg[1] || !ipu->cpmem_base ||
++ !ipu->tpmem_base || !ipu->dc_tmpl_reg || !ipu->disp_base[1]
++ || !ipu->vdi_reg)
++ return -ENOMEM;
++
++ dev_dbg(ipu->dev, "IPU CM Regs = %p\n", ipu->cm_reg);
++ dev_dbg(ipu->dev, "IPU IC Regs = %p\n", ipu->ic_reg);
++ dev_dbg(ipu->dev, "IPU IDMAC Regs = %p\n", ipu->idmac_reg);
++ dev_dbg(ipu->dev, "IPU DP Regs = %p\n", ipu->dp_reg);
++ dev_dbg(ipu->dev, "IPU DC Regs = %p\n", ipu->dc_reg);
++ dev_dbg(ipu->dev, "IPU DMFC Regs = %p\n", ipu->dmfc_reg);
++ dev_dbg(ipu->dev, "IPU DI0 Regs = %p\n", ipu->di_reg[0]);
++ dev_dbg(ipu->dev, "IPU DI1 Regs = %p\n", ipu->di_reg[1]);
++ dev_dbg(ipu->dev, "IPU SMFC Regs = %p\n", ipu->smfc_reg);
++ dev_dbg(ipu->dev, "IPU CSI0 Regs = %p\n", ipu->csi_reg[0]);
++ dev_dbg(ipu->dev, "IPU CSI1 Regs = %p\n", ipu->csi_reg[1]);
++ dev_dbg(ipu->dev, "IPU CPMem = %p\n", ipu->cpmem_base);
++ dev_dbg(ipu->dev, "IPU TPMem = %p\n", ipu->tpmem_base);
++ dev_dbg(ipu->dev, "IPU DC Template Mem = %p\n", ipu->dc_tmpl_reg);
++ dev_dbg(ipu->dev, "IPU Display Region 1 Mem = %p\n", ipu->disp_base[1]);
++ dev_dbg(ipu->dev, "IPU VDI Regs = %p\n", ipu->vdi_reg);
++
++ ipu->ipu_clk = devm_clk_get(ipu->dev, "bus");
++ if (IS_ERR(ipu->ipu_clk)) {
++ dev_err(ipu->dev, "clk_get ipu failed");
++ return PTR_ERR(ipu->ipu_clk);
++ }
++
++ /* ipu_clk is always prepared */
++ ret = clk_prepare_enable(ipu->ipu_clk);
++ if (ret < 0) {
++ dev_err(ipu->dev, "ipu clk enable failed\n");
++ return ret;
++ }
++
++ ipu->online = true;
++
++ platform_set_drvdata(pdev, ipu);
++
++ if (!pltfm_data->bypass_reset) {
++ ret = device_reset(&pdev->dev);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to reset: %d\n", ret);
++ return ret;
++ }
++
++ ipu_mem_reset(ipu);
++
++ ipu_disp_init(ipu);
++
++ /* Set MCU_T to divide MCU access window into 2 */
++ ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
++ IPU_DISP_GEN);
++ }
++
++ /* setup ipu clk tree after ipu reset */
++ ret = ipu_clk_setup_enable(ipu, pltfm_data);
++ if (ret < 0) {
++ dev_err(ipu->dev, "ipu clk setup failed\n");
++ ipu->online = false;
++ return ret;
++ }
++
++ /* Set sync refresh channels and CSI->mem channel as high priority */
++ ipu_idmac_write(ipu, 0x18800001L, IDMAC_CHA_PRI(0));
++
++ /* Enable error interrupts by default */
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(5));
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(6));
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(9));
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(10));
++
++ if (!pltfm_data->bypass_reset)
++ clk_disable(ipu->ipu_clk);
++
++ register_ipu_device(ipu, ipu->pdata->id);
++
++ pm_runtime_enable(&pdev->dev);
++
++ return ret;
++}
++
++int ipu_remove(struct platform_device *pdev)
++{
++ struct ipu_soc *ipu = platform_get_drvdata(pdev);
++
++ unregister_ipu_device(ipu, ipu->pdata->id);
++
++ clk_put(ipu->ipu_clk);
++
++ return 0;
++}
++
++void ipu_dump_registers(struct ipu_soc *ipu)
++{
++ dev_dbg(ipu->dev, "IPU_CONF = \t0x%08X\n", ipu_cm_read(ipu, IPU_CONF));
++ dev_dbg(ipu->dev, "IDMAC_CONF = \t0x%08X\n", ipu_idmac_read(ipu, IDMAC_CONF));
++ dev_dbg(ipu->dev, "IDMAC_CHA_EN1 = \t0x%08X\n",
++ ipu_idmac_read(ipu, IDMAC_CHA_EN(0)));
++ dev_dbg(ipu->dev, "IDMAC_CHA_EN2 = \t0x%08X\n",
++ ipu_idmac_read(ipu, IDMAC_CHA_EN(32)));
++ dev_dbg(ipu->dev, "IDMAC_CHA_PRI1 = \t0x%08X\n",
++ ipu_idmac_read(ipu, IDMAC_CHA_PRI(0)));
++ dev_dbg(ipu->dev, "IDMAC_CHA_PRI2 = \t0x%08X\n",
++ ipu_idmac_read(ipu, IDMAC_CHA_PRI(32)));
++ dev_dbg(ipu->dev, "IDMAC_BAND_EN1 = \t0x%08X\n",
++ ipu_idmac_read(ipu, IDMAC_BAND_EN(0)));
++ dev_dbg(ipu->dev, "IDMAC_BAND_EN2 = \t0x%08X\n",
++ ipu_idmac_read(ipu, IDMAC_BAND_EN(32)));
++ dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL0 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0)));
++ dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL1 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32)));
++ if (g_ipu_hw_rev >= IPU_V3DEX) {
++ dev_dbg(ipu->dev, "IPU_CHA_TRB_MODE_SEL0 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(0)));
++ dev_dbg(ipu->dev, "IPU_CHA_TRB_MODE_SEL1 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(32)));
++ }
++ dev_dbg(ipu->dev, "DMFC_WR_CHAN = \t0x%08X\n",
++ ipu_dmfc_read(ipu, DMFC_WR_CHAN));
++ dev_dbg(ipu->dev, "DMFC_WR_CHAN_DEF = \t0x%08X\n",
++ ipu_dmfc_read(ipu, DMFC_WR_CHAN_DEF));
++ dev_dbg(ipu->dev, "DMFC_DP_CHAN = \t0x%08X\n",
++ ipu_dmfc_read(ipu, DMFC_DP_CHAN));
++ dev_dbg(ipu->dev, "DMFC_DP_CHAN_DEF = \t0x%08X\n",
++ ipu_dmfc_read(ipu, DMFC_DP_CHAN_DEF));
++ dev_dbg(ipu->dev, "DMFC_IC_CTRL = \t0x%08X\n",
++ ipu_dmfc_read(ipu, DMFC_IC_CTRL));
++ dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW1 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_FS_PROC_FLOW1));
++ dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW2 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_FS_PROC_FLOW2));
++ dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW3 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_FS_PROC_FLOW3));
++ dev_dbg(ipu->dev, "IPU_FS_DISP_FLOW1 = \t0x%08X\n",
++ ipu_cm_read(ipu, IPU_FS_DISP_FLOW1));
++ dev_dbg(ipu->dev, "IPU_VDIC_VDI_FSIZE = \t0x%08X\n",
++ ipu_vdi_read(ipu, VDI_FSIZE));
++ dev_dbg(ipu->dev, "IPU_VDIC_VDI_C = \t0x%08X\n",
++ ipu_vdi_read(ipu, VDI_C));
++ dev_dbg(ipu->dev, "IPU_IC_CONF = \t0x%08X\n",
++ ipu_ic_read(ipu, IC_CONF));
++}
++
++/*!
++ * This function is called to initialize a logical IPU channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID to init.
++ *
++ * @param params Input parameter containing union of channel
++ * initialization parameters.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t ipu_init_channel(struct ipu_soc *ipu, ipu_channel_t channel, ipu_channel_params_t *params)
++{
++ int ret = 0;
++ bool bad_pixfmt;
++ uint32_t ipu_conf, reg, in_g_pixel_fmt, sec_dma;
++
++ dev_dbg(ipu->dev, "init channel = %d\n", IPU_CHAN_ID(channel));
++
++ ret = pm_runtime_get_sync(ipu->dev);
++ if (ret < 0) {
++ dev_err(ipu->dev, "ch = %d, pm_runtime_get failed:%d!\n",
++ IPU_CHAN_ID(channel), ret);
++ dump_stack();
++ return ret;
++ }
++ /*
++ * Here, ret could be 1 if the device's runtime PM status was
++ * already 'active', so clear it to be 0.
++ */
++ ret = 0;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ /* Re-enable error interrupts every time a channel is initialized */
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(5));
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(6));
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(9));
++ ipu_cm_write(ipu, 0xFFFFFFFF, IPU_INT_CTRL(10));
++
++ if (ipu->channel_init_mask & (1L << IPU_CHAN_ID(channel))) {
++ dev_warn(ipu->dev, "Warning: channel already initialized %d\n",
++ IPU_CHAN_ID(channel));
++ }
++
++ ipu_conf = ipu_cm_read(ipu, IPU_CONF);
++
++ switch (channel) {
++ case CSI_MEM0:
++ case CSI_MEM1:
++ case CSI_MEM2:
++ case CSI_MEM3:
++ if (params->csi_mem.csi > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++
++ if (params->csi_mem.interlaced)
++ ipu->chan_is_interlaced[channel_2_dma(channel,
++ IPU_OUTPUT_BUFFER)] = true;
++ else
++ ipu->chan_is_interlaced[channel_2_dma(channel,
++ IPU_OUTPUT_BUFFER)] = false;
++
++ ipu->smfc_use_count++;
++ ipu->csi_channel[params->csi_mem.csi] = channel;
++
++ /*SMFC setting*/
++ if (params->csi_mem.mipi_en) {
++ ipu_conf |= (1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
++ params->csi_mem.csi));
++ _ipu_smfc_init(ipu, channel, params->csi_mem.mipi_vc,
++ params->csi_mem.csi);
++ _ipu_csi_set_mipi_di(ipu, params->csi_mem.mipi_vc,
++ params->csi_mem.mipi_id, params->csi_mem.csi);
++ } else {
++ ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
++ params->csi_mem.csi));
++ _ipu_smfc_init(ipu, channel, 0, params->csi_mem.csi);
++ }
++
++ /*CSI data (include compander) dest*/
++ _ipu_csi_init(ipu, channel, params->csi_mem.csi);
++ break;
++ case CSI_PRP_ENC_MEM:
++ if (params->csi_prp_enc_mem.csi > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++ if ((ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) ||
++ (ipu->using_ic_dirct_ch == MEM_VDI_MEM)) {
++ ret = -EINVAL;
++ goto err;
++ }
++ ipu->using_ic_dirct_ch = CSI_PRP_ENC_MEM;
++
++ ipu->ic_use_count++;
++ ipu->csi_channel[params->csi_prp_enc_mem.csi] = channel;
++
++ if (params->csi_prp_enc_mem.mipi_en) {
++ ipu_conf |= (1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
++ params->csi_prp_enc_mem.csi));
++ _ipu_csi_set_mipi_di(ipu,
++ params->csi_prp_enc_mem.mipi_vc,
++ params->csi_prp_enc_mem.mipi_id,
++ params->csi_prp_enc_mem.csi);
++ } else
++ ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
++ params->csi_prp_enc_mem.csi));
++
++ /*CSI0/1 feed into IC*/
++ ipu_conf &= ~IPU_CONF_IC_INPUT;
++ if (params->csi_prp_enc_mem.csi)
++ ipu_conf |= IPU_CONF_CSI_SEL;
++ else
++ ipu_conf &= ~IPU_CONF_CSI_SEL;
++
++ /*PRP skip buffer in memory, only valid when RWS_EN is true*/
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
++
++ /*CSI data (include compander) dest*/
++ _ipu_csi_init(ipu, channel, params->csi_prp_enc_mem.csi);
++ _ipu_ic_init_prpenc(ipu, params, true);
++ break;
++ case CSI_PRP_VF_MEM:
++ if (params->csi_prp_vf_mem.csi > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++ if ((ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) ||
++ (ipu->using_ic_dirct_ch == MEM_VDI_MEM)) {
++ ret = -EINVAL;
++ goto err;
++ }
++ ipu->using_ic_dirct_ch = CSI_PRP_VF_MEM;
++
++ ipu->ic_use_count++;
++ ipu->csi_channel[params->csi_prp_vf_mem.csi] = channel;
++
++ if (params->csi_prp_vf_mem.mipi_en) {
++ ipu_conf |= (1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
++ params->csi_prp_vf_mem.csi));
++ _ipu_csi_set_mipi_di(ipu,
++ params->csi_prp_vf_mem.mipi_vc,
++ params->csi_prp_vf_mem.mipi_id,
++ params->csi_prp_vf_mem.csi);
++ } else
++ ipu_conf &= ~(1 << (IPU_CONF_CSI0_DATA_SOURCE_OFFSET +
++ params->csi_prp_vf_mem.csi));
++
++ /*CSI0/1 feed into IC*/
++ ipu_conf &= ~IPU_CONF_IC_INPUT;
++ if (params->csi_prp_vf_mem.csi)
++ ipu_conf |= IPU_CONF_CSI_SEL;
++ else
++ ipu_conf &= ~IPU_CONF_CSI_SEL;
++
++ /*PRP skip buffer in memory, only valid when RWS_EN is true*/
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
++
++ /*CSI data (include compander) dest*/
++ _ipu_csi_init(ipu, channel, params->csi_prp_vf_mem.csi);
++ _ipu_ic_init_prpvf(ipu, params, true);
++ break;
++ case MEM_PRP_VF_MEM:
++ if (params->mem_prp_vf_mem.graphics_combine_en) {
++ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
++ in_g_pixel_fmt = params->mem_prp_vf_mem.in_g_pixel_fmt;
++ bad_pixfmt =
++ _ipu_ch_param_bad_alpha_pos(in_g_pixel_fmt);
++
++ if (params->mem_prp_vf_mem.alpha_chan_en) {
++ if (bad_pixfmt) {
++ dev_err(ipu->dev, "bad pixel format "
++ "for graphics plane from "
++ "ch%d\n", sec_dma);
++ ret = -EINVAL;
++ goto err;
++ }
++ ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
++ }
++ ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
++ }
++
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg | FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
++
++ _ipu_ic_init_prpvf(ipu, params, false);
++ ipu->ic_use_count++;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ if ((ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM) ||
++ (ipu->using_ic_dirct_ch == MEM_VDI_MEM) ||
++ (ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)) {
++ ret = -EINVAL;
++ goto err;
++ }
++ ipu->using_ic_dirct_ch = MEM_VDI_PRP_VF_MEM;
++ ipu->ic_use_count++;
++ ipu->vdi_use_count++;
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ reg &= ~FS_VDI_SRC_SEL_MASK;
++ ipu_cm_write(ipu, reg , IPU_FS_PROC_FLOW1);
++
++ if (params->mem_prp_vf_mem.graphics_combine_en)
++ ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
++ _ipu_ic_init_prpvf(ipu, params, false);
++ _ipu_vdi_init(ipu, channel, params);
++ break;
++ case MEM_VDI_PRP_VF_MEM_P:
++ case MEM_VDI_PRP_VF_MEM_N:
++ case MEM_VDI_MEM_P:
++ case MEM_VDI_MEM_N:
++ _ipu_vdi_init(ipu, channel, params);
++ break;
++ case MEM_VDI_MEM:
++ if ((ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM) ||
++ (ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM) ||
++ (ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)) {
++ ret = -EINVAL;
++ goto err;
++ }
++ ipu->using_ic_dirct_ch = MEM_VDI_MEM;
++ ipu->ic_use_count++;
++ ipu->vdi_use_count++;
++ _ipu_vdi_init(ipu, channel, params);
++ break;
++ case MEM_ROT_VF_MEM:
++ ipu->ic_use_count++;
++ ipu->rot_use_count++;
++ _ipu_ic_init_rotate_vf(ipu, params);
++ break;
++ case MEM_PRP_ENC_MEM:
++ ipu->ic_use_count++;
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg | FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
++ _ipu_ic_init_prpenc(ipu, params, false);
++ break;
++ case MEM_ROT_ENC_MEM:
++ ipu->ic_use_count++;
++ ipu->rot_use_count++;
++ _ipu_ic_init_rotate_enc(ipu, params);
++ break;
++ case MEM_PP_MEM:
++ if (params->mem_pp_mem.graphics_combine_en) {
++ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
++ in_g_pixel_fmt = params->mem_pp_mem.in_g_pixel_fmt;
++ bad_pixfmt =
++ _ipu_ch_param_bad_alpha_pos(in_g_pixel_fmt);
++
++ if (params->mem_pp_mem.alpha_chan_en) {
++ if (bad_pixfmt) {
++ dev_err(ipu->dev, "bad pixel format "
++ "for graphics plane from "
++ "ch%d\n", sec_dma);
++ ret = -EINVAL;
++ goto err;
++ }
++ ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
++ }
++
++ ipu->sec_chan_en[IPU_CHAN_ID(channel)] = true;
++ }
++
++ _ipu_ic_init_pp(ipu, params);
++ ipu->ic_use_count++;
++ break;
++ case MEM_ROT_PP_MEM:
++ _ipu_ic_init_rotate_pp(ipu, params);
++ ipu->ic_use_count++;
++ ipu->rot_use_count++;
++ break;
++ case MEM_DC_SYNC:
++ if (params->mem_dc_sync.di > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++
++ ipu->dc_di_assignment[1] = params->mem_dc_sync.di;
++ _ipu_dc_init(ipu, 1, params->mem_dc_sync.di,
++ params->mem_dc_sync.interlaced,
++ params->mem_dc_sync.out_pixel_fmt);
++ ipu->di_use_count[params->mem_dc_sync.di]++;
++ ipu->dc_use_count++;
++ ipu->dmfc_use_count++;
++ break;
++ case MEM_BG_SYNC:
++ if (params->mem_dp_bg_sync.di > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++
++ if (params->mem_dp_bg_sync.alpha_chan_en)
++ ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
++
++ ipu->dc_di_assignment[5] = params->mem_dp_bg_sync.di;
++ _ipu_dp_init(ipu, channel, params->mem_dp_bg_sync.in_pixel_fmt,
++ params->mem_dp_bg_sync.out_pixel_fmt);
++ _ipu_dc_init(ipu, 5, params->mem_dp_bg_sync.di,
++ params->mem_dp_bg_sync.interlaced,
++ params->mem_dp_bg_sync.out_pixel_fmt);
++ ipu->di_use_count[params->mem_dp_bg_sync.di]++;
++ ipu->dc_use_count++;
++ ipu->dp_use_count++;
++ ipu->dmfc_use_count++;
++ break;
++ case MEM_FG_SYNC:
++ _ipu_dp_init(ipu, channel, params->mem_dp_fg_sync.in_pixel_fmt,
++ params->mem_dp_fg_sync.out_pixel_fmt);
++
++ if (params->mem_dp_fg_sync.alpha_chan_en)
++ ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = true;
++
++ ipu->dc_use_count++;
++ ipu->dp_use_count++;
++ ipu->dmfc_use_count++;
++ break;
++ case DIRECT_ASYNC0:
++ if (params->direct_async.di > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++
++ ipu->dc_di_assignment[8] = params->direct_async.di;
++ _ipu_dc_init(ipu, 8, params->direct_async.di, false, IPU_PIX_FMT_GENERIC);
++ ipu->di_use_count[params->direct_async.di]++;
++ ipu->dc_use_count++;
++ break;
++ case DIRECT_ASYNC1:
++ if (params->direct_async.di > 1) {
++ ret = -EINVAL;
++ goto err;
++ }
++
++ ipu->dc_di_assignment[9] = params->direct_async.di;
++ _ipu_dc_init(ipu, 9, params->direct_async.di, false, IPU_PIX_FMT_GENERIC);
++ ipu->di_use_count[params->direct_async.di]++;
++ ipu->dc_use_count++;
++ break;
++ default:
++ dev_err(ipu->dev, "Missing channel initialization\n");
++ break;
++ }
++
++ ipu->channel_init_mask |= 1L << IPU_CHAN_ID(channel);
++
++ ipu_cm_write(ipu, ipu_conf, IPU_CONF);
++
++err:
++ mutex_unlock(&ipu->mutex_lock);
++ return ret;
++}
++EXPORT_SYMBOL(ipu_init_channel);
++
++/*!
++ * This function is called to uninitialize a logical IPU channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID to uninit.
++ */
++void ipu_uninit_channel(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t reg;
++ uint32_t in_dma, out_dma = 0;
++ uint32_t ipu_conf;
++ uint32_t dc_chan = 0;
++ int ret;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ if ((ipu->channel_init_mask & (1L << IPU_CHAN_ID(channel))) == 0) {
++ dev_dbg(ipu->dev, "Channel already uninitialized %d\n",
++ IPU_CHAN_ID(channel));
++ mutex_unlock(&ipu->mutex_lock);
++ return;
++ }
++
++ /* Make sure channel is disabled */
++ /* Get input and output dma channels */
++ in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
++ out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
++
++ if (idma_is_set(ipu, IDMAC_CHA_EN, in_dma) ||
++ idma_is_set(ipu, IDMAC_CHA_EN, out_dma)) {
++ dev_err(ipu->dev,
++ "Channel %d is not disabled, disable first\n",
++ IPU_CHAN_ID(channel));
++ mutex_unlock(&ipu->mutex_lock);
++ return;
++ }
++
++ ipu_conf = ipu_cm_read(ipu, IPU_CONF);
++
++ /* Reset the double buffer */
++ reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(in_dma));
++ ipu_cm_write(ipu, reg & ~idma_mask(in_dma), IPU_CHA_DB_MODE_SEL(in_dma));
++ reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(out_dma));
++ ipu_cm_write(ipu, reg & ~idma_mask(out_dma), IPU_CHA_DB_MODE_SEL(out_dma));
++
++ /* Reset the triple buffer */
++ reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(in_dma));
++ ipu_cm_write(ipu, reg & ~idma_mask(in_dma), IPU_CHA_TRB_MODE_SEL(in_dma));
++ reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(out_dma));
++ ipu_cm_write(ipu, reg & ~idma_mask(out_dma), IPU_CHA_TRB_MODE_SEL(out_dma));
++
++ if (_ipu_is_ic_chan(in_dma) || _ipu_is_dp_graphic_chan(in_dma)) {
++ ipu->sec_chan_en[IPU_CHAN_ID(channel)] = false;
++ ipu->thrd_chan_en[IPU_CHAN_ID(channel)] = false;
++ }
++
++ switch (channel) {
++ case CSI_MEM0:
++ case CSI_MEM1:
++ case CSI_MEM2:
++ case CSI_MEM3:
++ ipu->smfc_use_count--;
++ if (ipu->csi_channel[0] == channel) {
++ ipu->csi_channel[0] = CHAN_NONE;
++ } else if (ipu->csi_channel[1] == channel) {
++ ipu->csi_channel[1] = CHAN_NONE;
++ }
++ break;
++ case CSI_PRP_ENC_MEM:
++ ipu->ic_use_count--;
++ if (ipu->using_ic_dirct_ch == CSI_PRP_ENC_MEM)
++ ipu->using_ic_dirct_ch = 0;
++ _ipu_ic_uninit_prpenc(ipu);
++ if (ipu->csi_channel[0] == channel) {
++ ipu->csi_channel[0] = CHAN_NONE;
++ } else if (ipu->csi_channel[1] == channel) {
++ ipu->csi_channel[1] = CHAN_NONE;
++ }
++ break;
++ case CSI_PRP_VF_MEM:
++ ipu->ic_use_count--;
++ if (ipu->using_ic_dirct_ch == CSI_PRP_VF_MEM)
++ ipu->using_ic_dirct_ch = 0;
++ _ipu_ic_uninit_prpvf(ipu);
++ if (ipu->csi_channel[0] == channel) {
++ ipu->csi_channel[0] = CHAN_NONE;
++ } else if (ipu->csi_channel[1] == channel) {
++ ipu->csi_channel[1] = CHAN_NONE;
++ }
++ break;
++ case MEM_PRP_VF_MEM:
++ ipu->ic_use_count--;
++ _ipu_ic_uninit_prpvf(ipu);
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ ipu->ic_use_count--;
++ ipu->vdi_use_count--;
++ if (ipu->using_ic_dirct_ch == MEM_VDI_PRP_VF_MEM)
++ ipu->using_ic_dirct_ch = 0;
++ _ipu_ic_uninit_prpvf(ipu);
++ _ipu_vdi_uninit(ipu);
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg & ~FS_VF_IN_VALID, IPU_FS_PROC_FLOW1);
++ break;
++ case MEM_VDI_MEM:
++ ipu->ic_use_count--;
++ ipu->vdi_use_count--;
++ if (ipu->using_ic_dirct_ch == MEM_VDI_MEM)
++ ipu->using_ic_dirct_ch = 0;
++ _ipu_vdi_uninit(ipu);
++ break;
++ case MEM_VDI_PRP_VF_MEM_P:
++ case MEM_VDI_PRP_VF_MEM_N:
++ case MEM_VDI_MEM_P:
++ case MEM_VDI_MEM_N:
++ break;
++ case MEM_ROT_VF_MEM:
++ ipu->rot_use_count--;
++ ipu->ic_use_count--;
++ _ipu_ic_uninit_rotate_vf(ipu);
++ break;
++ case MEM_PRP_ENC_MEM:
++ ipu->ic_use_count--;
++ _ipu_ic_uninit_prpenc(ipu);
++ reg = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW1);
++ break;
++ case MEM_ROT_ENC_MEM:
++ ipu->rot_use_count--;
++ ipu->ic_use_count--;
++ _ipu_ic_uninit_rotate_enc(ipu);
++ break;
++ case MEM_PP_MEM:
++ ipu->ic_use_count--;
++ _ipu_ic_uninit_pp(ipu);
++ break;
++ case MEM_ROT_PP_MEM:
++ ipu->rot_use_count--;
++ ipu->ic_use_count--;
++ _ipu_ic_uninit_rotate_pp(ipu);
++ break;
++ case MEM_DC_SYNC:
++ dc_chan = 1;
++ _ipu_dc_uninit(ipu, 1);
++ ipu->di_use_count[ipu->dc_di_assignment[1]]--;
++ ipu->dc_use_count--;
++ ipu->dmfc_use_count--;
++ break;
++ case MEM_BG_SYNC:
++ dc_chan = 5;
++ _ipu_dp_uninit(ipu, channel);
++ _ipu_dc_uninit(ipu, 5);
++ ipu->di_use_count[ipu->dc_di_assignment[5]]--;
++ ipu->dc_use_count--;
++ ipu->dp_use_count--;
++ ipu->dmfc_use_count--;
++ break;
++ case MEM_FG_SYNC:
++ _ipu_dp_uninit(ipu, channel);
++ ipu->dc_use_count--;
++ ipu->dp_use_count--;
++ ipu->dmfc_use_count--;
++ break;
++ case DIRECT_ASYNC0:
++ dc_chan = 8;
++ _ipu_dc_uninit(ipu, 8);
++ ipu->di_use_count[ipu->dc_di_assignment[8]]--;
++ ipu->dc_use_count--;
++ break;
++ case DIRECT_ASYNC1:
++ dc_chan = 9;
++ _ipu_dc_uninit(ipu, 9);
++ ipu->di_use_count[ipu->dc_di_assignment[9]]--;
++ ipu->dc_use_count--;
++ break;
++ default:
++ break;
++ }
++
++ if (ipu->ic_use_count == 0)
++ ipu_conf &= ~IPU_CONF_IC_EN;
++ if (ipu->vdi_use_count == 0) {
++ ipu_conf &= ~IPU_CONF_ISP_EN;
++ ipu_conf &= ~IPU_CONF_VDI_EN;
++ ipu_conf &= ~IPU_CONF_IC_INPUT;
++ }
++ if (ipu->rot_use_count == 0)
++ ipu_conf &= ~IPU_CONF_ROT_EN;
++ if (ipu->dc_use_count == 0)
++ ipu_conf &= ~IPU_CONF_DC_EN;
++ if (ipu->dp_use_count == 0)
++ ipu_conf &= ~IPU_CONF_DP_EN;
++ if (ipu->dmfc_use_count == 0)
++ ipu_conf &= ~IPU_CONF_DMFC_EN;
++ if (ipu->di_use_count[0] == 0) {
++ ipu_conf &= ~IPU_CONF_DI0_EN;
++ }
++ if (ipu->di_use_count[1] == 0) {
++ ipu_conf &= ~IPU_CONF_DI1_EN;
++ }
++ if (ipu->smfc_use_count == 0)
++ ipu_conf &= ~IPU_CONF_SMFC_EN;
++
++ ipu_cm_write(ipu, ipu_conf, IPU_CONF);
++
++ ipu->channel_init_mask &= ~(1L << IPU_CHAN_ID(channel));
++
++ /*
++ * Disable pixel clk and its parent clock(if the parent clock
++ * usecount is 1) after clearing DC/DP/DI bits in IPU_CONF
++ * register to prevent LVDS display channel starvation.
++ */
++ if (_ipu_is_primary_disp_chan(in_dma))
++ clk_disable_unprepare(ipu->pixel_clk[ipu->dc_di_assignment[dc_chan]]);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++
++ ret = pm_runtime_put_sync_suspend(ipu->dev);
++ if (ret < 0) {
++ dev_err(ipu->dev, "ch = %d, pm_runtime_put failed:%d!\n",
++ IPU_CHAN_ID(channel), ret);
++ dump_stack();
++ }
++
++ WARN_ON(ipu->ic_use_count < 0);
++ WARN_ON(ipu->vdi_use_count < 0);
++ WARN_ON(ipu->rot_use_count < 0);
++ WARN_ON(ipu->dc_use_count < 0);
++ WARN_ON(ipu->dp_use_count < 0);
++ WARN_ON(ipu->dmfc_use_count < 0);
++ WARN_ON(ipu->smfc_use_count < 0);
++}
++EXPORT_SYMBOL(ipu_uninit_channel);
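++
++/*
++ * Illustrative sketch (assumed usage): ipu_uninit_channel() refuses to
++ * run while the IDMAC channels are still enabled, so a channel is
++ * always disabled first:
++ *
++ *	ipu_disable_channel(ipu, MEM_BG_SYNC, true);
++ *	ipu_uninit_channel(ipu, MEM_BG_SYNC);
++ */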
++
++/*!
++ * This function is called to initialize buffer(s) for a logical IPU channel.
++ *
++ * @param ipu ipu handler
++ *
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param type Input parameter which buffer to initialize.
++ *
++ * @param pixel_fmt Input parameter for pixel format of buffer.
++ * Pixel format is a FOURCC ASCII code.
++ *
++ * @param width Input parameter for width of buffer in pixels.
++ *
++ * @param height Input parameter for height of buffer in pixels.
++ *
++ * @param stride Input parameter for stride length of buffer
++ * in bytes (at least width * bytes-per-pixel,
++ * 32-bit aligned).
++ *
++ * @param rot_mode Input parameter for rotation setting of buffer.
++ * A rotation setting other than
++ * IPU_ROTATE_VERT_FLIP
++ * should only be used for input buffers of
++ * rotation channels.
++ *
++ * @param phyaddr_0 Input parameter buffer 0 physical address.
++ *
++ * @param phyaddr_1 Input parameter buffer 1 physical address.
++ * Setting this to a value other than NULL enables
++ * double buffering mode.
++ *
++ * @param phyaddr_2 Input parameter buffer 2 physical address.
++ * Setting this to a value other than NULL enables
++ * triple buffering mode, phyaddr_1 should not be
++ * NULL then.
++ *
++ * @param u private u offset for additional cropping,
++ * zero if not used.
++ *
++ * @param v private v offset for additional cropping,
++ * zero if not used.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t ipu_init_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
++ ipu_buffer_t type,
++ uint32_t pixel_fmt,
++ uint16_t width, uint16_t height,
++ uint32_t stride,
++ ipu_rotate_mode_t rot_mode,
++ dma_addr_t phyaddr_0, dma_addr_t phyaddr_1,
++ dma_addr_t phyaddr_2,
++ uint32_t u, uint32_t v)
++{
++ uint32_t reg;
++ uint32_t dma_chan;
++ uint32_t burst_size;
++
++ dma_chan = channel_2_dma(channel, type);
++ if (!idma_is_valid(dma_chan))
++ return -EINVAL;
++
++ if (stride < width * bytes_per_pixel(pixel_fmt))
++ stride = width * bytes_per_pixel(pixel_fmt);
++
++ if (stride % 4) {
++ dev_err(ipu->dev,
++ "Stride not 32-bit aligned, stride = %d\n", stride);
++ return -EINVAL;
++ }
++ /* IC & IRT channels' width must be multiple of 8 pixels */
++ if ((_ipu_is_ic_chan(dma_chan) || _ipu_is_irt_chan(dma_chan))
++ && (width % 8)) {
++ dev_err(ipu->dev, "Width must be 8 pixel multiple\n");
++ return -EINVAL;
++ }
++
++ if (_ipu_is_vdi_out_chan(dma_chan) &&
++ ((width < 16) || (height < 16) || (width % 2) || (height % 4))) {
++ dev_err(ipu->dev, "vdi width/height limited err\n");
++ return -EINVAL;
++ }
++
++ /* IPUv3EX and IPUv3M support triple buffer */
++ if ((!_ipu_is_trb_chan(dma_chan)) && phyaddr_2) {
++ dev_err(ipu->dev, "Chan%d doesn't support triple buffer "
++ "mode\n", dma_chan);
++ return -EINVAL;
++ }
++ if (!phyaddr_1 && phyaddr_2) {
++ dev_err(ipu->dev, "Chan%d's buf1 physical addr is NULL for "
++ "triple buffer mode\n", dma_chan);
++ return -EINVAL;
++ }
++
++ mutex_lock(&ipu->mutex_lock);
++
++ /* Build parameter memory data for DMA channel */
++ _ipu_ch_param_init(ipu, dma_chan, pixel_fmt, width, height, stride, u, v, 0,
++ phyaddr_0, phyaddr_1, phyaddr_2);
++
++ /* Set correlative channel parameter of local alpha channel */
++ if ((_ipu_is_ic_graphic_chan(dma_chan) ||
++ _ipu_is_dp_graphic_chan(dma_chan)) &&
++ (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] == true)) {
++ _ipu_ch_param_set_alpha_use_separate_channel(ipu, dma_chan, true);
++ _ipu_ch_param_set_alpha_buffer_memory(ipu, dma_chan);
++ _ipu_ch_param_set_alpha_condition_read(ipu, dma_chan);
++ /* fix alpha width as 8 and burst size as 16 */
++ _ipu_ch_params_set_alpha_width(ipu, dma_chan, 8);
++ _ipu_ch_param_set_burst_size(ipu, dma_chan, 16);
++ } else if (_ipu_is_ic_graphic_chan(dma_chan) &&
++ ipu_pixel_format_has_alpha(pixel_fmt))
++ _ipu_ch_param_set_alpha_use_separate_channel(ipu, dma_chan, false);
++
++ if (rot_mode)
++ _ipu_ch_param_set_rotation(ipu, dma_chan, rot_mode);
++
++ /* IC and ROT channels have restriction of 8 or 16 pix burst length */
++ if (_ipu_is_ic_chan(dma_chan) || _ipu_is_vdi_out_chan(dma_chan)) {
++ if ((width % 16) == 0)
++ _ipu_ch_param_set_burst_size(ipu, dma_chan, 16);
++ else
++ _ipu_ch_param_set_burst_size(ipu, dma_chan, 8);
++ } else if (_ipu_is_irt_chan(dma_chan)) {
++ _ipu_ch_param_set_burst_size(ipu, dma_chan, 8);
++ _ipu_ch_param_set_block_mode(ipu, dma_chan);
++ } else if (_ipu_is_dmfc_chan(dma_chan)) {
++ burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
++ _ipu_dmfc_set_wait4eot(ipu, dma_chan, width);
++ _ipu_dmfc_set_burst_size(ipu, dma_chan, burst_size);
++ }
++
++ if (_ipu_disp_chan_is_interlaced(ipu, channel) ||
++ ipu->chan_is_interlaced[dma_chan])
++ _ipu_ch_param_set_interlaced_scan(ipu, dma_chan);
++
++ if (_ipu_is_ic_chan(dma_chan) || _ipu_is_irt_chan(dma_chan) ||
++ _ipu_is_vdi_out_chan(dma_chan)) {
++ burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
++ _ipu_ic_idma_init(ipu, dma_chan, width, height, burst_size,
++ rot_mode);
++ } else if (_ipu_is_smfc_chan(dma_chan)) {
++ burst_size = _ipu_ch_param_get_burst_size(ipu, dma_chan);
++ /*
++ * This is different from IPUv3 spec, but it is confirmed
++ * in IPUforum that SMFC burst size should be NPB[6:3]
++ * when IDMAC works in 16-bit generic data mode.
++ */
++ if (pixel_fmt == IPU_PIX_FMT_GENERIC)
++ /* 8 bits per pixel */
++ burst_size = burst_size >> 4;
++ else if (pixel_fmt == IPU_PIX_FMT_GENERIC_16)
++ /* 16 bits per pixel */
++ burst_size = burst_size >> 3;
++ else
++ burst_size = burst_size >> 2;
++ _ipu_smfc_set_burst_size(ipu, channel, burst_size-1);
++ }
++
++ /* AXI-id */
++ if (idma_is_set(ipu, IDMAC_CHA_PRI, dma_chan)) {
++ unsigned reg = IDMAC_CH_LOCK_EN_1;
++ uint32_t value = 0;
++ if (ipu->pdata->devtype == IPU_V3H) {
++ _ipu_ch_param_set_axi_id(ipu, dma_chan, 0);
++ switch (dma_chan) {
++ case 5:
++ value = 0x3;
++ break;
++ case 11:
++ value = 0x3 << 2;
++ break;
++ case 12:
++ value = 0x3 << 4;
++ break;
++ case 14:
++ value = 0x3 << 6;
++ break;
++ case 15:
++ value = 0x3 << 8;
++ break;
++ case 20:
++ value = 0x3 << 10;
++ break;
++ case 21:
++ value = 0x3 << 12;
++ break;
++ case 22:
++ value = 0x3 << 14;
++ break;
++ case 23:
++ value = 0x3 << 16;
++ break;
++ case 27:
++ value = 0x3 << 18;
++ break;
++ case 28:
++ value = 0x3 << 20;
++ break;
++ case 45:
++ reg = IDMAC_CH_LOCK_EN_2;
++ value = 0x3 << 0;
++ break;
++ case 46:
++ reg = IDMAC_CH_LOCK_EN_2;
++ value = 0x3 << 2;
++ break;
++ case 47:
++ reg = IDMAC_CH_LOCK_EN_2;
++ value = 0x3 << 4;
++ break;
++ case 48:
++ reg = IDMAC_CH_LOCK_EN_2;
++ value = 0x3 << 6;
++ break;
++ case 49:
++ reg = IDMAC_CH_LOCK_EN_2;
++ value = 0x3 << 8;
++ break;
++ case 50:
++ reg = IDMAC_CH_LOCK_EN_2;
++ value = 0x3 << 10;
++ break;
++ default:
++ break;
++ }
++ value |= ipu_idmac_read(ipu, reg);
++ ipu_idmac_write(ipu, value, reg);
++ } else
++ _ipu_ch_param_set_axi_id(ipu, dma_chan, 1);
++ } else {
++ if (ipu->pdata->devtype == IPU_V3H)
++ _ipu_ch_param_set_axi_id(ipu, dma_chan, 1);
++ }
++
++ _ipu_ch_param_dump(ipu, dma_chan);
++
++ if (phyaddr_2 && g_ipu_hw_rev >= IPU_V3DEX) {
++ reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(dma_chan));
++ reg &= ~idma_mask(dma_chan);
++ ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(dma_chan));
++
++ reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
++ reg |= idma_mask(dma_chan);
++ ipu_cm_write(ipu, reg, IPU_CHA_TRB_MODE_SEL(dma_chan));
++
++ /* Set IDMAC third buffer's cpmem number */
++ /* See __ipu_ch_get_third_buf_cpmem_num() for mapping */
++ ipu_idmac_write(ipu, 0x00444047L, IDMAC_SUB_ADDR_4);
++ ipu_idmac_write(ipu, 0x46004241L, IDMAC_SUB_ADDR_3);
++ ipu_idmac_write(ipu, 0x00000045L, IDMAC_SUB_ADDR_1);
++
++ /* Reset to buffer 0 */
++ ipu_cm_write(ipu, tri_cur_buf_mask(dma_chan),
++ IPU_CHA_TRIPLE_CUR_BUF(dma_chan));
++ } else {
++ reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
++ reg &= ~idma_mask(dma_chan);
++ ipu_cm_write(ipu, reg, IPU_CHA_TRB_MODE_SEL(dma_chan));
++
++ reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(dma_chan));
++ if (phyaddr_1)
++ reg |= idma_mask(dma_chan);
++ else
++ reg &= ~idma_mask(dma_chan);
++ ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(dma_chan));
++
++ /* Reset to buffer 0 */
++ ipu_cm_write(ipu, idma_mask(dma_chan),
++ IPU_CHA_CUR_BUF(dma_chan));
++
++ }
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_init_channel_buffer);
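++
++/*
++ * Illustrative sketch (assumed usage): double-buffered RGB565 frames
++ * for the background display channel. buf0/buf1 are hypothetical DMA
++ * addresses; passing 0 for phyaddr_2 keeps triple buffering off, and
++ * IPU_ROTATE_NONE is assumed to denote no rotation.
++ *
++ *	ret = ipu_init_channel_buffer(ipu, MEM_BG_SYNC, IPU_INPUT_BUFFER,
++ *				      IPU_PIX_FMT_RGB565, 1024, 768,
++ *				      1024 * 2, IPU_ROTATE_NONE,
++ *				      buf0, buf1, 0, 0, 0);
++ */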
++
++/*!
++ * This function is called to update the physical address of a buffer for
++ * a logical IPU channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param type Input parameter which buffer to update.
++ *
++ * @param bufNum Input parameter for buffer number to update.
++ * 0, 1 or 2 (third buffer) are the valid values.
++ *
++ * @param phyaddr Input parameter buffer physical address.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail. This function will fail if the buffer is set to ready.
++ */
++int32_t ipu_update_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
++ ipu_buffer_t type, uint32_t bufNum, dma_addr_t phyaddr)
++{
++ uint32_t reg;
++ int ret = 0;
++ uint32_t dma_chan = channel_2_dma(channel, type);
++ unsigned long lock_flags;
++
++ if (dma_chan == IDMA_CHAN_INVALID)
++ return -EINVAL;
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ if (bufNum == 0)
++ reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan));
++ else if (bufNum == 1)
++ reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan));
++ else
++ reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan));
++
++ if ((reg & idma_mask(dma_chan)) == 0)
++ _ipu_ch_param_set_buffer(ipu, dma_chan, bufNum, phyaddr);
++ else
++ ret = -EACCES;
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ return ret;
++}
++EXPORT_SYMBOL(ipu_update_channel_buffer);
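++
++/*
++ * Illustrative sketch (assumed usage): page flipping by retargeting the
++ * idle buffer and marking it ready; -EACCES means the IPU still owns
++ * that buffer. "next_frame" is a hypothetical DMA address.
++ *
++ *	ret = ipu_update_channel_buffer(ipu, MEM_BG_SYNC, IPU_INPUT_BUFFER,
++ *					0, next_frame);
++ *	if (!ret)
++ *		ipu_select_buffer(ipu, MEM_BG_SYNC, IPU_INPUT_BUFFER, 0);
++ */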
++
++/*!
++ * This function is called to update the band mode setting for
++ * a logical IPU channel.
++ *
++ * @param ipu ipu handler
++ *
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param type Input parameter which buffer to set the band mode for.
++ *
++ * @param band_height Input parameter for band lines:
++ * should be log2(4/8/16/32/64/128/256).
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_set_channel_bandmode(struct ipu_soc *ipu, ipu_channel_t channel,
++ ipu_buffer_t type, uint32_t band_height)
++{
++ uint32_t reg;
++ int ret = 0;
++ uint32_t dma_chan = channel_2_dma(channel, type);
++
++ if ((2 > band_height) || (8 < band_height))
++ return -EINVAL;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ reg = ipu_idmac_read(ipu, IDMAC_BAND_EN(dma_chan));
++ reg |= 1 << (dma_chan % 32);
++ ipu_idmac_write(ipu, reg, IDMAC_BAND_EN(dma_chan));
++
++ _ipu_ch_param_set_bandmode(ipu, dma_chan, band_height);
++ dev_dbg(ipu->dev, "dma_chan:%d, band_height:%d.\n\n",
++ dma_chan, 1 << band_height);
++ mutex_unlock(&ipu->mutex_lock);
++
++ return ret;
++}
++EXPORT_SYMBOL(ipu_set_channel_bandmode);
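++
++/*
++ * Illustrative sketch (assumed usage): band_height is the log2 of the
++ * band size, so 4 selects 16-line bands (the accepted range 2..8 maps
++ * to 4..256 lines):
++ *
++ *	ret = ipu_set_channel_bandmode(ipu, MEM_PP_MEM,
++ *				       IPU_OUTPUT_BUFFER, 4);
++ */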
++
++/*!
++ * This function is called to update the vertical/horizontal offsets of
++ * a buffer for a logical IPU channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param type Input parameter which buffer to update.
++ *
++ * @param pixel_fmt Input parameter for pixel format of buffer.
++ * Pixel format is a FOURCC ASCII code.
++ *
++ * @param width Input parameter for width of buffer in pixels.
++ *
++ * @param height Input parameter for height of buffer in pixels.
++ *
++ * @param stride Input parameter for stride length of buffer
++ * in bytes.
++ *
++ * @param u predefined private u offset for additional cropping,
++ * zero if not used.
++ *
++ * @param v predefined private v offset for additional cropping,
++ * zero if not used.
++ *
++ * @param vertical_offset vertical offset for Y coordinate
++ * in the existing frame
++ *
++ *
++ * @param horizontal_offset horizontal offset for X coordinate
++ * in the existing frame
++ *
++ *
++ * @return Returns 0 on success or negative error code on fail
++ * This function will fail if any buffer is set to ready.
++ */
++
++int32_t ipu_update_channel_offset(struct ipu_soc *ipu,
++ ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t pixel_fmt,
++ uint16_t width, uint16_t height,
++ uint32_t stride,
++ uint32_t u, uint32_t v,
++ uint32_t vertical_offset, uint32_t horizontal_offset)
++{
++ int ret = 0;
++ uint32_t dma_chan = channel_2_dma(channel, type);
++ unsigned long lock_flags;
++
++ if (dma_chan == IDMA_CHAN_INVALID)
++ return -EINVAL;
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ if ((ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan)) & idma_mask(dma_chan)) ||
++ (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan)) & idma_mask(dma_chan)) ||
++ ((ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan)) & idma_mask(dma_chan)) &&
++ (ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan)) & idma_mask(dma_chan)) &&
++ _ipu_is_trb_chan(dma_chan)))
++ ret = -EACCES;
++ else
++ _ipu_ch_offset_update(ipu, dma_chan, pixel_fmt, width, height, stride,
++ u, v, 0, vertical_offset, horizontal_offset);
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ return ret;
++}
++EXPORT_SYMBOL(ipu_update_channel_offset);
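++
++/*
++ * Illustrative sketch (assumed usage): panning within a larger source
++ * frame by moving the crop window 32 lines down and 64 pixels right;
++ * the call fails with -EACCES while any buffer is marked ready.
++ *
++ *	ret = ipu_update_channel_offset(ipu, MEM_BG_SYNC, IPU_INPUT_BUFFER,
++ *					IPU_PIX_FMT_RGB565, 1024, 768,
++ *					1024 * 2, 0, 0, 32, 64);
++ */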
++
++
++/*!
++ * This function is called to set a channel's buffer as ready.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param type Input parameter which buffer to select.
++ *
++ * @param bufNum Input parameter for which buffer number to set
++ * to the ready state.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t ipu_select_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
++ ipu_buffer_t type, uint32_t bufNum)
++{
++ uint32_t dma_chan = channel_2_dma(channel, type);
++ unsigned long lock_flags;
++
++ if (dma_chan == IDMA_CHAN_INVALID)
++ return -EINVAL;
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ /* Mark buffer to be ready. */
++ if (bufNum == 0)
++ ipu_cm_write(ipu, idma_mask(dma_chan),
++ IPU_CHA_BUF0_RDY(dma_chan));
++ else if (bufNum == 1)
++ ipu_cm_write(ipu, idma_mask(dma_chan),
++ IPU_CHA_BUF1_RDY(dma_chan));
++ else
++ ipu_cm_write(ipu, idma_mask(dma_chan),
++ IPU_CHA_BUF2_RDY(dma_chan));
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_select_buffer);
++
++/*!
++ * This function is called to set the multi-VDI channels' buffers as ready.
++ *
++ * @param ipu ipu handler
++ * @param bufNum Input parameter for which buffer number to set
++ * to the ready state.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t ipu_select_multi_vdi_buffer(struct ipu_soc *ipu, uint32_t bufNum)
++{
++
++ uint32_t dma_chan = channel_2_dma(MEM_VDI_PRP_VF_MEM, IPU_INPUT_BUFFER);
++ uint32_t mask_bit =
++ idma_mask(channel_2_dma(MEM_VDI_PRP_VF_MEM_P, IPU_INPUT_BUFFER))|
++ idma_mask(dma_chan)|
++ idma_mask(channel_2_dma(MEM_VDI_PRP_VF_MEM_N, IPU_INPUT_BUFFER));
++ unsigned long lock_flags;
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ /* Mark buffers to be ready. */
++ if (bufNum == 0)
++ ipu_cm_write(ipu, mask_bit, IPU_CHA_BUF0_RDY(dma_chan));
++ else
++ ipu_cm_write(ipu, mask_bit, IPU_CHA_BUF1_RDY(dma_chan));
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_select_multi_vdi_buffer);
++
++#define NA -1
++static int proc_dest_sel[] = {
++ 0, 1, 1, 3, 5, 5, 4, 7, 8, 9, 10, 11, 12, 14, 15, 16,
++ 0, 1, 1, 5, 5, 5, 5, 5, 7, 8, 9, 10, 11, 12, 14, 31 };
++static int proc_src_sel[] = { 0, 6, 7, 6, 7, 8, 5, NA, NA, NA,
++ NA, NA, NA, NA, NA, 1, 2, 3, 4, 7, 8, NA, 8, NA };
++static int disp_src_sel[] = { 0, 6, 7, 8, 3, 4, 5, NA, NA, NA,
++ NA, NA, NA, NA, NA, 1, NA, 2, NA, 3, 4, 4, 4, 4 };
++
++
++/*!
++ * This function links 2 channels together for automatic frame
++ * synchronization. The output of the source channel is linked to the input of
++ * the destination channel.
++ *
++ * @param ipu ipu handler
++ * @param src_ch Input parameter for the logical channel ID of
++ * the source channel.
++ *
++ * @param dest_ch Input parameter for the logical channel ID of
++ * the destination channel.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_link_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch)
++{
++ int retval = 0;
++ uint32_t fs_proc_flow1;
++ uint32_t fs_proc_flow2;
++ uint32_t fs_proc_flow3;
++ uint32_t fs_disp_flow1;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ fs_proc_flow1 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ fs_proc_flow2 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW2);
++ fs_proc_flow3 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW3);
++ fs_disp_flow1 = ipu_cm_read(ipu, IPU_FS_DISP_FLOW1);
++
++ switch (src_ch) {
++ case CSI_MEM0:
++ fs_proc_flow3 &= ~FS_SMFC0_DEST_SEL_MASK;
++ fs_proc_flow3 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_SMFC0_DEST_SEL_OFFSET;
++ break;
++ case CSI_MEM1:
++ fs_proc_flow3 &= ~FS_SMFC1_DEST_SEL_MASK;
++ fs_proc_flow3 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_SMFC1_DEST_SEL_OFFSET;
++ break;
++ case CSI_MEM2:
++ fs_proc_flow3 &= ~FS_SMFC2_DEST_SEL_MASK;
++ fs_proc_flow3 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_SMFC2_DEST_SEL_OFFSET;
++ break;
++ case CSI_MEM3:
++ fs_proc_flow3 &= ~FS_SMFC3_DEST_SEL_MASK;
++ fs_proc_flow3 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_SMFC3_DEST_SEL_OFFSET;
++ break;
++ case CSI_PRP_ENC_MEM:
++ fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PRPENC_DEST_SEL_OFFSET;
++ break;
++ case CSI_PRP_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PRPVF_DEST_SEL_OFFSET;
++ break;
++ case MEM_PP_MEM:
++ fs_proc_flow2 &= ~FS_PP_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PP_DEST_SEL_OFFSET;
++ break;
++ case MEM_ROT_PP_MEM:
++ fs_proc_flow2 &= ~FS_PP_ROT_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PP_ROT_DEST_SEL_OFFSET;
++ break;
++ case MEM_PRP_ENC_MEM:
++ fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PRPENC_DEST_SEL_OFFSET;
++ break;
++ case MEM_ROT_ENC_MEM:
++ fs_proc_flow2 &= ~FS_PRPENC_ROT_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PRPENC_ROT_DEST_SEL_OFFSET;
++ break;
++ case MEM_PRP_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PRPVF_DEST_SEL_OFFSET;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PRPVF_DEST_SEL_OFFSET;
++ break;
++ case MEM_ROT_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_ROT_DEST_SEL_MASK;
++ fs_proc_flow2 |=
++ proc_dest_sel[IPU_CHAN_ID(dest_ch)] <<
++ FS_PRPVF_ROT_DEST_SEL_OFFSET;
++ break;
++ case MEM_VDOA_MEM:
++ fs_proc_flow3 &= ~FS_VDOA_DEST_SEL_MASK;
++ if (MEM_VDI_MEM == dest_ch)
++ fs_proc_flow3 |= FS_VDOA_DEST_SEL_VDI;
++ else if (MEM_PP_MEM == dest_ch)
++ fs_proc_flow3 |= FS_VDOA_DEST_SEL_IC;
++ else {
++ retval = -EINVAL;
++ goto err;
++ }
++ break;
++ default:
++ retval = -EINVAL;
++ goto err;
++ }
++
++ switch (dest_ch) {
++ case MEM_PP_MEM:
++ fs_proc_flow1 &= ~FS_PP_SRC_SEL_MASK;
++ if (MEM_VDOA_MEM == src_ch)
++ fs_proc_flow1 |= FS_PP_SRC_SEL_VDOA;
++ else
++ fs_proc_flow1 |= proc_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_PP_SRC_SEL_OFFSET;
++ break;
++ case MEM_ROT_PP_MEM:
++ fs_proc_flow1 &= ~FS_PP_ROT_SRC_SEL_MASK;
++ fs_proc_flow1 |=
++ proc_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_PP_ROT_SRC_SEL_OFFSET;
++ break;
++ case MEM_PRP_ENC_MEM:
++ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
++ fs_proc_flow1 |=
++ proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
++ break;
++ case MEM_ROT_ENC_MEM:
++ fs_proc_flow1 &= ~FS_PRPENC_ROT_SRC_SEL_MASK;
++ fs_proc_flow1 |=
++ proc_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_PRPENC_ROT_SRC_SEL_OFFSET;
++ break;
++ case MEM_PRP_VF_MEM:
++ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
++ fs_proc_flow1 |=
++ proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
++ fs_proc_flow1 |=
++ proc_src_sel[IPU_CHAN_ID(src_ch)] << FS_PRP_SRC_SEL_OFFSET;
++ break;
++ case MEM_ROT_VF_MEM:
++ fs_proc_flow1 &= ~FS_PRPVF_ROT_SRC_SEL_MASK;
++ fs_proc_flow1 |=
++ proc_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_PRPVF_ROT_SRC_SEL_OFFSET;
++ break;
++ case MEM_DC_SYNC:
++ fs_disp_flow1 &= ~FS_DC1_SRC_SEL_MASK;
++ fs_disp_flow1 |=
++ disp_src_sel[IPU_CHAN_ID(src_ch)] << FS_DC1_SRC_SEL_OFFSET;
++ break;
++ case MEM_BG_SYNC:
++ fs_disp_flow1 &= ~FS_DP_SYNC0_SRC_SEL_MASK;
++ fs_disp_flow1 |=
++ disp_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_DP_SYNC0_SRC_SEL_OFFSET;
++ break;
++ case MEM_FG_SYNC:
++ fs_disp_flow1 &= ~FS_DP_SYNC1_SRC_SEL_MASK;
++ fs_disp_flow1 |=
++ disp_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_DP_SYNC1_SRC_SEL_OFFSET;
++ break;
++ case MEM_DC_ASYNC:
++ fs_disp_flow1 &= ~FS_DC2_SRC_SEL_MASK;
++ fs_disp_flow1 |=
++ disp_src_sel[IPU_CHAN_ID(src_ch)] << FS_DC2_SRC_SEL_OFFSET;
++ break;
++ case MEM_BG_ASYNC0:
++ fs_disp_flow1 &= ~FS_DP_ASYNC0_SRC_SEL_MASK;
++ fs_disp_flow1 |=
++ disp_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_DP_ASYNC0_SRC_SEL_OFFSET;
++ break;
++ case MEM_FG_ASYNC0:
++ fs_disp_flow1 &= ~FS_DP_ASYNC1_SRC_SEL_MASK;
++ fs_disp_flow1 |=
++ disp_src_sel[IPU_CHAN_ID(src_ch)] <<
++ FS_DP_ASYNC1_SRC_SEL_OFFSET;
++ break;
++ case MEM_VDI_MEM:
++ fs_proc_flow1 &= ~FS_VDI_SRC_SEL_MASK;
++ if (MEM_VDOA_MEM == src_ch)
++ fs_proc_flow1 |= FS_VDI_SRC_SEL_VDOA;
++ else {
++ retval = -EINVAL;
++ goto err;
++ }
++ break;
++ default:
++ retval = -EINVAL;
++ goto err;
++ }
++
++ ipu_cm_write(ipu, fs_proc_flow1, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, fs_proc_flow2, IPU_FS_PROC_FLOW2);
++ ipu_cm_write(ipu, fs_proc_flow3, IPU_FS_PROC_FLOW3);
++ ipu_cm_write(ipu, fs_disp_flow1, IPU_FS_DISP_FLOW1);
++
++err:
++ mutex_unlock(&ipu->mutex_lock);
++ return retval;
++}
++EXPORT_SYMBOL(ipu_link_channels);
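++
++/*
++ * Illustrative sketch (assumed usage): feeding the post-processor
++ * output straight into the background display flow, then undoing the
++ * link with the mirror call below:
++ *
++ *	ret = ipu_link_channels(ipu, MEM_PP_MEM, MEM_BG_SYNC);
++ *	...
++ *	ret = ipu_unlink_channels(ipu, MEM_PP_MEM, MEM_BG_SYNC);
++ */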
++
++/*!
++ * This function unlinks 2 channels and disables automatic frame
++ * synchronization.
++ *
++ * @param ipu ipu handler
++ * @param src_ch Input parameter for the logical channel ID of
++ * the source channel.
++ *
++ * @param dest_ch Input parameter for the logical channel ID of
++ * the destination channel.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_unlink_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch)
++{
++ int retval = 0;
++ uint32_t fs_proc_flow1;
++ uint32_t fs_proc_flow2;
++ uint32_t fs_proc_flow3;
++ uint32_t fs_disp_flow1;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ fs_proc_flow1 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW1);
++ fs_proc_flow2 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW2);
++ fs_proc_flow3 = ipu_cm_read(ipu, IPU_FS_PROC_FLOW3);
++ fs_disp_flow1 = ipu_cm_read(ipu, IPU_FS_DISP_FLOW1);
++
++ switch (src_ch) {
++ case CSI_MEM0:
++ fs_proc_flow3 &= ~FS_SMFC0_DEST_SEL_MASK;
++ break;
++ case CSI_MEM1:
++ fs_proc_flow3 &= ~FS_SMFC1_DEST_SEL_MASK;
++ break;
++ case CSI_MEM2:
++ fs_proc_flow3 &= ~FS_SMFC2_DEST_SEL_MASK;
++ break;
++ case CSI_MEM3:
++ fs_proc_flow3 &= ~FS_SMFC3_DEST_SEL_MASK;
++ break;
++ case CSI_PRP_ENC_MEM:
++ fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
++ break;
++ case CSI_PRP_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
++ break;
++ case MEM_PP_MEM:
++ fs_proc_flow2 &= ~FS_PP_DEST_SEL_MASK;
++ break;
++ case MEM_ROT_PP_MEM:
++ fs_proc_flow2 &= ~FS_PP_ROT_DEST_SEL_MASK;
++ break;
++ case MEM_PRP_ENC_MEM:
++ fs_proc_flow2 &= ~FS_PRPENC_DEST_SEL_MASK;
++ break;
++ case MEM_ROT_ENC_MEM:
++ fs_proc_flow2 &= ~FS_PRPENC_ROT_DEST_SEL_MASK;
++ break;
++ case MEM_PRP_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_DEST_SEL_MASK;
++ break;
++ case MEM_ROT_VF_MEM:
++ fs_proc_flow2 &= ~FS_PRPVF_ROT_DEST_SEL_MASK;
++ break;
++ case MEM_VDOA_MEM:
++ fs_proc_flow3 &= ~FS_VDOA_DEST_SEL_MASK;
++ break;
++ default:
++ retval = -EINVAL;
++ goto err;
++ }
++
++ switch (dest_ch) {
++ case MEM_PP_MEM:
++ fs_proc_flow1 &= ~FS_PP_SRC_SEL_MASK;
++ break;
++ case MEM_ROT_PP_MEM:
++ fs_proc_flow1 &= ~FS_PP_ROT_SRC_SEL_MASK;
++ break;
++ case MEM_PRP_ENC_MEM:
++ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
++ break;
++ case MEM_ROT_ENC_MEM:
++ fs_proc_flow1 &= ~FS_PRPENC_ROT_SRC_SEL_MASK;
++ break;
++ case MEM_PRP_VF_MEM:
++ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ fs_proc_flow1 &= ~FS_PRP_SRC_SEL_MASK;
++ break;
++ case MEM_ROT_VF_MEM:
++ fs_proc_flow1 &= ~FS_PRPVF_ROT_SRC_SEL_MASK;
++ break;
++ case MEM_DC_SYNC:
++ fs_disp_flow1 &= ~FS_DC1_SRC_SEL_MASK;
++ break;
++ case MEM_BG_SYNC:
++ fs_disp_flow1 &= ~FS_DP_SYNC0_SRC_SEL_MASK;
++ break;
++ case MEM_FG_SYNC:
++ fs_disp_flow1 &= ~FS_DP_SYNC1_SRC_SEL_MASK;
++ break;
++ case MEM_DC_ASYNC:
++ fs_disp_flow1 &= ~FS_DC2_SRC_SEL_MASK;
++ break;
++ case MEM_BG_ASYNC0:
++ fs_disp_flow1 &= ~FS_DP_ASYNC0_SRC_SEL_MASK;
++ break;
++ case MEM_FG_ASYNC0:
++ fs_disp_flow1 &= ~FS_DP_ASYNC1_SRC_SEL_MASK;
++ break;
++ case MEM_VDI_MEM:
++ fs_proc_flow1 &= ~FS_VDI_SRC_SEL_MASK;
++ break;
++ default:
++ retval = -EINVAL;
++ goto err;
++ }
++
++ ipu_cm_write(ipu, fs_proc_flow1, IPU_FS_PROC_FLOW1);
++ ipu_cm_write(ipu, fs_proc_flow2, IPU_FS_PROC_FLOW2);
++ ipu_cm_write(ipu, fs_proc_flow3, IPU_FS_PROC_FLOW3);
++ ipu_cm_write(ipu, fs_disp_flow1, IPU_FS_DISP_FLOW1);
++
++err:
++ mutex_unlock(&ipu->mutex_lock);
++ return retval;
++}
++EXPORT_SYMBOL(ipu_unlink_channels);
++
++/*!
++ * This function checks whether a logical channel is enabled.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @return This function returns 1 if the requested channel is
++ * enabled or 0 if it is not.
++ */
++int32_t ipu_is_channel_busy(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t reg;
++ uint32_t in_dma;
++ uint32_t out_dma;
++
++ out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
++ in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
++
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
++ if (reg & idma_mask(in_dma))
++ return 1;
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
++ if (reg & idma_mask(out_dma))
++ return 1;
++ return 0;
++}
++EXPORT_SYMBOL(ipu_is_channel_busy);
++
++/*!
++ * This function enables a logical channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_enable_channel(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t reg;
++ uint32_t ipu_conf;
++ uint32_t in_dma;
++ uint32_t out_dma;
++ uint32_t sec_dma;
++ uint32_t thrd_dma;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ if (ipu->channel_enable_mask & (1L << IPU_CHAN_ID(channel))) {
++ dev_err(ipu->dev, "Warning: channel already enabled %d\n",
++ IPU_CHAN_ID(channel));
++ mutex_unlock(&ipu->mutex_lock);
++ return -EACCES;
++ }
++
++ /* Get input and output dma channels */
++ out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
++ in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
++
++ ipu_conf = ipu_cm_read(ipu, IPU_CONF);
++ if (ipu->di_use_count[0] > 0) {
++ ipu_conf |= IPU_CONF_DI0_EN;
++ }
++ if (ipu->di_use_count[1] > 0) {
++ ipu_conf |= IPU_CONF_DI1_EN;
++ }
++ if (ipu->dp_use_count > 0)
++ ipu_conf |= IPU_CONF_DP_EN;
++ if (ipu->dc_use_count > 0)
++ ipu_conf |= IPU_CONF_DC_EN;
++ if (ipu->dmfc_use_count > 0)
++ ipu_conf |= IPU_CONF_DMFC_EN;
++ if (ipu->ic_use_count > 0)
++ ipu_conf |= IPU_CONF_IC_EN;
++ if (ipu->vdi_use_count > 0) {
++ ipu_conf |= IPU_CONF_ISP_EN;
++ ipu_conf |= IPU_CONF_VDI_EN;
++ ipu_conf |= IPU_CONF_IC_INPUT;
++ }
++ if (ipu->rot_use_count > 0)
++ ipu_conf |= IPU_CONF_ROT_EN;
++ if (ipu->smfc_use_count > 0)
++ ipu_conf |= IPU_CONF_SMFC_EN;
++ ipu_cm_write(ipu, ipu_conf, IPU_CONF);
++
++ if (idma_is_valid(in_dma)) {
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_CHA_EN(in_dma));
++ }
++ if (idma_is_valid(out_dma)) {
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(out_dma), IDMAC_CHA_EN(out_dma));
++ }
++
++ if ((ipu->sec_chan_en[IPU_CHAN_ID(channel)]) &&
++ ((channel == MEM_PP_MEM) || (channel == MEM_PRP_VF_MEM) ||
++ (channel == MEM_VDI_PRP_VF_MEM))) {
++ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(sec_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(sec_dma), IDMAC_CHA_EN(sec_dma));
++ }
++ if ((ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) &&
++ ((channel == MEM_PP_MEM) || (channel == MEM_PRP_VF_MEM))) {
++ thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
++
++ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
++ reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
++ ipu_idmac_write(ipu, reg | idma_mask(sec_dma), IDMAC_SEP_ALPHA);
++ } else if ((ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) &&
++ ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC))) {
++ thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
++ reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
++ ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_SEP_ALPHA);
++ }
++
++ if ((channel == MEM_DC_SYNC) || (channel == MEM_BG_SYNC) ||
++ (channel == MEM_FG_SYNC)) {
++ reg = ipu_idmac_read(ipu, IDMAC_WM_EN(in_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(in_dma), IDMAC_WM_EN(in_dma));
++
++ _ipu_dp_dc_enable(ipu, channel);
++ }
++
++ if (_ipu_is_ic_chan(in_dma) || _ipu_is_ic_chan(out_dma) ||
++ _ipu_is_irt_chan(in_dma) || _ipu_is_irt_chan(out_dma) ||
++ _ipu_is_vdi_out_chan(out_dma))
++ _ipu_ic_enable_task(ipu, channel);
++
++ ipu->channel_enable_mask |= 1L << IPU_CHAN_ID(channel);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_enable_channel);
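++
++/*
++ * Illustrative sketch (assumed usage): the usual bring-up order is
++ * init channel -> init buffers -> enable channel -> select a buffer:
++ *
++ *	ret = ipu_enable_channel(ipu, MEM_BG_SYNC);
++ *	if (!ret)
++ *		ipu_select_buffer(ipu, MEM_BG_SYNC, IPU_INPUT_BUFFER, 0);
++ */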
++
++/*!
++ * This function checks whether a buffer is ready for a logical channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param type Input parameter which buffer to check.
++ *
++ * @param bufNum Input parameter for which buffer number to
++ * check the ready state.
++ *
++ * @return Returns 1 if the buffer is ready, 0 if it is not, or
++ * a negative error code on fail.
++ */
++int32_t ipu_check_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t bufNum)
++{
++ uint32_t dma_chan = channel_2_dma(channel, type);
++ uint32_t reg;
++ unsigned long lock_flags;
++
++ if (dma_chan == IDMA_CHAN_INVALID)
++ return -EINVAL;
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ if (bufNum == 0)
++ reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(dma_chan));
++ else if (bufNum == 1)
++ reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(dma_chan));
++ else
++ reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(dma_chan));
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ if (reg & idma_mask(dma_chan))
++ return 1;
++ else
++ return 0;
++}
++EXPORT_SYMBOL(ipu_check_buffer_ready);
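++
++/*
++ * Illustrative sketch (assumed usage): a ready bit is cleared by the
++ * IDMAC once the buffer has been consumed, so completion can be polled:
++ *
++ *	while (ipu_check_buffer_ready(ipu, MEM_BG_SYNC,
++ *				      IPU_INPUT_BUFFER, 0))
++ *		cpu_relax();
++ */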
++
++/*!
++ * This function clears the ready state of a buffer for a logical channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param type Input parameter which buffer to clear.
++ *
++ * @param bufNum Input parameter for which buffer number to
++ * clear the ready state.
++ *
++ */
++void _ipu_clear_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t bufNum)
++{
++ uint32_t dma_ch = channel_2_dma(channel, type);
++
++ if (!idma_is_valid(dma_ch))
++ return;
++
++ ipu_cm_write(ipu, 0xF0300000, IPU_GPR); /* write one to clear */
++ if (bufNum == 0)
++ ipu_cm_write(ipu, idma_mask(dma_ch),
++ IPU_CHA_BUF0_RDY(dma_ch));
++ else if (bufNum == 1)
++ ipu_cm_write(ipu, idma_mask(dma_ch),
++ IPU_CHA_BUF1_RDY(dma_ch));
++ else
++ ipu_cm_write(ipu, idma_mask(dma_ch),
++ IPU_CHA_BUF2_RDY(dma_ch));
++ ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
++}
++
++void ipu_clear_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t bufNum)
++{
++ unsigned long lock_flags;
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ _ipu_clear_buffer_ready(ipu, channel, type, bufNum);
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++}
++EXPORT_SYMBOL(ipu_clear_buffer_ready);
++
++/*!
++ * This function disables a logical channel.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param wait_for_stop Flag to set whether to wait for channel end
++ * of frame or return immediately.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_disable_channel(struct ipu_soc *ipu, ipu_channel_t channel, bool wait_for_stop)
++{
++ uint32_t reg;
++ uint32_t in_dma;
++ uint32_t out_dma;
++ uint32_t sec_dma = NO_DMA;
++ uint32_t thrd_dma = NO_DMA;
++ uint16_t fg_pos_x, fg_pos_y;
++ unsigned long lock_flags;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ if ((ipu->channel_enable_mask & (1L << IPU_CHAN_ID(channel))) == 0) {
++ dev_dbg(ipu->dev, "Channel already disabled %d\n",
++ IPU_CHAN_ID(channel));
++ mutex_unlock(&ipu->mutex_lock);
++ return -EACCES;
++ }
++
++ /* Get input and output dma channels */
++ out_dma = channel_2_dma(channel, IPU_OUTPUT_BUFFER);
++ in_dma = channel_2_dma(channel, IPU_VIDEO_IN_BUFFER);
++
++ if ((idma_is_valid(in_dma) &&
++ !idma_is_set(ipu, IDMAC_CHA_EN, in_dma))
++ && (idma_is_valid(out_dma) &&
++ !idma_is_set(ipu, IDMAC_CHA_EN, out_dma))) {
++ mutex_unlock(&ipu->mutex_lock);
++ return -EINVAL;
++ }
++
++ if (ipu->sec_chan_en[IPU_CHAN_ID(channel)])
++ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
++ if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)]) {
++ sec_dma = channel_2_dma(channel, IPU_GRAPH_IN_BUFFER);
++ thrd_dma = channel_2_dma(channel, IPU_ALPHA_IN_BUFFER);
++ }
++
++ if ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC) ||
++ (channel == MEM_DC_SYNC)) {
++ if (channel == MEM_FG_SYNC) {
++ _ipu_disp_get_window_pos(ipu, channel, &fg_pos_x, &fg_pos_y);
++ _ipu_disp_set_window_pos(ipu, channel, 0, 0);
++ }
++
++ _ipu_dp_dc_disable(ipu, channel, false);
++
++ /*
++ * Wait for the BG channel EOF before disabling the
++ * FG IDMAC; this avoids an FG NFB4EOF error.
++ */
++ if ((channel == MEM_FG_SYNC) && (ipu_is_channel_busy(ipu, MEM_BG_SYNC))) {
++ int timeout = 50;
++
++ ipu_cm_write(ipu, IPUIRQ_2_MASK(IPU_IRQ_BG_SYNC_EOF),
++ IPUIRQ_2_STATREG(IPU_IRQ_BG_SYNC_EOF));
++ while ((ipu_cm_read(ipu, IPUIRQ_2_STATREG(IPU_IRQ_BG_SYNC_EOF)) &
++ IPUIRQ_2_MASK(IPU_IRQ_BG_SYNC_EOF)) == 0) {
++ msleep(10);
++ timeout -= 10;
++ if (timeout <= 0) {
++ dev_err(ipu->dev, "warning: wait for bg sync eof timeout\n");
++ break;
++ }
++ }
++ }
++ } else if (wait_for_stop && !_ipu_is_smfc_chan(out_dma) &&
++ channel != CSI_PRP_VF_MEM && channel != CSI_PRP_ENC_MEM) {
++ while (idma_is_set(ipu, IDMAC_CHA_BUSY, in_dma) ||
++ idma_is_set(ipu, IDMAC_CHA_BUSY, out_dma) ||
++ (ipu->sec_chan_en[IPU_CHAN_ID(channel)] &&
++ idma_is_set(ipu, IDMAC_CHA_BUSY, sec_dma)) ||
++ (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] &&
++ idma_is_set(ipu, IDMAC_CHA_BUSY, thrd_dma))) {
++ uint32_t irq = 0xffffffff;
++ int timeout = 50000;
++
++ if (idma_is_set(ipu, IDMAC_CHA_BUSY, out_dma))
++ irq = out_dma;
++ if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] &&
++ idma_is_set(ipu, IDMAC_CHA_BUSY, sec_dma))
++ irq = sec_dma;
++ if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] &&
++ idma_is_set(ipu, IDMAC_CHA_BUSY, thrd_dma))
++ irq = thrd_dma;
++ if (idma_is_set(ipu, IDMAC_CHA_BUSY, in_dma))
++ irq = in_dma;
++
++ if (irq == 0xffffffff) {
++ dev_dbg(ipu->dev, "warning: no channel busy, break\n");
++ break;
++ }
++
++ ipu_cm_write(ipu, IPUIRQ_2_MASK(irq),
++ IPUIRQ_2_STATREG(irq));
++
++ dev_dbg(ipu->dev, "warning: channel %d busy, need wait\n", irq);
++
++ while (((ipu_cm_read(ipu, IPUIRQ_2_STATREG(irq))
++ & IPUIRQ_2_MASK(irq)) == 0) &&
++ (idma_is_set(ipu, IDMAC_CHA_BUSY, irq))) {
++ udelay(10);
++ timeout -= 10;
++ if (timeout <= 0) {
++ ipu_dump_registers(ipu);
++ dev_err(ipu->dev, "warning: disable ipu dma channel %d during its busy state\n", irq);
++ break;
++ }
++ }
++ dev_dbg(ipu->dev, "wait_time:%d\n", 50000 - timeout);
++
++ }
++ }
++
++ if ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC) ||
++ (channel == MEM_DC_SYNC)) {
++ reg = ipu_idmac_read(ipu, IDMAC_WM_EN(in_dma));
++ ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_WM_EN(in_dma));
++ }
++
++ /* Disable IC task */
++ if (_ipu_is_ic_chan(in_dma) || _ipu_is_ic_chan(out_dma) ||
++ _ipu_is_irt_chan(in_dma) || _ipu_is_irt_chan(out_dma) ||
++ _ipu_is_vdi_out_chan(out_dma))
++ _ipu_ic_disable_task(ipu, channel);
++
++ /* Disable DMA channel(s) */
++ if (idma_is_valid(in_dma)) {
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(in_dma));
++ ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_CHA_EN(in_dma));
++ ipu_cm_write(ipu, idma_mask(in_dma), IPU_CHA_CUR_BUF(in_dma));
++ ipu_cm_write(ipu, tri_cur_buf_mask(in_dma),
++ IPU_CHA_TRIPLE_CUR_BUF(in_dma));
++ }
++ if (idma_is_valid(out_dma)) {
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(out_dma));
++ ipu_idmac_write(ipu, reg & ~idma_mask(out_dma), IDMAC_CHA_EN(out_dma));
++ ipu_cm_write(ipu, idma_mask(out_dma), IPU_CHA_CUR_BUF(out_dma));
++ ipu_cm_write(ipu, tri_cur_buf_mask(out_dma),
++ IPU_CHA_TRIPLE_CUR_BUF(out_dma));
++ }
++ if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(sec_dma)) {
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(sec_dma));
++ ipu_idmac_write(ipu, reg & ~idma_mask(sec_dma), IDMAC_CHA_EN(sec_dma));
++ ipu_cm_write(ipu, idma_mask(sec_dma), IPU_CHA_CUR_BUF(sec_dma));
++ }
++ if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(thrd_dma)) {
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(thrd_dma));
++ ipu_idmac_write(ipu, reg & ~idma_mask(thrd_dma), IDMAC_CHA_EN(thrd_dma));
++ if (channel == MEM_BG_SYNC || channel == MEM_FG_SYNC) {
++ reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
++ ipu_idmac_write(ipu, reg & ~idma_mask(in_dma), IDMAC_SEP_ALPHA);
++ } else {
++ reg = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
++ ipu_idmac_write(ipu, reg & ~idma_mask(sec_dma), IDMAC_SEP_ALPHA);
++ }
++ ipu_cm_write(ipu, idma_mask(thrd_dma), IPU_CHA_CUR_BUF(thrd_dma));
++ }
++
++ if (channel == MEM_FG_SYNC)
++ _ipu_disp_set_window_pos(ipu, channel, fg_pos_x, fg_pos_y);
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ /* Set channel buffers NOT to be ready */
++ if (idma_is_valid(in_dma)) {
++ _ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 0);
++ _ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 1);
++ _ipu_clear_buffer_ready(ipu, channel, IPU_VIDEO_IN_BUFFER, 2);
++ }
++ if (idma_is_valid(out_dma)) {
++ _ipu_clear_buffer_ready(ipu, channel, IPU_OUTPUT_BUFFER, 0);
++ _ipu_clear_buffer_ready(ipu, channel, IPU_OUTPUT_BUFFER, 1);
++ }
++ if (ipu->sec_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(sec_dma)) {
++ _ipu_clear_buffer_ready(ipu, channel, IPU_GRAPH_IN_BUFFER, 0);
++ _ipu_clear_buffer_ready(ipu, channel, IPU_GRAPH_IN_BUFFER, 1);
++ }
++ if (ipu->thrd_chan_en[IPU_CHAN_ID(channel)] && idma_is_valid(thrd_dma)) {
++ _ipu_clear_buffer_ready(ipu, channel, IPU_ALPHA_IN_BUFFER, 0);
++ _ipu_clear_buffer_ready(ipu, channel, IPU_ALPHA_IN_BUFFER, 1);
++ }
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ ipu->channel_enable_mask &= ~(1L << IPU_CHAN_ID(channel));
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_disable_channel);
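++
++/*
++ * Illustrative sketch (assumed usage): wait_for_stop=true blocks until
++ * the channel's DMA finishes the current frame, while false returns
++ * immediately:
++ *
++ *	ret = ipu_disable_channel(ipu, MEM_PP_MEM, true);
++ */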
++
++/*!
++ * This function enables CSI.
++ *
++ * @param ipu ipu handler
++ * @param csi csi num 0 or 1
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_enable_csi(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t reg;
++
++ if (csi > 1) {
++ dev_err(ipu->dev, "Wrong csi num_%d\n", csi);
++ return -EINVAL;
++ }
++
++ _ipu_get(ipu);
++ mutex_lock(&ipu->mutex_lock);
++ ipu->csi_use_count[csi]++;
++
++ if (ipu->csi_use_count[csi] == 1) {
++ reg = ipu_cm_read(ipu, IPU_CONF);
++ if (csi == 0)
++ ipu_cm_write(ipu, reg | IPU_CONF_CSI0_EN, IPU_CONF);
++ else
++ ipu_cm_write(ipu, reg | IPU_CONF_CSI1_EN, IPU_CONF);
++ }
++ mutex_unlock(&ipu->mutex_lock);
++ _ipu_put(ipu);
++ return 0;
++}
++EXPORT_SYMBOL(ipu_enable_csi);
++
++/*!
++ * This function disables CSI.
++ *
++ * @param ipu ipu handler
++ * @param csi csi num 0 or 1
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_disable_csi(struct ipu_soc *ipu, uint32_t csi)
++{
++ uint32_t reg;
++
++ if (csi > 1) {
++ dev_err(ipu->dev, "Wrong csi num_%d\n", csi);
++ return -EINVAL;
++ }
++ _ipu_get(ipu);
++ mutex_lock(&ipu->mutex_lock);
++ ipu->csi_use_count[csi]--;
++ if (ipu->csi_use_count[csi] == 0) {
++ _ipu_csi_wait4eof(ipu, ipu->csi_channel[csi]);
++ reg = ipu_cm_read(ipu, IPU_CONF);
++ if (csi == 0)
++ ipu_cm_write(ipu, reg & ~IPU_CONF_CSI0_EN, IPU_CONF);
++ else
++ ipu_cm_write(ipu, reg & ~IPU_CONF_CSI1_EN, IPU_CONF);
++ }
++ mutex_unlock(&ipu->mutex_lock);
++ _ipu_put(ipu);
++ return 0;
++}
++EXPORT_SYMBOL(ipu_disable_csi);
++
++static irqreturn_t ipu_sync_irq_handler(int irq, void *desc)
++{
++ struct ipu_soc *ipu = desc;
++ int i;
++ uint32_t line, bit, int_stat, int_ctrl;
++ irqreturn_t result = IRQ_NONE;
++ const int int_reg[] = { 1, 2, 3, 4, 11, 12, 13, 14, 15, 0 };
++
++ spin_lock(&ipu->int_reg_spin_lock);
++
++ for (i = 0; int_reg[i] != 0; i++) {
++ int_stat = ipu_cm_read(ipu, IPU_INT_STAT(int_reg[i]));
++ int_ctrl = ipu_cm_read(ipu, IPU_INT_CTRL(int_reg[i]));
++ int_stat &= int_ctrl;
++ ipu_cm_write(ipu, int_stat, IPU_INT_STAT(int_reg[i]));
++ while ((line = ffs(int_stat)) != 0) {
++ bit = --line;
++ int_stat &= ~(1UL << line);
++ line += (int_reg[i] - 1) * 32;
++ result |=
++ ipu->irq_list[line].handler(line,
++ ipu->irq_list[line].
++ dev_id);
++ if (ipu->irq_list[line].flags & IPU_IRQF_ONESHOT) {
++ int_ctrl &= ~(1UL << bit);
++ ipu_cm_write(ipu, int_ctrl,
++ IPU_INT_CTRL(int_reg[i]));
++ }
++ }
++ }
++
++ spin_unlock(&ipu->int_reg_spin_lock);
++
++ return result;
++}
++
++static irqreturn_t ipu_err_irq_handler(int irq, void *desc)
++{
++ struct ipu_soc *ipu = desc;
++ int i;
++ uint32_t int_stat;
++ const int err_reg[] = { 5, 6, 9, 10, 0 };
++
++ spin_lock(&ipu->int_reg_spin_lock);
++
++ for (i = 0; err_reg[i] != 0; i++) {
++ int_stat = ipu_cm_read(ipu, IPU_INT_STAT(err_reg[i]));
++ int_stat &= ipu_cm_read(ipu, IPU_INT_CTRL(err_reg[i]));
++ if (int_stat) {
++ ipu_cm_write(ipu, int_stat, IPU_INT_STAT(err_reg[i]));
++ dev_warn(ipu->dev,
++ "IPU Warning - IPU_INT_STAT_%d = 0x%08X\n",
++ err_reg[i], int_stat);
++ /* Disable interrupts so we only get error once */
++ int_stat = ipu_cm_read(ipu, IPU_INT_CTRL(err_reg[i])) &
++ ~int_stat;
++ ipu_cm_write(ipu, int_stat, IPU_INT_CTRL(err_reg[i]));
++ }
++ }
++
++ spin_unlock(&ipu->int_reg_spin_lock);
++
++ return IRQ_HANDLED;
++}
++
++/*!
++ * This function enables the interrupt for the specified interrupt line.
++ * The interrupt lines are defined in \b ipu_irq_line enum.
++ *
++ * @param ipu ipu handler
++ * @param irq Interrupt line to enable interrupt for.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int ipu_enable_irq(struct ipu_soc *ipu, uint32_t irq)
++{
++ uint32_t reg;
++ unsigned long lock_flags;
++ int ret = 0;
++
++ _ipu_get(ipu);
++
++ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
++
++ /*
++ * Check sync interrupt handler only, since we do nothing for
++ * error interrupts other than printing out register values in the
++ * error interrupt source handler.
++ */
++ if (_ipu_is_sync_irq(irq) && (ipu->irq_list[irq].handler == NULL)) {
++ dev_err(ipu->dev, "handler hasn't been registered on sync "
++ "irq %d\n", irq);
++ ret = -EACCES;
++ goto out;
++ }
++
++ reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
++ reg |= IPUIRQ_2_MASK(irq);
++ ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
++out:
++ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
++
++ _ipu_put(ipu);
++
++ return ret;
++}
++EXPORT_SYMBOL(ipu_enable_irq);
++
++/*!
++ * This function disables the interrupt for the specified interrupt line.
++ * The interrupt lines are defined in \b ipu_irq_line enum.
++ *
++ * @param ipu ipu handler
++ * @param irq Interrupt line to disable interrupt for.
++ *
++ */
++void ipu_disable_irq(struct ipu_soc *ipu, uint32_t irq)
++{
++ uint32_t reg;
++ unsigned long lock_flags;
++
++ _ipu_get(ipu);
++
++ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
++
++ reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
++ reg &= ~IPUIRQ_2_MASK(irq);
++ ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
++
++ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
++
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_disable_irq);
++
++/*!
++ * This function clears the interrupt for the specified interrupt line.
++ * The interrupt lines are defined in \b ipu_irq_line enum.
++ *
++ * @param ipu ipu handler
++ * @param irq Interrupt line to clear interrupt for.
++ *
++ */
++void ipu_clear_irq(struct ipu_soc *ipu, uint32_t irq)
++{
++ unsigned long lock_flags;
++
++ _ipu_get(ipu);
++
++ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
++
++ ipu_cm_write(ipu, IPUIRQ_2_MASK(irq), IPUIRQ_2_STATREG(irq));
++
++ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
++
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_clear_irq);
++
++/*!
++ * This function returns the current interrupt status for the specified
++ * interrupt line. The interrupt lines are defined in \b ipu_irq_line enum.
++ *
++ * @param ipu ipu handler
++ * @param irq Interrupt line to get status for.
++ *
++ * @return Returns true if the interrupt is pending/asserted or false if
++ * the interrupt is not pending.
++ */
++bool ipu_get_irq_status(struct ipu_soc *ipu, uint32_t irq)
++{
++ uint32_t reg;
++ unsigned long lock_flags;
++
++ _ipu_get(ipu);
++
++ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
++ reg = ipu_cm_read(ipu, IPUIRQ_2_STATREG(irq));
++ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
++
++ _ipu_put(ipu);
++
++ if (reg & IPUIRQ_2_MASK(irq))
++ return true;
++ else
++ return false;
++}
++EXPORT_SYMBOL(ipu_get_irq_status);
++
++/*!
++ * This function registers an interrupt handler function for the specified
++ * interrupt line. The interrupt lines are defined in \b ipu_irq_line enum.
++ *
++ * @param ipu ipu handler
++ * @param irq Interrupt line to get status for.
++ *
++ * @param handler Input parameter for address of the handler
++ * function.
++ *
++ * @param irq_flags Flags for interrupt mode, e.g. IPU_IRQF_ONESHOT
++ * to disable the interrupt line after it first fires.
++ *
++ * @param devname Input parameter for string name of driver
++ * registering the handler.
++ *
++ * @param dev_id Input parameter for pointer of data to be
++ * passed to the handler.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int ipu_request_irq(struct ipu_soc *ipu, uint32_t irq,
++ irqreturn_t(*handler) (int, void *),
++ uint32_t irq_flags, const char *devname, void *dev_id)
++{
++ uint32_t reg;
++ unsigned long lock_flags;
++ int ret = 0;
++
++ BUG_ON(irq >= IPU_IRQ_COUNT);
++
++ _ipu_get(ipu);
++
++ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
++
++ if (ipu->irq_list[irq].handler != NULL) {
++ dev_err(ipu->dev,
++ "handler already installed on irq %d\n", irq);
++ ret = -EINVAL;
++ goto out;
++ }
++
++ /*
++ * Check sync interrupt handler only, since we do nothing for
++ * error interrupts other than printing out register values in the
++ * error interrupt source handler.
++ */
++ if (_ipu_is_sync_irq(irq) && (handler == NULL)) {
++ dev_err(ipu->dev, "handler is NULL for sync irq %d\n", irq);
++ ret = -EINVAL;
++ goto out;
++ }
++
++ ipu->irq_list[irq].handler = handler;
++ ipu->irq_list[irq].flags = irq_flags;
++ ipu->irq_list[irq].dev_id = dev_id;
++ ipu->irq_list[irq].name = devname;
++
++ /* clear irq stat for previous use */
++ ipu_cm_write(ipu, IPUIRQ_2_MASK(irq), IPUIRQ_2_STATREG(irq));
++ /* enable the interrupt */
++ reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
++ reg |= IPUIRQ_2_MASK(irq);
++ ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
++out:
++ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
++
++ _ipu_put(ipu);
++
++ return ret;
++}
++EXPORT_SYMBOL(ipu_request_irq);
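++
++/*
++ * Illustrative sketch (assumed usage): registering an end-of-frame
++ * handler. "my_eof_handler" and "priv" are hypothetical;
++ * IPU_IRQF_ONESHOT makes the sync line auto-disable after it fires.
++ *
++ *	static irqreturn_t my_eof_handler(int irq, void *dev_id)
++ *	{
++ *		complete((struct completion *)dev_id);
++ *		return IRQ_HANDLED;
++ *	}
++ *
++ *	ret = ipu_request_irq(ipu, IPU_IRQ_BG_SYNC_EOF, my_eof_handler,
++ *			      IPU_IRQF_ONESHOT, "my-driver", &priv->done);
++ *	...
++ *	ipu_free_irq(ipu, IPU_IRQ_BG_SYNC_EOF, &priv->done);
++ */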
++
++/*!
++ * This function unregisters an interrupt handler for the specified interrupt
++ * line. The interrupt lines are defined in \b ipu_irq_line enum.
++ *
++ * @param ipu ipu handler
++ * @param irq Interrupt line to get status for.
++ *
++ * @param dev_id Input parameter for pointer of data to be passed
++ * to the handler. This must match value passed to
++ * ipu_request_irq().
++ *
++ */
++void ipu_free_irq(struct ipu_soc *ipu, uint32_t irq, void *dev_id)
++{
++ uint32_t reg;
++ unsigned long lock_flags;
++
++ _ipu_get(ipu);
++
++ if (ipu->irq_list[irq].dev_id != dev_id)
++ return;
++
++ spin_lock_irqsave(&ipu->int_reg_spin_lock, lock_flags);
++
++ /* disable the interrupt */
++ reg = ipu_cm_read(ipu, IPUIRQ_2_CTRLREG(irq));
++ reg &= ~IPUIRQ_2_MASK(irq);
++ ipu_cm_write(ipu, reg, IPUIRQ_2_CTRLREG(irq));
++ memset(&ipu->irq_list[irq], 0, sizeof(ipu->irq_list[irq]));
++
++ spin_unlock_irqrestore(&ipu->int_reg_spin_lock, lock_flags);
++
++ _ipu_put(ipu);
++}
++EXPORT_SYMBOL(ipu_free_irq);
++
++uint32_t ipu_get_cur_buffer_idx(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type)
++{
++ uint32_t reg, dma_chan;
++
++ dma_chan = channel_2_dma(channel, type);
++ if (!idma_is_valid(dma_chan))
++ return -EINVAL;
++
++ reg = ipu_cm_read(ipu, IPU_CHA_TRB_MODE_SEL(dma_chan));
++ if ((reg & idma_mask(dma_chan)) && _ipu_is_trb_chan(dma_chan)) {
++ reg = ipu_cm_read(ipu, IPU_CHA_TRIPLE_CUR_BUF(dma_chan));
++ return (reg & tri_cur_buf_mask(dma_chan)) >>
++ tri_cur_buf_shift(dma_chan);
++ } else {
++ reg = ipu_cm_read(ipu, IPU_CHA_CUR_BUF(dma_chan));
++ if (reg & idma_mask(dma_chan))
++ return 1;
++ else
++ return 0;
++ }
++}
++EXPORT_SYMBOL(ipu_get_cur_buffer_idx);
++
++uint32_t _ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t stat = 0;
++ uint32_t task_stat_reg = ipu_cm_read(ipu, IPU_PROC_TASK_STAT);
++
++ switch (channel) {
++ case MEM_PRP_VF_MEM:
++ stat = (task_stat_reg & TSTAT_VF_MASK) >> TSTAT_VF_OFFSET;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ stat = (task_stat_reg & TSTAT_VF_MASK) >> TSTAT_VF_OFFSET;
++ break;
++ case MEM_ROT_VF_MEM:
++ stat =
++ (task_stat_reg & TSTAT_VF_ROT_MASK) >> TSTAT_VF_ROT_OFFSET;
++ break;
++ case MEM_PRP_ENC_MEM:
++ stat = (task_stat_reg & TSTAT_ENC_MASK) >> TSTAT_ENC_OFFSET;
++ break;
++ case MEM_ROT_ENC_MEM:
++ stat =
++ (task_stat_reg & TSTAT_ENC_ROT_MASK) >>
++ TSTAT_ENC_ROT_OFFSET;
++ break;
++ case MEM_PP_MEM:
++ stat = (task_stat_reg & TSTAT_PP_MASK) >> TSTAT_PP_OFFSET;
++ break;
++ case MEM_ROT_PP_MEM:
++ stat =
++ (task_stat_reg & TSTAT_PP_ROT_MASK) >> TSTAT_PP_ROT_OFFSET;
++ break;
++
++ default:
++ stat = TASK_STAT_IDLE;
++ break;
++ }
++ return stat;
++}
++
++/*!
++ * This function checks a logical channel's status.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @return This function returns 0 on idle and 1 on busy.
++ *
++ */
++uint32_t ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t dma_status;
++
++ _ipu_get(ipu);
++ mutex_lock(&ipu->mutex_lock);
++ dma_status = ipu_is_channel_busy(ipu, channel);
++ mutex_unlock(&ipu->mutex_lock);
++ _ipu_put(ipu);
++
++ dev_dbg(ipu->dev, "%s, dma_status:%d.\n", __func__, dma_status);
++
++ return dma_status;
++}
++EXPORT_SYMBOL(ipu_channel_status);
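++
++/*
++ * Example (illustrative sketch, not part of the original driver):
++ * ipu_channel_status() only reports busy/idle, so a caller that needs
++ * a channel to drain would poll it:
++ *
++ *	int timeout = 50;
++ *
++ *	while (ipu_channel_status(ipu, MEM_PP_MEM) && --timeout)
++ *		msleep(10);
++ *	if (!timeout)
++ *		dev_warn(ipu->dev, "MEM_PP_MEM still busy\n");
++ */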
++
++int32_t ipu_swap_channel(struct ipu_soc *ipu, ipu_channel_t from_ch, ipu_channel_t to_ch)
++{
++ uint32_t reg;
++ unsigned long lock_flags;
++ int from_dma = channel_2_dma(from_ch, IPU_INPUT_BUFFER);
++ int to_dma = channel_2_dma(to_ch, IPU_INPUT_BUFFER);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ /* enable target channel */
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(to_dma));
++ ipu_idmac_write(ipu, reg | idma_mask(to_dma), IDMAC_CHA_EN(to_dma));
++
++ ipu->channel_enable_mask |= 1L << IPU_CHAN_ID(to_ch);
++
++ /* switch dp dc */
++ _ipu_dp_dc_disable(ipu, from_ch, true);
++
++ /* disable source channel */
++ reg = ipu_idmac_read(ipu, IDMAC_CHA_EN(from_dma));
++ ipu_idmac_write(ipu, reg & ~idma_mask(from_dma), IDMAC_CHA_EN(from_dma));
++ ipu_cm_write(ipu, idma_mask(from_dma), IPU_CHA_CUR_BUF(from_dma));
++ ipu_cm_write(ipu, tri_cur_buf_mask(from_dma),
++ IPU_CHA_TRIPLE_CUR_BUF(from_dma));
++
++ ipu->channel_enable_mask &= ~(1L << IPU_CHAN_ID(from_ch));
++
++ spin_lock_irqsave(&ipu->rdy_reg_spin_lock, lock_flags);
++ _ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 0);
++ _ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 1);
++ _ipu_clear_buffer_ready(ipu, from_ch, IPU_VIDEO_IN_BUFFER, 2);
++ spin_unlock_irqrestore(&ipu->rdy_reg_spin_lock, lock_flags);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_swap_channel);
++
++uint32_t bytes_per_pixel(uint32_t fmt)
++{
++	switch (fmt) {
++	case IPU_PIX_FMT_GENERIC:	/* generic data */
++	case IPU_PIX_FMT_RGB332:
++	case IPU_PIX_FMT_YUV420P:
++	case IPU_PIX_FMT_YVU420P:
++	case IPU_PIX_FMT_YUV422P:
++	case IPU_PIX_FMT_YUV444P:
++		return 1;
++	case IPU_PIX_FMT_GENERIC_16:	/* generic data */
++	case IPU_PIX_FMT_RGB565:
++	case IPU_PIX_FMT_YUYV:
++	case IPU_PIX_FMT_UYVY:
++		return 2;
++	case IPU_PIX_FMT_BGR24:
++	case IPU_PIX_FMT_RGB24:
++	case IPU_PIX_FMT_YUV444:
++		return 3;
++	case IPU_PIX_FMT_GENERIC_32:	/* generic data */
++	case IPU_PIX_FMT_BGR32:
++	case IPU_PIX_FMT_BGRA32:
++	case IPU_PIX_FMT_RGB32:
++	case IPU_PIX_FMT_RGBA32:
++	case IPU_PIX_FMT_ABGR32:
++		return 4;
++	default:
++		return 1;
++	}
++}
++EXPORT_SYMBOL(bytes_per_pixel);
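++
++/*
++ * Example (illustrative): for planar YUV formats bytes_per_pixel()
++ * returns the per-pixel size of the Y plane only (1 byte), which is
++ * what a luma line stride needs:
++ *
++ *	stride = width * bytes_per_pixel(IPU_PIX_FMT_YUV420P); // width * 1
++ *	stride = width * bytes_per_pixel(IPU_PIX_FMT_RGB24);   // width * 3
++ *
++ * A full-frame size including chroma is computed with fmt_to_bpp()
++ * instead.
++ */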
++
++ipu_color_space_t format_to_colorspace(uint32_t fmt)
++{
++	switch (fmt) {
++	case IPU_PIX_FMT_RGB666:
++	case IPU_PIX_FMT_RGB565:
++	case IPU_PIX_FMT_BGR24:
++	case IPU_PIX_FMT_RGB24:
++	case IPU_PIX_FMT_GBR24:
++	case IPU_PIX_FMT_BGR32:
++	case IPU_PIX_FMT_BGRA32:
++	case IPU_PIX_FMT_RGB32:
++	case IPU_PIX_FMT_RGBA32:
++	case IPU_PIX_FMT_ABGR32:
++	case IPU_PIX_FMT_LVDS666:
++	case IPU_PIX_FMT_LVDS888:
++		return RGB;
++	default:
++		return YCbCr;
++	}
++}
++
++bool ipu_pixel_format_has_alpha(uint32_t fmt)
++{
++	switch (fmt) {
++	case IPU_PIX_FMT_RGBA32:
++	case IPU_PIX_FMT_BGRA32:
++	case IPU_PIX_FMT_ABGR32:
++		return true;
++	default:
++		return false;
++	}
++}
++
++bool ipu_ch_param_bad_alpha_pos(uint32_t pixel_fmt)
++{
++ return _ipu_ch_param_bad_alpha_pos(pixel_fmt);
++}
++EXPORT_SYMBOL(ipu_ch_param_bad_alpha_pos);
++
++#ifdef CONFIG_PM
++static int ipu_suspend(struct device *dev)
++{
++ struct ipu_soc *ipu = dev_get_drvdata(dev);
++
++	/* All IDMAC channels and IPU clocks should be disabled. */
++ if (ipu->pdata->pg)
++ ipu->pdata->pg(1);
++
++ dev_dbg(dev, "ipu suspend.\n");
++ return 0;
++}
++
++static int ipu_resume(struct device *dev)
++{
++ struct ipu_soc *ipu = dev_get_drvdata(dev);
++
++ if (ipu->pdata->pg) {
++ ipu->pdata->pg(0);
++
++ _ipu_get(ipu);
++ _ipu_dmfc_init(ipu, dmfc_type_setup, 1);
++ /* Set sync refresh channels as high priority */
++ ipu_idmac_write(ipu, 0x18800001L, IDMAC_CHA_PRI(0));
++ _ipu_put(ipu);
++ }
++ dev_dbg(dev, "ipu resume.\n");
++ return 0;
++}
++
++int ipu_runtime_suspend(struct device *dev)
++{
++ dev_dbg(dev, "ipu busfreq high release.\n");
++ release_bus_freq(BUS_FREQ_HIGH);
++
++ return 0;
++}
++
++int ipu_runtime_resume(struct device *dev)
++{
++ dev_dbg(dev, "ipu busfreq high requst.\n");
++ request_bus_freq(BUS_FREQ_HIGH);
++
++ return 0;
++}
++
++static const struct dev_pm_ops ipu_pm_ops = {
++ SET_RUNTIME_PM_OPS(ipu_runtime_suspend, ipu_runtime_resume, NULL)
++ SET_SYSTEM_SLEEP_PM_OPS(ipu_suspend, ipu_resume)
++};
++#endif
++
++/*!
++ * Platform driver definition for the IPUv3 devices, including the
++ * power management callbacks registered above.
++ */
++static struct platform_driver mxcipu_driver = {
++ .driver = {
++ .name = "imx-ipuv3",
++ .of_match_table = imx_ipuv3_dt_ids,
++ #ifdef CONFIG_PM
++ .pm = &ipu_pm_ops,
++ #endif
++ },
++ .probe = ipu_probe,
++ .id_table = imx_ipu_type,
++ .remove = ipu_remove,
++};
++
++int32_t __init ipu_gen_init(void)
++{
++	return platform_driver_register(&mxcipu_driver);
++}
++
++subsys_initcall(ipu_gen_init);
++
++static void __exit ipu_gen_uninit(void)
++{
++ platform_driver_unregister(&mxcipu_driver);
++}
++
++module_exit(ipu_gen_uninit);
++
++MODULE_DESCRIPTION("i.MX IPU v3 driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-4.1.3/drivers/mxc/ipu3/ipu_device.c linux-xbian-imx6/drivers/mxc/ipu3/ipu_device.c
+--- linux-4.1.3/drivers/mxc/ipu3/ipu_device.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/ipu3/ipu_device.c 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,3725 @@
++/*
++ * Copyright 2005-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_device.c
++ *
++ * @brief This file contains the IPUv3 driver device interface and fops functions.
++ *
++ * @ingroup IPU
++ */
++#include <linux/clk.h>
++#include <linux/cpumask.h>
++#include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/ipu-v3.h>
++#include <linux/kernel.h>
++#include <linux/kthread.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/poll.h>
++#include <linux/sched.h>
++#include <linux/sched/rt.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/time.h>
++#include <linux/types.h>
++#include <linux/vmalloc.h>
++#include <linux/wait.h>
++
++#include <asm/cacheflush.h>
++#include <asm/outercache.h>
++
++#include "ipu_param_mem.h"
++#include "ipu_regs.h"
++#include "vdoa.h"
++
++#define CHECK_RETCODE(cont, str, err, label, ret) \
++do { \
++ if (cont) { \
++ dev_err(t->dev, "ERR:[0x%p]-no:0x%x "#str" ret:%d," \
++ "line:%d\n", t, t->task_no, ret, __LINE__);\
++ if (ret != -EACCES) { \
++ t->state = err; \
++ goto label; \
++ } \
++ } \
++} while (0)
++
++#define CHECK_RETCODE_CONT(cont, str, err, ret) \
++do { \
++ if (cont) { \
++ dev_err(t->dev, "ERR:[0x%p]-no:0x%x"#str" ret:%d," \
++ "line:%d\n", t, t->task_no, ret, __LINE__);\
++ if (ret != -EACCES) { \
++ if (t->state == STATE_OK) \
++ t->state = err; \
++ } \
++ } \
++} while (0)
++
++#undef DBG_IPU_PERF
++#ifdef DBG_IPU_PERF
++#define CHECK_PERF(ts) \
++do { \
++ getnstimeofday(ts); \
++} while (0)
++
++#define DECLARE_PERF_VAR \
++ struct timespec ts_queue; \
++ struct timespec ts_dotask; \
++ struct timespec ts_waitirq; \
++ struct timespec ts_sche; \
++ struct timespec ts_rel; \
++ struct timespec ts_frame
++
++#define PRINT_TASK_STATISTICS \
++do { \
++ ts_queue = timespec_sub(tsk->ts_dotask, tsk->ts_queue); \
++ ts_dotask = timespec_sub(tsk->ts_waitirq, tsk->ts_dotask); \
++ ts_waitirq = timespec_sub(tsk->ts_inirq, tsk->ts_waitirq); \
++ ts_sche = timespec_sub(tsk->ts_wakeup, tsk->ts_inirq); \
++ ts_rel = timespec_sub(tsk->ts_rel, tsk->ts_wakeup); \
++ ts_frame = timespec_sub(tsk->ts_rel, tsk->ts_queue); \
++ dev_dbg(tsk->dev, "[0x%p] no-0x%x, ts_q:%ldus, ts_do:%ldus," \
++ "ts_waitirq:%ldus,ts_sche:%ldus, ts_rel:%ldus," \
++ "ts_frame: %ldus\n", tsk, tsk->task_no, \
++ ts_queue.tv_nsec / NSEC_PER_USEC + ts_queue.tv_sec * USEC_PER_SEC,\
++ ts_dotask.tv_nsec / NSEC_PER_USEC + ts_dotask.tv_sec * USEC_PER_SEC,\
++ ts_waitirq.tv_nsec / NSEC_PER_USEC + ts_waitirq.tv_sec * USEC_PER_SEC,\
++ ts_sche.tv_nsec / NSEC_PER_USEC + ts_sche.tv_sec * USEC_PER_SEC,\
++ ts_rel.tv_nsec / NSEC_PER_USEC + ts_rel.tv_sec * USEC_PER_SEC,\
++ ts_frame.tv_nsec / NSEC_PER_USEC + ts_frame.tv_sec * USEC_PER_SEC); \
++ if ((ts_frame.tv_nsec/NSEC_PER_USEC + ts_frame.tv_sec*USEC_PER_SEC) > \
++ 80000) \
++ dev_dbg(tsk->dev, "ts_frame larger than 80ms [0x%p] no-0x%x.\n"\
++ , tsk, tsk->task_no); \
++} while (0)
++#else
++#define CHECK_PERF(ts)
++#define DECLARE_PERF_VAR
++#define PRINT_TASK_STATISTICS
++#endif
++
++#define IPU_PP_CH_VF (IPU_TASK_ID_VF - 1)
++#define IPU_PP_CH_PP (IPU_TASK_ID_PP - 1)
++#define MAX_PP_CH (IPU_TASK_ID_MAX - 1)
++#define VDOA_DEF_TIMEOUT_MS (HZ/2)
++
++/* Structures and variables for exporting MXC IPU as a device */
++typedef enum {
++ STATE_OK = 0,
++ STATE_QUEUE,
++ STATE_IN_PROGRESS,
++ STATE_ERR,
++ STATE_TIMEOUT,
++ STATE_RES_TIMEOUT,
++ STATE_NO_IPU,
++ STATE_NO_IRQ,
++ STATE_IPU_BUSY,
++ STATE_IRQ_FAIL,
++ STATE_IRQ_TIMEOUT,
++ STATE_ENABLE_CHAN_FAIL,
++ STATE_DISABLE_CHAN_FAIL,
++ STATE_SEL_BUF_FAIL,
++ STATE_INIT_CHAN_FAIL,
++ STATE_LINK_CHAN_FAIL,
++ STATE_UNLINK_CHAN_FAIL,
++ STATE_INIT_CHAN_BUF_FAIL,
++ STATE_INIT_CHAN_BAND_FAIL,
++ STATE_SYS_NO_MEM,
++ STATE_VDOA_IRQ_TIMEOUT,
++ STATE_VDOA_IRQ_FAIL,
++ STATE_VDOA_TASK_FAIL,
++} ipu_state_t;
++
++enum {
++ INPUT_CHAN_VDI_P = 1,
++ INPUT_CHAN,
++ INPUT_CHAN_VDI_N,
++};
++
++struct ipu_state_msg {
++ int state;
++ char *msg;
++} state_msg[] = {
++ {STATE_OK, "ok"},
++ {STATE_QUEUE, "split queue"},
++ {STATE_IN_PROGRESS, "split in progress"},
++ {STATE_ERR, "error"},
++ {STATE_TIMEOUT, "split task timeout"},
++ {STATE_RES_TIMEOUT, "wait resource timeout"},
++ {STATE_NO_IPU, "no ipu found"},
++ {STATE_NO_IRQ, "no irq found for task"},
++ {STATE_IPU_BUSY, "ipu busy"},
++ {STATE_IRQ_FAIL, "request irq failed"},
++ {STATE_IRQ_TIMEOUT, "wait for irq timeout"},
++ {STATE_ENABLE_CHAN_FAIL, "ipu enable channel fail"},
++ {STATE_DISABLE_CHAN_FAIL, "ipu disable channel fail"},
++ {STATE_SEL_BUF_FAIL, "ipu select buf fail"},
++ {STATE_INIT_CHAN_FAIL, "ipu init channel fail"},
++ {STATE_LINK_CHAN_FAIL, "ipu link channel fail"},
++ {STATE_UNLINK_CHAN_FAIL, "ipu unlink channel fail"},
++ {STATE_INIT_CHAN_BUF_FAIL, "ipu init channel buffer fail"},
++ {STATE_INIT_CHAN_BAND_FAIL, "ipu init channel band mode fail"},
++ {STATE_SYS_NO_MEM, "sys no mem: -ENOMEM"},
++ {STATE_VDOA_IRQ_TIMEOUT, "wait for vdoa irq timeout"},
++ {STATE_VDOA_IRQ_FAIL, "vdoa irq fail"},
++ {STATE_VDOA_TASK_FAIL, "vdoa task fail"},
++};
++
++struct stripe_setting {
++ u32 iw;
++ u32 ih;
++ u32 ow;
++ u32 oh;
++ u32 outh_resize_ratio;
++ u32 outv_resize_ratio;
++ u32 i_left_pos;
++ u32 i_right_pos;
++ u32 i_top_pos;
++ u32 i_bottom_pos;
++ u32 o_left_pos;
++ u32 o_right_pos;
++ u32 o_top_pos;
++ u32 o_bottom_pos;
++ u32 rl_split_line;
++ u32 ud_split_line;
++};
++
++struct task_set {
++#define NULL_MODE 0x0
++#define IC_MODE 0x1
++#define ROT_MODE 0x2
++#define VDI_MODE 0x4
++#define IPU_PREPROCESS_MODE_MASK (IC_MODE | ROT_MODE | VDI_MODE)
++/* VDOA_MODE means this task uses the VDOA, and the VDOA has two modes:
++ * BAND MODE and non-BAND MODE. Non-band mode transfers the data to
++ * memory. BAND mode needs hardware sync with the IPU and is used by
++ * default when connected to the VDIC.
++ */
++#define VDOA_MODE 0x8
++#define VDOA_BAND_MODE 0x10
++ u8 mode;
++#define IC_VF 0x1
++#define IC_PP 0x2
++#define ROT_VF 0x4
++#define ROT_PP 0x8
++#define VDI_VF 0x10
++#define VDOA_ONLY 0x20
++ u8 task;
++#define NO_SPLIT 0x0
++#define RL_SPLIT 0x1
++#define UD_SPLIT 0x2
++#define LEFT_STRIPE 0x1
++#define RIGHT_STRIPE 0x2
++#define UP_STRIPE 0x4
++#define DOWN_STRIPE 0x8
++#define SPLIT_MASK 0xF
++ u8 split_mode;
++ u8 band_lines;
++ ipu_channel_t ic_chan;
++ ipu_channel_t rot_chan;
++ ipu_channel_t vdi_ic_p_chan;
++ ipu_channel_t vdi_ic_n_chan;
++
++ u32 i_off;
++ u32 i_uoff;
++ u32 i_voff;
++ u32 istride;
++
++ u32 ov_off;
++ u32 ov_uoff;
++ u32 ov_voff;
++ u32 ovstride;
++
++ u32 ov_alpha_off;
++ u32 ov_alpha_stride;
++
++ u32 o_off;
++ u32 o_uoff;
++ u32 o_voff;
++ u32 ostride;
++
++ u32 r_fmt;
++ u32 r_width;
++ u32 r_height;
++ u32 r_stride;
++ dma_addr_t r_paddr;
++
++ struct stripe_setting sp_setting;
++};
++
++struct ipu_split_task {
++ struct ipu_task task;
++ struct ipu_task_entry *parent_task;
++ struct ipu_task_entry *child_task;
++ u32 task_no;
++};
++
++struct ipu_task_entry {
++ struct ipu_input input;
++ struct ipu_output output;
++
++ bool overlay_en;
++ struct ipu_overlay overlay;
++#define DEF_TIMEOUT_MS 1000
++#define DEF_DELAY_MS 20
++ int timeout;
++ int irq;
++
++ u8 task_id;
++ u8 ipu_id;
++ u8 task_in_list;
++ u8 split_done;
++ struct mutex split_lock;
++ struct mutex vdic_lock;
++ wait_queue_head_t split_waitq;
++
++ struct list_head node;
++ struct list_head split_list;
++ struct ipu_soc *ipu;
++ struct device *dev;
++ struct task_set set;
++ wait_queue_head_t task_waitq;
++ struct completion irq_comp;
++ struct kref refcount;
++ ipu_state_t state;
++ u32 task_no;
++ atomic_t done;
++ atomic_t res_free;
++ atomic_t res_get;
++
++ struct ipu_task_entry *parent;
++ char *vditmpbuf[2];
++ u32 old_save_lines;
++ u32 old_size;
++ bool buf1filled;
++ bool buf0filled;
++
++ vdoa_handle_t vdoa_handle;
++ struct vdoa_output_mem {
++ void *vaddr;
++ dma_addr_t paddr;
++ int size;
++ } vdoa_dma;
++
++#ifdef DBG_IPU_PERF
++ struct timespec ts_queue;
++ struct timespec ts_dotask;
++ struct timespec ts_waitirq;
++ struct timespec ts_inirq;
++ struct timespec ts_wakeup;
++ struct timespec ts_rel;
++#endif
++};
++
++struct ipu_channel_tabel {
++ struct mutex lock;
++ u8 used[MXC_IPU_MAX_NUM][MAX_PP_CH];
++ u8 vdoa_used;
++};
++
++struct ipu_thread_data {
++ struct ipu_soc *ipu;
++ u32 id;
++ u32 is_vdoa;
++};
++
++struct ipu_alloc_list {
++ struct list_head list;
++ dma_addr_t phy_addr;
++ void *cpu_addr;
++ u32 size;
++ void *file_index;
++};
++
++static LIST_HEAD(ipu_alloc_list);
++static DEFINE_MUTEX(ipu_alloc_lock);
++static struct ipu_channel_tabel ipu_ch_tbl;
++static LIST_HEAD(ipu_task_list);
++static DEFINE_SPINLOCK(ipu_task_list_lock);
++static DECLARE_WAIT_QUEUE_HEAD(thread_waitq);
++static DECLARE_WAIT_QUEUE_HEAD(res_waitq);
++static atomic_t req_cnt;
++static atomic_t file_index = ATOMIC_INIT(1);
++static int major;
++static int max_ipu_no;
++static int thread_id;
++static atomic_t frame_no;
++static struct class *ipu_class;
++static struct device *ipu_dev;
++static int debug;
++module_param(debug, int, 0600);
++#ifdef DBG_IPU_PERF
++static struct timespec ts_frame_max;
++static u32 ts_frame_avg;
++static atomic_t frame_cnt;
++#endif
++
++static bool deinterlace_3_field(struct ipu_task_entry *t)
++{
++ return ((t->set.mode & VDI_MODE) &&
++ (t->input.deinterlace.motion != HIGH_MOTION));
++}
++
++static u32 tiled_filed_size(struct ipu_task_entry *t)
++{
++ u32 field_size;
++
++	/* note: page_align is required by VPU hw output buffer */
++ field_size = TILED_NV12_FRAME_SIZE(t->input.width, t->input.height/2);
++ return field_size;
++}
++
++static bool only_ic(u8 mode)
++{
++ mode = mode & IPU_PREPROCESS_MODE_MASK;
++ return ((mode == IC_MODE) || (mode == VDI_MODE));
++}
++
++static bool only_rot(u8 mode)
++{
++ mode = mode & IPU_PREPROCESS_MODE_MASK;
++ return (mode == ROT_MODE);
++}
++
++static bool ic_and_rot(u8 mode)
++{
++ mode = mode & IPU_PREPROCESS_MODE_MASK;
++ return ((mode == (IC_MODE | ROT_MODE)) ||
++ (mode == (VDI_MODE | ROT_MODE)));
++}
++
++static bool need_split(struct ipu_task_entry *t)
++{
++ return ((t->set.split_mode != NO_SPLIT) || (t->task_no & SPLIT_MASK));
++}
++
++unsigned int fmt_to_bpp(unsigned int pixelformat)
++{
++ u32 bpp;
++
++ switch (pixelformat) {
++ case IPU_PIX_FMT_RGB565:
++ /*interleaved 422*/
++ case IPU_PIX_FMT_YUYV:
++ case IPU_PIX_FMT_UYVY:
++ /*non-interleaved 422*/
++ case IPU_PIX_FMT_YUV422P:
++ case IPU_PIX_FMT_YVU422P:
++ bpp = 16;
++ break;
++ case IPU_PIX_FMT_BGR24:
++ case IPU_PIX_FMT_RGB24:
++ case IPU_PIX_FMT_YUV444:
++ case IPU_PIX_FMT_YUV444P:
++ bpp = 24;
++ break;
++ case IPU_PIX_FMT_BGR32:
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_RGB32:
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_ABGR32:
++ bpp = 32;
++ break;
++ /*non-interleaved 420*/
++ case IPU_PIX_FMT_YUV420P:
++ case IPU_PIX_FMT_YVU420P:
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_NV12:
++ bpp = 12;
++ break;
++ default:
++ bpp = 8;
++ break;
++ }
++ return bpp;
++}
++EXPORT_SYMBOL_GPL(fmt_to_bpp);
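++
++/*
++ * Example (illustrative): unlike bytes_per_pixel(), fmt_to_bpp() counts
++ * all planes, so a frame buffer size in bytes is
++ *
++ *	size = width * height * fmt_to_bpp(fmt) / 8;
++ *
++ * e.g. a 1280x720 IPU_PIX_FMT_NV12 frame needs 1280*720*12/8 = 1382400
++ * bytes (921600 luma + 460800 chroma).
++ */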
++
++cs_t colorspaceofpixel(int fmt)
++{
++ switch (fmt) {
++ case IPU_PIX_FMT_RGB565:
++ case IPU_PIX_FMT_RGB666:
++ case IPU_PIX_FMT_BGR24:
++ case IPU_PIX_FMT_RGB24:
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_BGR32:
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_RGB32:
++ case IPU_PIX_FMT_ABGR32:
++ return RGB_CS;
++ break;
++ case IPU_PIX_FMT_UYVY:
++ case IPU_PIX_FMT_YUYV:
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YUV420P:
++ case IPU_PIX_FMT_YVU420P:
++ case IPU_PIX_FMT_YVU422P:
++ case IPU_PIX_FMT_YUV422P:
++ case IPU_PIX_FMT_YUV444:
++ case IPU_PIX_FMT_YUV444P:
++ case IPU_PIX_FMT_NV12:
++ case IPU_PIX_FMT_TILED_NV12:
++ case IPU_PIX_FMT_TILED_NV12F:
++ return YUV_CS;
++ break;
++ default:
++ return NULL_CS;
++ }
++}
++EXPORT_SYMBOL_GPL(colorspaceofpixel);
++
++int need_csc(int ifmt, int ofmt)
++{
++ cs_t ics, ocs;
++
++ ics = colorspaceofpixel(ifmt);
++ ocs = colorspaceofpixel(ofmt);
++
++ if ((ics == NULL_CS) || (ocs == NULL_CS))
++ return -1;
++ else if (ics != ocs)
++ return 1;
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(need_csc);
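++
++/*
++ * Example (illustrative): need_csc() drives the "is the IC needed?"
++ * decision in check_task() below:
++ *
++ *	need_csc(IPU_PIX_FMT_YUYV, IPU_PIX_FMT_RGB565)  ->  1 (YUV to RGB)
++ *	need_csc(IPU_PIX_FMT_NV12, IPU_PIX_FMT_YUV420P) ->  0 (both YUV)
++ *	need_csc(IPU_PIX_FMT_GENERIC, IPU_PIX_FMT_NV12) -> -1 (no colorspace)
++ */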
++
++static int soc_max_in_width(u32 is_vdoa)
++{
++ return is_vdoa ? 8192 : 4096;
++}
++
++static int soc_max_vdi_in_width(void)
++{
++ return IPU_MAX_VDI_IN_WIDTH;
++}
++
++static int soc_max_in_height(void)
++{
++ return 4096;
++}
++
++static int soc_max_out_width(void)
++{
++ /* mx51/mx53/mx6q is 1024*/
++ return 1024;
++}
++
++static int soc_max_out_height(void)
++{
++ /* mx51/mx53/mx6q is 1024*/
++ return 1024;
++}
++
++static void dump_task_info(struct ipu_task_entry *t)
++{
++ if (!debug)
++ return;
++ dev_dbg(t->dev, "[0x%p]input:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tformat = 0x%x\n", (void *)t, t->input.format);
++ dev_dbg(t->dev, "[0x%p]\twidth = %d\n", (void *)t, t->input.width);
++ dev_dbg(t->dev, "[0x%p]\theight = %d\n", (void *)t, t->input.height);
++ dev_dbg(t->dev, "[0x%p]\tcrop.w = %d\n", (void *)t, t->input.crop.w);
++ dev_dbg(t->dev, "[0x%p]\tcrop.h = %d\n", (void *)t, t->input.crop.h);
++ dev_dbg(t->dev, "[0x%p]\tcrop.pos.x = %d\n",
++ (void *)t, t->input.crop.pos.x);
++ dev_dbg(t->dev, "[0x%p]\tcrop.pos.y = %d\n",
++ (void *)t, t->input.crop.pos.y);
++ dev_dbg(t->dev, "[0x%p]input buffer:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tpaddr = 0x%x\n", (void *)t, t->input.paddr);
++ dev_dbg(t->dev, "[0x%p]\ti_off = 0x%x\n", (void *)t, t->set.i_off);
++ dev_dbg(t->dev, "[0x%p]\ti_uoff = 0x%x\n", (void *)t, t->set.i_uoff);
++ dev_dbg(t->dev, "[0x%p]\ti_voff = 0x%x\n", (void *)t, t->set.i_voff);
++ dev_dbg(t->dev, "[0x%p]\tistride = %d\n", (void *)t, t->set.istride);
++ if (t->input.deinterlace.enable) {
++ dev_dbg(t->dev, "[0x%p]deinterlace enabled with:\n", (void *)t);
++ if (t->input.deinterlace.motion != HIGH_MOTION) {
++ dev_dbg(t->dev, "[0x%p]\tlow/medium motion\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tpaddr_n = 0x%x\n",
++ (void *)t, t->input.paddr_n);
++ } else
++ dev_dbg(t->dev, "[0x%p]\thigh motion\n", (void *)t);
++ }
++
++ dev_dbg(t->dev, "[0x%p]output:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tformat = 0x%x\n", (void *)t, t->output.format);
++ dev_dbg(t->dev, "[0x%p]\twidth = %d\n", (void *)t, t->output.width);
++ dev_dbg(t->dev, "[0x%p]\theight = %d\n", (void *)t, t->output.height);
++ dev_dbg(t->dev, "[0x%p]\tcrop.w = %d\n", (void *)t, t->output.crop.w);
++ dev_dbg(t->dev, "[0x%p]\tcrop.h = %d\n", (void *)t, t->output.crop.h);
++ dev_dbg(t->dev, "[0x%p]\tcrop.pos.x = %d\n",
++ (void *)t, t->output.crop.pos.x);
++ dev_dbg(t->dev, "[0x%p]\tcrop.pos.y = %d\n",
++ (void *)t, t->output.crop.pos.y);
++ dev_dbg(t->dev, "[0x%p]\trotate = %d\n", (void *)t, t->output.rotate);
++ dev_dbg(t->dev, "[0x%p]output buffer:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tpaddr = 0x%x\n", (void *)t, t->output.paddr);
++ dev_dbg(t->dev, "[0x%p]\to_off = 0x%x\n", (void *)t, t->set.o_off);
++ dev_dbg(t->dev, "[0x%p]\to_uoff = 0x%x\n", (void *)t, t->set.o_uoff);
++ dev_dbg(t->dev, "[0x%p]\to_voff = 0x%x\n", (void *)t, t->set.o_voff);
++ dev_dbg(t->dev, "[0x%p]\tostride = %d\n", (void *)t, t->set.ostride);
++
++ if (t->overlay_en) {
++ dev_dbg(t->dev, "[0x%p]overlay:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tformat = 0x%x\n",
++ (void *)t, t->overlay.format);
++ dev_dbg(t->dev, "[0x%p]\twidth = %d\n",
++ (void *)t, t->overlay.width);
++ dev_dbg(t->dev, "[0x%p]\theight = %d\n",
++ (void *)t, t->overlay.height);
++ dev_dbg(t->dev, "[0x%p]\tcrop.w = %d\n",
++ (void *)t, t->overlay.crop.w);
++ dev_dbg(t->dev, "[0x%p]\tcrop.h = %d\n",
++ (void *)t, t->overlay.crop.h);
++ dev_dbg(t->dev, "[0x%p]\tcrop.pos.x = %d\n",
++ (void *)t, t->overlay.crop.pos.x);
++ dev_dbg(t->dev, "[0x%p]\tcrop.pos.y = %d\n",
++ (void *)t, t->overlay.crop.pos.y);
++ dev_dbg(t->dev, "[0x%p]overlay buffer:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tpaddr = 0x%x\n",
++ (void *)t, t->overlay.paddr);
++ dev_dbg(t->dev, "[0x%p]\tov_off = 0x%x\n",
++ (void *)t, t->set.ov_off);
++ dev_dbg(t->dev, "[0x%p]\tov_uoff = 0x%x\n",
++ (void *)t, t->set.ov_uoff);
++ dev_dbg(t->dev, "[0x%p]\tov_voff = 0x%x\n",
++ (void *)t, t->set.ov_voff);
++ dev_dbg(t->dev, "[0x%p]\tovstride = %d\n",
++ (void *)t, t->set.ovstride);
++ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL) {
++ dev_dbg(t->dev, "[0x%p]local alpha enabled with:\n",
++ (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tpaddr = 0x%x\n",
++ (void *)t, t->overlay.alpha.loc_alp_paddr);
++ dev_dbg(t->dev, "[0x%p]\tov_alpha_off = 0x%x\n",
++ (void *)t, t->set.ov_alpha_off);
++ dev_dbg(t->dev, "[0x%p]\tov_alpha_stride = %d\n",
++ (void *)t, t->set.ov_alpha_stride);
++ } else
++ dev_dbg(t->dev, "[0x%p]globle alpha enabled with value 0x%x\n",
++ (void *)t, t->overlay.alpha.gvalue);
++ if (t->overlay.colorkey.enable)
++ dev_dbg(t->dev, "[0x%p]colorkey enabled with value 0x%x\n",
++ (void *)t, t->overlay.colorkey.value);
++ }
++
++ dev_dbg(t->dev, "[0x%p]want task_id = %d\n", (void *)t, t->task_id);
++ dev_dbg(t->dev, "[0x%p]want task mode is 0x%x\n",
++ (void *)t, t->set.mode);
++ dev_dbg(t->dev, "[0x%p]\tIC_MODE = 0x%x\n", (void *)t, IC_MODE);
++ dev_dbg(t->dev, "[0x%p]\tROT_MODE = 0x%x\n", (void *)t, ROT_MODE);
++ dev_dbg(t->dev, "[0x%p]\tVDI_MODE = 0x%x\n", (void *)t, VDI_MODE);
++ dev_dbg(t->dev, "[0x%p]\tTask_no = 0x%x\n\n\n", (void *)t, t->task_no);
++}
++
++static void dump_check_err(struct device *dev, int err)
++{
++ switch (err) {
++ case IPU_CHECK_ERR_INPUT_CROP:
++ dev_err(dev, "input crop setting error\n");
++ break;
++ case IPU_CHECK_ERR_OUTPUT_CROP:
++ dev_err(dev, "output crop setting error\n");
++ break;
++ case IPU_CHECK_ERR_OVERLAY_CROP:
++ dev_err(dev, "overlay crop setting error\n");
++ break;
++ case IPU_CHECK_ERR_INPUT_OVER_LIMIT:
++ dev_err(dev, "input over limitation\n");
++ break;
++ case IPU_CHECK_ERR_OVERLAY_WITH_VDI:
++ dev_err(dev, "do not support overlay with deinterlace\n");
++ break;
++ case IPU_CHECK_ERR_OV_OUT_NO_FIT:
++ dev_err(dev,
++ "width/height of overlay and ic output should be same\n");
++ break;
++ case IPU_CHECK_ERR_PROC_NO_NEED:
++ dev_err(dev, "no ipu processing need\n");
++ break;
++ case IPU_CHECK_ERR_SPLIT_INPUTW_OVER:
++ dev_err(dev, "split mode input width overflow\n");
++ break;
++ case IPU_CHECK_ERR_SPLIT_INPUTH_OVER:
++ dev_err(dev, "split mode input height overflow\n");
++ break;
++ case IPU_CHECK_ERR_SPLIT_OUTPUTW_OVER:
++ dev_err(dev, "split mode output width overflow\n");
++ break;
++ case IPU_CHECK_ERR_SPLIT_OUTPUTH_OVER:
++ dev_err(dev, "split mode output height overflow\n");
++ break;
++ case IPU_CHECK_ERR_SPLIT_WITH_ROT:
++ dev_err(dev, "not support split mode with rotation\n");
++ break;
++ case IPU_CHECK_ERR_W_DOWNSIZE_OVER:
++ dev_err(dev, "horizontal downsizing ratio overflow\n");
++ break;
++ case IPU_CHECK_ERR_H_DOWNSIZE_OVER:
++ dev_err(dev, "vertical downsizing ratio overflow\n");
++ break;
++ default:
++ break;
++ }
++}
++
++static void dump_check_warn(struct device *dev, int warn)
++{
++	if (warn & IPU_CHECK_WARN_INPUT_OFFS_NOT8ALIGN)
++		dev_warn(dev, "input u/v offset not 8-byte aligned\n");
++	if (warn & IPU_CHECK_WARN_OUTPUT_OFFS_NOT8ALIGN)
++		dev_warn(dev, "output u/v offset not 8-byte aligned\n");
++	if (warn & IPU_CHECK_WARN_OVERLAY_OFFS_NOT8ALIGN)
++		dev_warn(dev, "overlay u/v offset not 8-byte aligned\n");
++}
++
++static int set_crop(struct ipu_crop *crop, int width, int height, int fmt)
++{
++ if ((width == 0) || (height == 0)) {
++ pr_err("Invalid param: width=%d, height=%d\n", width, height);
++ return -EINVAL;
++ }
++
++ if ((IPU_PIX_FMT_TILED_NV12 == fmt) ||
++ (IPU_PIX_FMT_TILED_NV12F == fmt)) {
++ if (crop->w || crop->h) {
++ if (((crop->w + crop->pos.x) > width)
++ || ((crop->h + crop->pos.y) > height)
++ || (0 != (crop->w % IPU_PIX_FMT_TILED_NV12_MBALIGN))
++ || (0 != (crop->h % IPU_PIX_FMT_TILED_NV12_MBALIGN))
++ || (0 != (crop->pos.x % IPU_PIX_FMT_TILED_NV12_MBALIGN))
++ || (0 != (crop->pos.y % IPU_PIX_FMT_TILED_NV12_MBALIGN))
++ ) {
++ pr_err("set_crop error MB align.\n");
++ return -EINVAL;
++ }
++ } else {
++ crop->pos.x = 0;
++ crop->pos.y = 0;
++ crop->w = width;
++ crop->h = height;
++ if ((0 != (crop->w % IPU_PIX_FMT_TILED_NV12_MBALIGN))
++ || (0 != (crop->h % IPU_PIX_FMT_TILED_NV12_MBALIGN))) {
++ pr_err("set_crop error w/h MB align.\n");
++ return -EINVAL;
++ }
++ }
++ } else {
++ if (crop->w || crop->h) {
++ if (((crop->w + crop->pos.x) > (width + 16))
++ || ((crop->h + crop->pos.y) > height + 16)) {
++ pr_err("set_crop error exceeds width/height.\n");
++ return -EINVAL;
++ }
++ } else {
++ crop->pos.x = 0;
++ crop->pos.y = 0;
++ crop->w = width;
++ crop->h = height;
++ }
++ crop->w -= crop->w%8;
++ crop->h -= crop->h%8;
++ }
++
++ if ((crop->w == 0) || (crop->h == 0)) {
++ pr_err("Invalid crop param: crop.w=%d, crop.h=%d\n",
++ crop->w, crop->h);
++ return -EINVAL;
++ }
++
++ return 0;
++}
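++
++/*
++ * Example (illustrative): for non-tiled formats set_crop() silently
++ * rounds the crop window down to a multiple of 8, so a requested
++ * 1366x768 crop becomes 1360x768 (1366 - 1366 % 8 = 1360). Tiled NV12
++ * instead rejects any window not aligned to
++ * IPU_PIX_FMT_TILED_NV12_MBALIGN rather than rounding it.
++ */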
++
++static void update_offset(unsigned int fmt,
++ unsigned int width, unsigned int height,
++ unsigned int pos_x, unsigned int pos_y,
++ int *off, int *uoff, int *voff, int *stride)
++{
++	/* NOTE: u/v offsets should be based on the start point of off */
++ switch (fmt) {
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YUV420P:
++ *off = pos_y * width + pos_x;
++ *uoff = (width * (height - pos_y) - pos_x)
++ + (width/2) * (pos_y/2) + pos_x/2;
++ /* In case height is odd, round up to even */
++ *voff = *uoff + (width/2) * ((height+1)/2);
++ break;
++ case IPU_PIX_FMT_YVU420P:
++ *off = pos_y * width + pos_x;
++ *voff = (width * (height - pos_y) - pos_x)
++ + (width/2) * (pos_y/2) + pos_x/2;
++ /* In case height is odd, round up to even */
++ *uoff = *voff + (width/2) * ((height+1)/2);
++ break;
++ case IPU_PIX_FMT_YVU422P:
++ *off = pos_y * width + pos_x;
++ *voff = (width * (height - pos_y) - pos_x)
++ + (width/2) * pos_y + pos_x/2;
++ *uoff = *voff + (width/2) * height;
++ break;
++ case IPU_PIX_FMT_YUV422P:
++ *off = pos_y * width + pos_x;
++ *uoff = (width * (height - pos_y) - pos_x)
++ + (width/2) * pos_y + pos_x/2;
++ *voff = *uoff + (width/2) * height;
++ break;
++ case IPU_PIX_FMT_YUV444P:
++ *off = pos_y * width + pos_x;
++ *uoff = width * height;
++ *voff = width * height * 2;
++ break;
++ case IPU_PIX_FMT_NV12:
++ *off = pos_y * width + pos_x;
++ *uoff = (width * (height - pos_y) - pos_x)
++ + width * (pos_y/2) + pos_x;
++ break;
++ case IPU_PIX_FMT_TILED_NV12:
++ /*
++ * tiled format, progressive:
++ * assuming that line is aligned with MB height (aligned to 16)
++ * offset = line * stride + (pixel / MB_width) * pixels_in_MB
++ * = line * stride + (pixel / 16) * 256
++ * = line * stride + pixel * 16
++ */
++ *off = pos_y * width + (pos_x << 4);
++ *uoff = ALIGN(width * height, SZ_4K) + (*off >> 1) - *off;
++ break;
++ case IPU_PIX_FMT_TILED_NV12F:
++ /*
++ * tiled format, interlaced:
++ * same as above, only number of pixels in MB is 128,
++ * instead of 256
++ */
++ *off = (pos_y >> 1) * width + (pos_x << 3);
++ *uoff = ALIGN(width * height/2, SZ_4K) + (*off >> 1) - *off;
++ break;
++ default:
++ *off = (pos_y * width + pos_x) * fmt_to_bpp(fmt)/8;
++ break;
++ }
++ *stride = width * bytes_per_pixel(fmt);
++}
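++
++/*
++ * Worked example (illustrative): IPU_PIX_FMT_YUV420P with width=640,
++ * height=480 and crop position (16, 8) gives
++ *
++ *	off  = 8 * 640 + 16                                = 5136
++ *	uoff = (640 * (480 - 8) - 16) + 320 * (8/2) + 16/2 = 303352
++ *
++ * so paddr + off + uoff = paddr + 308488, the same address as the
++ * direct computation 640*480 + (8/2)*320 + 16/2 from the U plane base:
++ * the u/v offsets are deliberately relative to 'off'.
++ */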
++
++static int update_split_setting(struct ipu_task_entry *t, bool vdi_split)
++{
++ struct stripe_param left_stripe;
++ struct stripe_param right_stripe;
++ struct stripe_param up_stripe;
++ struct stripe_param down_stripe;
++ u32 iw, ih, ow, oh;
++ u32 max_width;
++ int ret;
++
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT)
++ return IPU_CHECK_ERR_SPLIT_WITH_ROT;
++
++ iw = t->input.crop.w;
++ ih = t->input.crop.h;
++
++ ow = t->output.crop.w;
++ oh = t->output.crop.h;
++
++ memset(&left_stripe, 0, sizeof(left_stripe));
++ memset(&right_stripe, 0, sizeof(right_stripe));
++ memset(&up_stripe, 0, sizeof(up_stripe));
++ memset(&down_stripe, 0, sizeof(down_stripe));
++
++ if (t->set.split_mode & RL_SPLIT) {
++ /*
++		 * We do want equal stripes: initialize stripes in case
++ * calc_stripes returns before actually doing the calculation
++ */
++ left_stripe.input_width = iw / 2;
++ left_stripe.output_width = ow / 2;
++ right_stripe.input_column = iw / 2;
++ right_stripe.output_column = ow / 2;
++
++ if (vdi_split)
++ max_width = soc_max_vdi_in_width();
++ else
++ max_width = soc_max_out_width();
++ ret = ipu_calc_stripes_sizes(iw,
++ ow,
++ max_width,
++ (((unsigned long long)1) << 32), /* 32bit for fractional*/
++ 1, /* equal stripes */
++ t->input.format,
++ t->output.format,
++ &left_stripe,
++ &right_stripe);
++ if (ret < 0)
++ return IPU_CHECK_ERR_W_DOWNSIZE_OVER;
++ else if (ret)
++ dev_dbg(t->dev, "Warn: no:0x%x,calc_stripes ret:%d\n",
++ t->task_no, ret);
++ t->set.sp_setting.iw = left_stripe.input_width;
++ t->set.sp_setting.ow = left_stripe.output_width;
++ t->set.sp_setting.outh_resize_ratio = left_stripe.irr;
++ t->set.sp_setting.i_left_pos = left_stripe.input_column;
++ t->set.sp_setting.o_left_pos = left_stripe.output_column;
++ t->set.sp_setting.i_right_pos = right_stripe.input_column;
++ t->set.sp_setting.o_right_pos = right_stripe.output_column;
++ } else {
++ t->set.sp_setting.iw = iw;
++ t->set.sp_setting.ow = ow;
++ t->set.sp_setting.outh_resize_ratio = 0;
++ t->set.sp_setting.i_left_pos = 0;
++ t->set.sp_setting.o_left_pos = 0;
++ t->set.sp_setting.i_right_pos = 0;
++ t->set.sp_setting.o_right_pos = 0;
++ }
++ if ((t->set.sp_setting.iw + t->set.sp_setting.i_right_pos) > (iw+16))
++ return IPU_CHECK_ERR_SPLIT_INPUTW_OVER;
++ if (((t->set.sp_setting.ow + t->set.sp_setting.o_right_pos) > ow)
++ || (t->set.sp_setting.ow > soc_max_out_width()))
++ return IPU_CHECK_ERR_SPLIT_OUTPUTW_OVER;
++ if (rounddown(t->set.sp_setting.ow, 8) * 8 <=
++ rounddown(t->set.sp_setting.iw, 8))
++ return IPU_CHECK_ERR_W_DOWNSIZE_OVER;
++
++ if (t->set.split_mode & UD_SPLIT) {
++ /*
++		 * We do want equal stripes: initialize stripes in case
++ * calc_stripes returns before actually doing the calculation
++ */
++ up_stripe.input_width = ih / 2;
++ up_stripe.output_width = oh / 2;
++ down_stripe.input_column = ih / 2;
++ down_stripe.output_column = oh / 2;
++ ret = ipu_calc_stripes_sizes(ih,
++ oh,
++ soc_max_out_height(),
++ (((unsigned long long)1) << 32), /* 32bit for fractional*/
++ 0x1 | 0x2, /* equal stripes and vertical */
++ t->input.format,
++ t->output.format,
++ &up_stripe,
++ &down_stripe);
++ if (ret < 0)
++ return IPU_CHECK_ERR_H_DOWNSIZE_OVER;
++ else if (ret)
++ dev_err(t->dev, "Warn: no:0x%x,calc_stripes ret:%d\n",
++ t->task_no, ret);
++ t->set.sp_setting.ih = up_stripe.input_width;
++ t->set.sp_setting.oh = up_stripe.output_width;
++ t->set.sp_setting.outv_resize_ratio = up_stripe.irr;
++ t->set.sp_setting.i_top_pos = up_stripe.input_column;
++ t->set.sp_setting.o_top_pos = up_stripe.output_column;
++ t->set.sp_setting.i_bottom_pos = down_stripe.input_column;
++ t->set.sp_setting.o_bottom_pos = down_stripe.output_column;
++ } else {
++ t->set.sp_setting.ih = ih;
++ t->set.sp_setting.oh = oh;
++ t->set.sp_setting.outv_resize_ratio = 0;
++ t->set.sp_setting.i_top_pos = 0;
++ t->set.sp_setting.o_top_pos = 0;
++ t->set.sp_setting.i_bottom_pos = 0;
++ t->set.sp_setting.o_bottom_pos = 0;
++ }
++
++ /* downscale case: enforce limits */
++ if (((t->set.sp_setting.ih + t->set.sp_setting.i_bottom_pos) > (ih))
++ && (t->set.sp_setting.ih >= t->set.sp_setting.oh))
++ return IPU_CHECK_ERR_SPLIT_INPUTH_OVER;
++ /* upscale case: relax limits because ipu_calc_stripes_sizes() may
++ create input stripe that falls just outside of the input window */
++ else if ((t->set.sp_setting.ih + t->set.sp_setting.i_bottom_pos)
++ > (ih+16))
++ return IPU_CHECK_ERR_SPLIT_INPUTH_OVER;
++ if (((t->set.sp_setting.oh + t->set.sp_setting.o_bottom_pos) > oh)
++ || (t->set.sp_setting.oh > soc_max_out_height()))
++ return IPU_CHECK_ERR_SPLIT_OUTPUTH_OVER;
++ if (rounddown(t->set.sp_setting.oh, 8) * 8 <=
++ rounddown(t->set.sp_setting.ih, 8))
++ return IPU_CHECK_ERR_H_DOWNSIZE_OVER;
++
++ return IPU_CHECK_OK;
++}
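++
++/*
++ * Example (illustrative): the split is driven by the IC output limits
++ * (soc_max_out_width()/soc_max_out_height() = 1024). Scaling up to a
++ * 1920x1080 output therefore needs RL_SPLIT | UD_SPLIT: with the
++ * "equal stripes" flag each axis is cut in half, giving four 960x540
++ * output stripes, each within the 1024 limit.
++ */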
++
++static int check_task(struct ipu_task_entry *t)
++{
++ int tmp;
++ int ret = IPU_CHECK_OK;
++ int timeout;
++ bool vdi_split = false;
++ int ocw, och;
++
++ if ((IPU_PIX_FMT_TILED_NV12 == t->overlay.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == t->overlay.format) ||
++ (IPU_PIX_FMT_TILED_NV12 == t->output.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == t->output.format) ||
++ ((IPU_PIX_FMT_TILED_NV12F == t->input.format) &&
++ !t->input.deinterlace.enable)) {
++ ret = IPU_CHECK_ERR_NOT_SUPPORT;
++ goto done;
++ }
++
++ /* check input */
++ ret = set_crop(&t->input.crop, t->input.width, t->input.height,
++ t->input.format);
++ if (ret < 0) {
++ ret = IPU_CHECK_ERR_INPUT_CROP;
++ goto done;
++ } else
++ update_offset(t->input.format, t->input.width, t->input.height,
++ t->input.crop.pos.x, t->input.crop.pos.y,
++ &t->set.i_off, &t->set.i_uoff,
++ &t->set.i_voff, &t->set.istride);
++
++ /* check output */
++ ret = set_crop(&t->output.crop, t->output.width, t->output.height,
++ t->output.format);
++ if (ret < 0) {
++ ret = IPU_CHECK_ERR_OUTPUT_CROP;
++ goto done;
++ } else
++ update_offset(t->output.format,
++ t->output.width, t->output.height,
++ t->output.crop.pos.x, t->output.crop.pos.y,
++ &t->set.o_off, &t->set.o_uoff,
++ &t->set.o_voff, &t->set.ostride);
++
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
++ /*
++ * Cache output width and height and
++ * swap them so that we may check
++ * downsize overflow correctly.
++ */
++ ocw = t->output.crop.h;
++ och = t->output.crop.w;
++ } else {
++ ocw = t->output.crop.w;
++ och = t->output.crop.h;
++ }
++
++ if (ocw * 8 <= t->input.crop.w) {
++ ret = IPU_CHECK_ERR_W_DOWNSIZE_OVER;
++ goto done;
++ }
++
++ if (och * 8 <= t->input.crop.h) {
++ ret = IPU_CHECK_ERR_H_DOWNSIZE_OVER;
++ goto done;
++ }
++
++ if ((IPU_PIX_FMT_TILED_NV12 == t->input.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == t->input.format)) {
++ if ((t->input.crop.w > soc_max_in_width(1)) ||
++ (t->input.crop.h > soc_max_in_height())) {
++ ret = IPU_CHECK_ERR_INPUT_OVER_LIMIT;
++ goto done;
++ }
++		/* output fmt: NV12 and YUYV; resize is not supported yet */
++ if (((IPU_PIX_FMT_NV12 != t->output.format) &&
++ (IPU_PIX_FMT_YUYV != t->output.format)) ||
++ (t->input.crop.w != t->output.crop.w) ||
++ (t->input.crop.h != t->output.crop.h)) {
++ ret = IPU_CHECK_ERR_NOT_SUPPORT;
++ goto done;
++ }
++ }
++
++ /* check overlay if there is */
++ if (t->overlay_en) {
++ if (t->input.deinterlace.enable) {
++ ret = IPU_CHECK_ERR_OVERLAY_WITH_VDI;
++ goto done;
++ }
++
++ ret = set_crop(&t->overlay.crop, t->overlay.width,
++ t->overlay.height, t->overlay.format);
++ if (ret < 0) {
++ ret = IPU_CHECK_ERR_OVERLAY_CROP;
++ goto done;
++ } else {
++ ocw = t->output.crop.w;
++ och = t->output.crop.h;
++
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
++ ocw = t->output.crop.h;
++ och = t->output.crop.w;
++ }
++ if ((t->overlay.crop.w != ocw) ||
++ (t->overlay.crop.h != och)) {
++ ret = IPU_CHECK_ERR_OV_OUT_NO_FIT;
++ goto done;
++ }
++
++ update_offset(t->overlay.format,
++ t->overlay.width, t->overlay.height,
++ t->overlay.crop.pos.x, t->overlay.crop.pos.y,
++ &t->set.ov_off, &t->set.ov_uoff,
++ &t->set.ov_voff, &t->set.ovstride);
++ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL) {
++ t->set.ov_alpha_stride = t->overlay.width;
++ t->set.ov_alpha_off = t->overlay.crop.pos.y *
++ t->overlay.width + t->overlay.crop.pos.x;
++ }
++ }
++ }
++
++ /* input overflow? */
++ if (!((IPU_PIX_FMT_TILED_NV12 == t->input.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == t->input.format))) {
++ if ((t->input.crop.w > soc_max_in_width(0)) ||
++ (t->input.crop.h > soc_max_in_height())) {
++ ret = IPU_CHECK_ERR_INPUT_OVER_LIMIT;
++ goto done;
++ }
++ }
++
++ /* check task mode */
++ t->set.mode = NULL_MODE;
++ t->set.split_mode = NO_SPLIT;
++
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
++ /*output swap*/
++ tmp = t->output.crop.w;
++ t->output.crop.w = t->output.crop.h;
++ t->output.crop.h = tmp;
++ }
++
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT)
++ t->set.mode |= ROT_MODE;
++
++ /*need resize or CSC?*/
++ if ((t->input.crop.w != t->output.crop.w) ||
++ (t->input.crop.h != t->output.crop.h) ||
++ need_csc(t->input.format, t->output.format))
++ t->set.mode |= IC_MODE;
++
++ /*need cropping?*/
++ if ((t->input.crop.w != t->input.width) ||
++ (t->input.crop.h != t->input.height) ||
++ (t->output.crop.w != t->output.width) ||
++ (t->output.crop.h != t->output.height))
++ t->set.mode |= IC_MODE;
++
++ /*need flip?*/
++ if ((t->set.mode == NULL_MODE) && (t->output.rotate > IPU_ROTATE_NONE))
++ t->set.mode |= IC_MODE;
++
++	/*need IDMAC to do format conversion (same color space)?*/
++ if ((t->set.mode == NULL_MODE) && (t->input.format != t->output.format))
++ t->set.mode |= IC_MODE;
++
++ /*overlay support*/
++ if (t->overlay_en)
++ t->set.mode |= IC_MODE;
++
++ /*deinterlace*/
++ if (t->input.deinterlace.enable) {
++ t->set.mode &= ~IC_MODE;
++ t->set.mode |= VDI_MODE;
++ }
++ if ((IPU_PIX_FMT_TILED_NV12 == t->input.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == t->input.format)) {
++ if (t->set.mode & ROT_MODE) {
++ ret = IPU_CHECK_ERR_NOT_SUPPORT;
++ goto done;
++ }
++ t->set.mode |= VDOA_MODE;
++ if (IPU_PIX_FMT_TILED_NV12F == t->input.format)
++ t->set.mode |= VDOA_BAND_MODE;
++ t->set.mode &= ~IC_MODE;
++ }
++
++ if ((t->set.mode & (IC_MODE | VDI_MODE)) &&
++ (IPU_PIX_FMT_TILED_NV12F != t->input.format)) {
++ if (t->output.crop.w > soc_max_out_width())
++ t->set.split_mode |= RL_SPLIT;
++ if (t->output.crop.h > soc_max_out_height())
++ t->set.split_mode |= UD_SPLIT;
++ if (!t->set.split_mode && (t->set.mode & VDI_MODE) &&
++ (t->input.crop.w > soc_max_vdi_in_width())) {
++ t->set.split_mode |= RL_SPLIT;
++ vdi_split = true;
++ }
++ if (t->set.split_mode) {
++ if ((t->set.split_mode == RL_SPLIT) ||
++ (t->set.split_mode == UD_SPLIT))
++ timeout = DEF_TIMEOUT_MS * 2 + DEF_DELAY_MS;
++ else
++ timeout = DEF_TIMEOUT_MS * 4 + DEF_DELAY_MS;
++ if (t->timeout < timeout)
++ t->timeout = timeout;
++
++ ret = update_split_setting(t, vdi_split);
++ if (ret > IPU_CHECK_ERR_MIN)
++ goto done;
++ }
++ }
++
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
++ /*output swap*/
++ tmp = t->output.crop.w;
++ t->output.crop.w = t->output.crop.h;
++ t->output.crop.h = tmp;
++ }
++
++ if (t->set.mode == NULL_MODE) {
++ ret = IPU_CHECK_ERR_PROC_NO_NEED;
++ goto done;
++ }
++
++ if ((t->set.i_uoff % 8) || (t->set.i_voff % 8))
++ ret |= IPU_CHECK_WARN_INPUT_OFFS_NOT8ALIGN;
++ if ((t->set.o_uoff % 8) || (t->set.o_voff % 8))
++ ret |= IPU_CHECK_WARN_OUTPUT_OFFS_NOT8ALIGN;
++ if (t->overlay_en && ((t->set.ov_uoff % 8) || (t->set.ov_voff % 8)))
++ ret |= IPU_CHECK_WARN_OVERLAY_OFFS_NOT8ALIGN;
++
++done:
++ /* dump msg */
++ if (debug) {
++ if (ret > IPU_CHECK_ERR_MIN)
++ dump_check_err(t->dev, ret);
++ else if (ret != IPU_CHECK_OK)
++ dump_check_warn(t->dev, ret);
++ }
++
++ return ret;
++}
++
++static int prepare_task(struct ipu_task_entry *t)
++{
++ int ret = 0;
++
++ ret = check_task(t);
++ if (ret > IPU_CHECK_ERR_MIN)
++ return -EINVAL;
++
++ if (t->set.mode & VDI_MODE) {
++ t->task_id = IPU_TASK_ID_VF;
++ t->set.task = VDI_VF;
++ if (t->set.mode & ROT_MODE)
++ t->set.task |= ROT_VF;
++ }
++
++ if (VDOA_MODE == t->set.mode) {
++ if (t->set.task != 0) {
++ dev_err(t->dev, "ERR: vdoa only task:0x%x, [0x%p].\n",
++ t->set.task, t);
++ return -EINVAL;
++ }
++ t->set.task |= VDOA_ONLY;
++ }
++
++ if (VDOA_BAND_MODE & t->set.mode) {
++ /* to save band size: 1<<3 = 8 lines */
++ t->set.band_lines = 3;
++ }
++
++ dump_task_info(t);
++
++ return ret;
++}
++
++static uint32_t ic_vf_pp_is_busy(struct ipu_soc *ipu, bool is_vf)
++{
++ uint32_t status;
++ uint32_t status_vf;
++ uint32_t status_rot;
++
++ if (is_vf) {
++ status = ipu_channel_status(ipu, MEM_VDI_PRP_VF_MEM);
++ status_vf = ipu_channel_status(ipu, MEM_PRP_VF_MEM);
++ status_rot = ipu_channel_status(ipu, MEM_ROT_VF_MEM);
++ return status || status_vf || status_rot;
++ } else {
++ status = ipu_channel_status(ipu, MEM_PP_MEM);
++ status_rot = ipu_channel_status(ipu, MEM_ROT_PP_MEM);
++ return status || status_rot;
++ }
++}
++
++static int _get_vdoa_ipu_res(struct ipu_task_entry *t)
++{
++ int i;
++ struct ipu_soc *ipu;
++ u8 *used;
++ uint32_t found_ipu = 0;
++ uint32_t found_vdoa = 0;
++ struct ipu_channel_tabel *tbl = &ipu_ch_tbl;
++
++ mutex_lock(&tbl->lock);
++ if (t->set.mode & VDOA_MODE) {
++ if (NULL != t->vdoa_handle)
++ found_vdoa = 1;
++ else {
++ found_vdoa = tbl->vdoa_used ? 0 : 1;
++ if (found_vdoa) {
++ tbl->vdoa_used = 1;
++ vdoa_get_handle(&t->vdoa_handle);
++ } else
++				/* vdoa must be acquired before ipu; bail out if it is busy */
++ goto out;
++ if (t->set.task & VDOA_ONLY)
++ goto out;
++ }
++ }
++
++ for (i = 0; i < max_ipu_no; i++) {
++ ipu = ipu_get_soc(i);
++ if (IS_ERR(ipu))
++ dev_err(t->dev, "no:0x%x,found_vdoa:%d, ipu:%d\n",
++ t->task_no, found_vdoa, i);
++
++ used = &tbl->used[i][IPU_PP_CH_VF];
++ if (t->set.mode & VDI_MODE) {
++ if (0 == *used) {
++ *used = 1;
++ found_ipu = 1;
++ break;
++ }
++ } else if ((t->set.mode & IC_MODE) || only_rot(t->set.mode)) {
++ if (0 == *used) {
++ t->task_id = IPU_TASK_ID_VF;
++ if (t->set.mode & IC_MODE)
++ t->set.task |= IC_VF;
++ if (t->set.mode & ROT_MODE)
++ t->set.task |= ROT_VF;
++ *used = 1;
++ found_ipu = 1;
++ break;
++ }
++ } else
++ dev_err(t->dev, "no:0x%x,found_vdoa:%d, mode:0x%x\n",
++ t->task_no, found_vdoa, t->set.mode);
++ }
++ if (found_ipu)
++ goto next;
++
++ for (i = 0; i < max_ipu_no; i++) {
++ ipu = ipu_get_soc(i);
++ if (IS_ERR(ipu))
++ dev_err(t->dev, "no:0x%x,found_vdoa:%d, ipu:%d\n",
++ t->task_no, found_vdoa, i);
++
++ if ((t->set.mode & IC_MODE) || only_rot(t->set.mode)) {
++ used = &tbl->used[i][IPU_PP_CH_PP];
++ if (0 == *used) {
++ t->task_id = IPU_TASK_ID_PP;
++ if (t->set.mode & IC_MODE)
++ t->set.task |= IC_PP;
++ if (t->set.mode & ROT_MODE)
++ t->set.task |= ROT_PP;
++ *used = 1;
++ found_ipu = 1;
++ break;
++ }
++ }
++ }
++
++next:
++ if (found_ipu) {
++ t->ipu = ipu;
++ t->ipu_id = i;
++ t->dev = ipu->dev;
++ if (atomic_inc_return(&t->res_get) == 2)
++ dev_err(t->dev,
++ "ERR no:0x%x,found_vdoa:%d,get ipu twice\n",
++ t->task_no, found_vdoa);
++ }
++out:
++ dev_dbg(t->dev,
++ "%s:no:0x%x,found_vdoa:%d, found_ipu:%d\n",
++ __func__, t->task_no, found_vdoa, found_ipu);
++ mutex_unlock(&tbl->lock);
++ if (t->set.task & VDOA_ONLY)
++ return found_vdoa;
++ else if (t->set.mode & VDOA_MODE)
++ return found_vdoa && found_ipu;
++ else
++ return found_ipu;
++}
++
++static void put_vdoa_ipu_res(struct ipu_task_entry *tsk, int vdoa_only)
++{
++ int ret;
++ int rel_vdoa = 0, rel_ipu = 0;
++ struct ipu_channel_tabel *tbl = &ipu_ch_tbl;
++
++ mutex_lock(&tbl->lock);
++ if (tsk->set.mode & VDOA_MODE) {
++ if (!tbl->vdoa_used && tsk->vdoa_handle)
++ dev_err(tsk->dev,
++ "ERR no:0x%x,vdoa not used,mode:0x%x\n",
++ tsk->task_no, tsk->set.mode);
++ if (tbl->vdoa_used && tsk->vdoa_handle) {
++ tbl->vdoa_used = 0;
++ vdoa_put_handle(&tsk->vdoa_handle);
++ if (tsk->ipu)
++ tsk->ipu->vdoa_en = 0;
++ rel_vdoa = 1;
++ if (vdoa_only || (tsk->set.task & VDOA_ONLY))
++ goto out;
++ }
++ }
++
++ tbl->used[tsk->ipu_id][tsk->task_id - 1] = 0;
++ rel_ipu = 1;
++ ret = atomic_inc_return(&tsk->res_free);
++ if (ret == 2)
++ dev_err(tsk->dev,
++ "ERR no:0x%x,rel_vdoa:%d,put ipu twice\n",
++ tsk->task_no, rel_vdoa);
++out:
++ dev_dbg(tsk->dev,
++ "%s:no:0x%x,rel_vdoa:%d, rel_ipu:%d\n",
++ __func__, tsk->task_no, rel_vdoa, rel_ipu);
++ mutex_unlock(&tbl->lock);
++}
++
++static int get_vdoa_ipu_res(struct ipu_task_entry *t)
++{
++ int ret;
++ uint32_t found = 0;
++
++ found = _get_vdoa_ipu_res(t);
++ if (!found) {
++ t->ipu_id = -1;
++ t->ipu = NULL;
++ /* blocking to get resource */
++ ret = atomic_inc_return(&req_cnt);
++ dev_dbg(t->dev,
++ "wait_res:no:0x%x,req_cnt:%d\n", t->task_no, ret);
++ ret = wait_event_timeout(res_waitq, _get_vdoa_ipu_res(t),
++ msecs_to_jiffies(t->timeout - DEF_DELAY_MS));
++ if (ret == 0) {
++ dev_err(t->dev, "ERR[0x%p,no-0x%x] wait_res timeout:%dms!\n",
++ t, t->task_no, t->timeout - DEF_DELAY_MS);
++ ret = -ETIMEDOUT;
++ t->state = STATE_RES_TIMEOUT;
++ goto out;
++ } else {
++ if (!(t->set.task & VDOA_ONLY) && (!t->ipu))
++ dev_err(t->dev,
++ "ERR[no-0x%x] can not get ipu!\n",
++ t->task_no);
++ ret = atomic_read(&req_cnt);
++ if (ret > 0)
++ ret = atomic_dec_return(&req_cnt);
++ else
++ dev_err(t->dev,
++ "ERR[no-0x%x] req_cnt:%d mismatch!\n",
++ t->task_no, ret);
++ dev_dbg(t->dev, "no-0x%x,[0x%p],req_cnt:%d, got_res!\n",
++ t->task_no, t, ret);
++ found = 1;
++ }
++ }
++
++out:
++ return found;
++}
++
++static struct ipu_task_entry *create_task_entry(struct ipu_task *task)
++{
++ struct ipu_task_entry *tsk;
++
++ tsk = kzalloc(sizeof(struct ipu_task_entry), GFP_KERNEL);
++ if (!tsk)
++ return ERR_PTR(-ENOMEM);
++ kref_init(&tsk->refcount);
++ tsk->state = -EINVAL;
++ tsk->ipu_id = -1;
++ tsk->dev = ipu_dev;
++ tsk->input = task->input;
++ tsk->output = task->output;
++ tsk->overlay_en = task->overlay_en;
++ if (tsk->overlay_en)
++ tsk->overlay = task->overlay;
++ if (task->timeout > DEF_TIMEOUT_MS)
++ tsk->timeout = task->timeout;
++ else
++ tsk->timeout = DEF_TIMEOUT_MS;
++
++ return tsk;
++}
++
++static void task_mem_free(struct kref *ref)
++{
++ struct ipu_task_entry *tsk =
++ container_of(ref, struct ipu_task_entry, refcount);
++ kfree(tsk);
++}
++
++int create_split_child_task(struct ipu_split_task *sp_task)
++{
++ int ret = 0;
++ struct ipu_task_entry *tsk;
++
++ tsk = create_task_entry(&sp_task->task);
++ if (IS_ERR(tsk))
++ return PTR_ERR(tsk);
++
++ sp_task->child_task = tsk;
++ tsk->task_no = sp_task->task_no;
++
++ ret = prepare_task(tsk);
++ if (ret < 0)
++ goto err;
++
++ tsk->parent = sp_task->parent_task;
++ tsk->set.sp_setting = sp_task->parent_task->set.sp_setting;
++
++ list_add(&tsk->node, &tsk->parent->split_list);
++ dev_dbg(tsk->dev, "[0x%p] sp_tsk Q list,no-0x%x\n", tsk, tsk->task_no);
++ tsk->state = STATE_QUEUE;
++ CHECK_PERF(&tsk->ts_queue);
++err:
++ return ret;
++}
++
++static inline int sp_task_check_done(struct ipu_split_task *sp_task,
++ struct ipu_task_entry *parent, int num, int *idx)
++{
++ int i;
++ int ret = 0;
++ struct ipu_task_entry *tsk;
++ struct mutex *lock = &parent->split_lock;
++
++ *idx = -EINVAL;
++ mutex_lock(lock);
++ for (i = 0; i < num; i++) {
++ tsk = sp_task[i].child_task;
++ if (tsk && tsk->split_done) {
++ *idx = i;
++ ret = 1;
++ goto out;
++ }
++ }
++
++out:
++ mutex_unlock(lock);
++ return ret;
++}
++
++static int create_split_task(
++ int stripe,
++ struct ipu_split_task *sp_task)
++{
++ struct ipu_task *task = &(sp_task->task);
++ struct ipu_task_entry *t = sp_task->parent_task;
++ int ret;
++
++ sp_task->task_no |= stripe;
++
++ task->input = t->input;
++ task->output = t->output;
++ task->overlay_en = t->overlay_en;
++ if (task->overlay_en)
++ task->overlay = t->overlay;
++ task->task_id = t->task_id;
++ if ((t->set.split_mode == RL_SPLIT) ||
++ (t->set.split_mode == UD_SPLIT))
++ task->timeout = t->timeout / 2;
++ else
++ task->timeout = t->timeout / 4;
++
++ task->input.crop.w = t->set.sp_setting.iw;
++ task->input.crop.h = t->set.sp_setting.ih;
++ if (task->overlay_en) {
++ task->overlay.crop.w = t->set.sp_setting.ow;
++ task->overlay.crop.h = t->set.sp_setting.oh;
++ }
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
++ task->output.crop.w = t->set.sp_setting.oh;
++ task->output.crop.h = t->set.sp_setting.ow;
++ t->set.sp_setting.rl_split_line = t->set.sp_setting.o_bottom_pos;
++ t->set.sp_setting.ud_split_line = t->set.sp_setting.o_right_pos;
++
++ } else {
++ task->output.crop.w = t->set.sp_setting.ow;
++ task->output.crop.h = t->set.sp_setting.oh;
++ t->set.sp_setting.rl_split_line = t->set.sp_setting.o_right_pos;
++ t->set.sp_setting.ud_split_line = t->set.sp_setting.o_bottom_pos;
++ }
++
++ if (stripe & LEFT_STRIPE)
++ task->input.crop.pos.x += t->set.sp_setting.i_left_pos;
++ else if (stripe & RIGHT_STRIPE)
++ task->input.crop.pos.x += t->set.sp_setting.i_right_pos;
++ if (stripe & UP_STRIPE)
++ task->input.crop.pos.y += t->set.sp_setting.i_top_pos;
++ else if (stripe & DOWN_STRIPE)
++ task->input.crop.pos.y += t->set.sp_setting.i_bottom_pos;
++
++ if (task->overlay_en) {
++ if (stripe & LEFT_STRIPE)
++ task->overlay.crop.pos.x += t->set.sp_setting.o_left_pos;
++ else if (stripe & RIGHT_STRIPE)
++ task->overlay.crop.pos.x += t->set.sp_setting.o_right_pos;
++ if (stripe & UP_STRIPE)
++ task->overlay.crop.pos.y += t->set.sp_setting.o_top_pos;
++ else if (stripe & DOWN_STRIPE)
++ task->overlay.crop.pos.y += t->set.sp_setting.o_bottom_pos;
++ }
++
++ switch (t->output.rotate) {
++ case IPU_ROTATE_NONE:
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_left_pos;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_right_pos;
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_top_pos;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_bottom_pos;
++ break;
++ case IPU_ROTATE_VERT_FLIP:
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_left_pos;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_right_pos;
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_top_pos - t->set.sp_setting.oh;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_bottom_pos - t->set.sp_setting.oh;
++ break;
++ case IPU_ROTATE_HORIZ_FLIP:
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_left_pos - t->set.sp_setting.ow;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_right_pos - t->set.sp_setting.ow;
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_top_pos;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_bottom_pos;
++ break;
++ case IPU_ROTATE_180:
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_left_pos - t->set.sp_setting.ow;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_right_pos - t->set.sp_setting.ow;
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_top_pos - t->set.sp_setting.oh;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_bottom_pos - t->set.sp_setting.oh;
++ break;
++ case IPU_ROTATE_90_RIGHT:
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_top_pos - t->set.sp_setting.oh;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_bottom_pos - t->set.sp_setting.oh;
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_left_pos;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_right_pos;
++ break;
++ case IPU_ROTATE_90_RIGHT_HFLIP:
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_top_pos;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_bottom_pos;
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_left_pos;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.y += t->set.sp_setting.o_right_pos;
++ break;
++ case IPU_ROTATE_90_RIGHT_VFLIP:
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_top_pos - t->set.sp_setting.oh;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.x =
++ t->output.crop.pos.x + t->output.crop.w
++ - t->set.sp_setting.o_bottom_pos - t->set.sp_setting.oh;
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_left_pos - t->set.sp_setting.ow;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_right_pos - t->set.sp_setting.ow;
++ break;
++ case IPU_ROTATE_90_LEFT:
++ if (stripe & UP_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_top_pos;
++ else if (stripe & DOWN_STRIPE)
++ task->output.crop.pos.x += t->set.sp_setting.o_bottom_pos;
++ if (stripe & LEFT_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_left_pos - t->set.sp_setting.ow;
++ else if (stripe & RIGHT_STRIPE)
++ task->output.crop.pos.y =
++ t->output.crop.pos.y + t->output.crop.h
++ - t->set.sp_setting.o_right_pos - t->set.sp_setting.ow;
++ break;
++ default:
++ dev_err(t->dev, "ERR:should not be here\n");
++ break;
++ }
++
++ ret = create_split_child_task(sp_task);
++ if (ret < 0)
++ dev_err(t->dev, "ERR:create_split_child_task() ret:%d\n", ret);
++ return ret;
++}
++
++static int queue_split_task(struct ipu_task_entry *t,
++ struct ipu_split_task *sp_task, uint32_t size)
++{
++ int err[4];
++ int ret = 0;
++ int i, j;
++ struct ipu_task_entry *tsk = NULL;
++ struct mutex *lock = &t->split_lock;
++ struct mutex *vdic_lock = &t->vdic_lock;
++
++ dev_dbg(t->dev, "Split task 0x%p, no-0x%x, size:%d\n",
++ t, t->task_no, size);
++ mutex_init(lock);
++ mutex_init(vdic_lock);
++ init_waitqueue_head(&t->split_waitq);
++ INIT_LIST_HEAD(&t->split_list);
++ for (j = 0; j < size; j++) {
++ memset(&sp_task[j], 0, sizeof(*sp_task));
++ sp_task[j].parent_task = t;
++ sp_task[j].task_no = t->task_no;
++ }
++
++ if (t->set.split_mode == RL_SPLIT) {
++ i = 0;
++ err[i] = create_split_task(RIGHT_STRIPE, &sp_task[i]);
++ if (err[i] < 0)
++ goto err_start;
++ i = 1;
++ err[i] = create_split_task(LEFT_STRIPE, &sp_task[i]);
++ } else if (t->set.split_mode == UD_SPLIT) {
++ i = 0;
++ err[i] = create_split_task(DOWN_STRIPE, &sp_task[i]);
++ if (err[i] < 0)
++ goto err_start;
++ i = 1;
++ err[i] = create_split_task(UP_STRIPE, &sp_task[i]);
++ } else {
++ i = 0;
++ err[i] = create_split_task(RIGHT_STRIPE | DOWN_STRIPE, &sp_task[i]);
++ if (err[i] < 0)
++ goto err_start;
++ i = 1;
++ err[i] = create_split_task(LEFT_STRIPE | DOWN_STRIPE, &sp_task[i]);
++ if (err[i] < 0)
++ goto err_start;
++ i = 2;
++ err[i] = create_split_task(RIGHT_STRIPE | UP_STRIPE, &sp_task[i]);
++ if (err[i] < 0)
++ goto err_start;
++ i = 3;
++ err[i] = create_split_task(LEFT_STRIPE | UP_STRIPE, &sp_task[i]);
++ }
++
++err_start:
++ for (j = 0; j < (i + 1); j++) {
++ if (err[j] < 0) {
++ if (sp_task[j].child_task)
++ dev_err(t->dev,
++ "sp_task[%d],no-0x%x fail state:%d, queue err:%d.\n",
++ j, sp_task[j].child_task->task_no,
++ sp_task[j].child_task->state, err[j]);
++ goto err_exit;
++ }
++ dev_dbg(t->dev, "[0x%p] sp_task[%d], no-0x%x state:%s, queue ret:%d.\n",
++ sp_task[j].child_task, j, sp_task[j].child_task->task_no,
++ state_msg[sp_task[j].child_task->state].msg, err[j]);
++ }
++
++ return ret;
++
++err_exit:
++ for (j = 0; j < (i + 1); j++) {
++ if (err[j] < 0 && !ret)
++ ret = err[j];
++ tsk = sp_task[j].child_task;
++ if (!tsk)
++ continue;
++ kfree(tsk);
++ }
++ t->state = STATE_ERR;
++ return ret;
++
++}
++
++static int init_tiled_buf(struct ipu_soc *ipu, struct ipu_task_entry *t,
++ ipu_channel_t channel, uint32_t ch_type)
++{
++ int ret = 0;
++ int i;
++ uint32_t ipu_fmt;
++ dma_addr_t inbuf_base = 0;
++ u32 field_size;
++ struct vdoa_params param;
++ struct vdoa_ipu_buf buf;
++ struct ipu_soc *ipu_idx;
++ u32 ipu_stride, obuf_size;
++ u32 height, width;
++ ipu_buffer_t type;
++
++ if ((IPU_PIX_FMT_YUYV != t->output.format) &&
++ (IPU_PIX_FMT_NV12 != t->output.format)) {
++ dev_err(t->dev, "ERR:[0x%d] output format\n", t->task_no);
++ return -EINVAL;
++ }
++
++ memset(&param, 0, sizeof(param));
++ /* init channel tiled bufs */
++ if (deinterlace_3_field(t) &&
++ (IPU_PIX_FMT_TILED_NV12F == t->input.format)) {
++ field_size = tiled_filed_size(t);
++ if (INPUT_CHAN_VDI_P == ch_type) {
++ inbuf_base = t->input.paddr + field_size;
++ param.vfield_buf.prev_veba = inbuf_base + t->set.i_off;
++ } else if (INPUT_CHAN == ch_type) {
++ inbuf_base = t->input.paddr_n;
++ param.vfield_buf.cur_veba = inbuf_base + t->set.i_off;
++ } else if (INPUT_CHAN_VDI_N == ch_type) {
++ inbuf_base = t->input.paddr_n + field_size;
++ param.vfield_buf.next_veba = inbuf_base + t->set.i_off;
++ } else
++ return -EINVAL;
++ height = t->input.crop.h >> 1; /* field format for vdoa */
++ width = t->input.crop.w;
++ param.vfield_buf.vubo = t->set.i_uoff;
++ param.interlaced = 1;
++ param.scan_order = 1;
++ type = IPU_INPUT_BUFFER;
++ } else if ((IPU_PIX_FMT_TILED_NV12 == t->input.format) &&
++ (INPUT_CHAN == ch_type)) {
++ height = t->input.crop.h;
++ width = t->input.crop.w;
++ param.vframe_buf.veba = t->input.paddr + t->set.i_off;
++ param.vframe_buf.vubo = t->set.i_uoff;
++ type = IPU_INPUT_BUFFER;
++ } else
++ return -EINVAL;
++
++ param.band_mode = (t->set.mode & VDOA_BAND_MODE) ? 1 : 0;
++ if (param.band_mode && (t->set.band_lines != 3) &&
++ (t->set.band_lines != 4) && (t->set.band_lines != 5))
++ return -EINVAL;
++ else if (param.band_mode)
++ param.band_lines = (1 << t->set.band_lines);
++ for (i = 0; i < max_ipu_no; i++) {
++ ipu_idx = ipu_get_soc(i);
++ if (!IS_ERR(ipu_idx) && ipu_idx == ipu)
++ break;
++ }
++ if (t->set.task & VDOA_ONLY)
++ /* dummy; no IPU resource is needed */
++ i = 0;
++ if (max_ipu_no == i) {
++ dev_err(t->dev, "ERR:[0x%p] get ipu num\n", t);
++ return -EINVAL;
++ }
++
++ param.ipu_num = i;
++ param.vpu_stride = t->input.width;
++ param.height = height;
++ param.width = width;
++ if (IPU_PIX_FMT_NV12 == t->output.format)
++ param.pfs = VDOA_PFS_NV12;
++ else
++ param.pfs = VDOA_PFS_YUYV;
++ ipu_fmt = (param.pfs == VDOA_PFS_YUYV) ? IPU_PIX_FMT_YUYV :
++ IPU_PIX_FMT_NV12;
++ ipu_stride = param.width * bytes_per_pixel(ipu_fmt);
++ obuf_size = PAGE_ALIGN(param.width * param.height *
++ fmt_to_bpp(ipu_fmt)/8);
++ dev_dbg(t->dev, "band_mode:%d, band_lines:%d\n",
++ param.band_mode, param.band_lines);
++ if (!param.band_mode) {
++ /* note: if this is only a tiled -> raster conversion with
++ no other post-processing, no intermediate buffer is
++ needed and the output buffer is used directly.
++ */
++ if (t->set.task & VDOA_ONLY)
++ param.ieba0 = t->output.paddr;
++ else {
++ dev_err(t->dev, "ERR:[0x%d] vdoa task\n", t->task_no);
++ return -EINVAL;
++ }
++ } else {
++ if (IPU_PIX_FMT_TILED_NV12F != t->input.format) {
++ dev_err(t->dev, "ERR [0x%d] vdoa task\n", t->task_no);
++ return -EINVAL;
++ }
++ }
++ ret = vdoa_setup(t->vdoa_handle, &param);
++ if (ret)
++ goto done;
++ vdoa_get_output_buf(t->vdoa_handle, &buf);
++ if (t->set.task & VDOA_ONLY)
++ goto done;
++
++ ret = ipu_init_channel_buffer(ipu,
++ channel,
++ type,
++ ipu_fmt,
++ width,
++ height,
++ ipu_stride,
++ IPU_ROTATE_NONE,
++ buf.ieba0,
++ buf.ieba1,
++ 0,
++ buf.iubo,
++ 0);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++
++ if (param.band_mode) {
++ ret = ipu_set_channel_bandmode(ipu, channel,
++ type, t->set.band_lines);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BAND_FAIL;
++ goto done;
++ }
++ }
++done:
++ return ret;
++}
++
++static int init_tiled_ch_bufs(struct ipu_soc *ipu, struct ipu_task_entry *t)
++{
++ int ret = 0;
++
++ if (IPU_PIX_FMT_TILED_NV12 == t->input.format) {
++ ret = init_tiled_buf(ipu, t, t->set.ic_chan, INPUT_CHAN);
++ CHECK_RETCODE(ret < 0, "init tiled_ch", t->state, done, ret);
++ } else if (IPU_PIX_FMT_TILED_NV12F == t->input.format) {
++ ret = init_tiled_buf(ipu, t, t->set.ic_chan, INPUT_CHAN);
++ CHECK_RETCODE(ret < 0, "init tiled_ch-c", t->state, done, ret);
++ ret = init_tiled_buf(ipu, t, t->set.vdi_ic_p_chan,
++ INPUT_CHAN_VDI_P);
++ CHECK_RETCODE(ret < 0, "init tiled_ch-p", t->state, done, ret);
++ ret = init_tiled_buf(ipu, t, t->set.vdi_ic_n_chan,
++ INPUT_CHAN_VDI_N);
++ CHECK_RETCODE(ret < 0, "init tiled_ch-n", t->state, done, ret);
++ } else {
++ ret = -EINVAL;
++ dev_err(t->dev, "ERR[no-0x%x] invalid fmt:0x%x!\n",
++ t->task_no, t->input.format);
++ }
++
++done:
++ return ret;
++}
++
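++/*
++ * Initialize the IC/VDI channels and their buffers: route the output
++ * to the rotation intermediate buffer or to the final output, program
++ * the resize/deinterlace/overlay parameters and pick the input buffers
++ * for the current field when deinterlacing.
++ */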
++static int init_ic(struct ipu_soc *ipu, struct ipu_task_entry *t)
++{
++ int ret = 0;
++ ipu_channel_params_t params;
++ dma_addr_t inbuf = 0, ovbuf = 0, ov_alp_buf = 0;
++ dma_addr_t inbuf_p = 0, inbuf_n = 0;
++ dma_addr_t outbuf = 0;
++ int out_uoff = 0, out_voff = 0, out_rot;
++ int out_w = 0, out_h = 0, out_stride;
++ int out_fmt;
++ u32 vdi_frame_idx = 0;
++
++ memset(&params, 0, sizeof(params));
++
++ /* does it need to link to a rot channel? */
++ if (ic_and_rot(t->set.mode)) {
++ outbuf = t->set.r_paddr;
++ out_w = t->set.r_width;
++ out_h = t->set.r_height;
++ out_stride = t->set.r_stride;
++ out_fmt = t->set.r_fmt;
++ out_uoff = 0;
++ out_voff = 0;
++ out_rot = IPU_ROTATE_NONE;
++ } else {
++ outbuf = t->output.paddr + t->set.o_off;
++ out_w = t->output.crop.w;
++ out_h = t->output.crop.h;
++ out_stride = t->set.ostride;
++ out_fmt = t->output.format;
++ out_uoff = t->set.o_uoff;
++ out_voff = t->set.o_voff;
++ out_rot = t->output.rotate;
++ }
++
++ /* settings */
++ params.mem_prp_vf_mem.in_width = t->input.crop.w;
++ params.mem_prp_vf_mem.out_width = out_w;
++ params.mem_prp_vf_mem.in_height = t->input.crop.h;
++ params.mem_prp_vf_mem.out_height = out_h;
++ params.mem_prp_vf_mem.in_pixel_fmt = t->input.format;
++ params.mem_prp_vf_mem.out_pixel_fmt = out_fmt;
++ params.mem_prp_vf_mem.motion_sel = t->input.deinterlace.motion;
++
++ params.mem_prp_vf_mem.outh_resize_ratio =
++ t->set.sp_setting.outh_resize_ratio;
++ params.mem_prp_vf_mem.outv_resize_ratio =
++ t->set.sp_setting.outv_resize_ratio;
++
++ if (t->overlay_en) {
++ params.mem_prp_vf_mem.in_g_pixel_fmt = t->overlay.format;
++ params.mem_prp_vf_mem.graphics_combine_en = 1;
++ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_GLOBAL)
++ params.mem_prp_vf_mem.global_alpha_en = 1;
++ else if (t->overlay.alpha.loc_alp_paddr)
++ params.mem_prp_vf_mem.alpha_chan_en = 1;
++ /* otherwise, per-pixel alpha blending is used. */
++ params.mem_prp_vf_mem.alpha = t->overlay.alpha.gvalue;
++ if (t->overlay.colorkey.enable) {
++ params.mem_prp_vf_mem.key_color_en = 1;
++ params.mem_prp_vf_mem.key_color = t->overlay.colorkey.value;
++ }
++ }
++
++ if (t->input.deinterlace.enable) {
++ if (t->input.deinterlace.field_fmt & IPU_DEINTERLACE_FIELD_MASK)
++ params.mem_prp_vf_mem.field_fmt =
++ IPU_DEINTERLACE_FIELD_BOTTOM;
++ else
++ params.mem_prp_vf_mem.field_fmt =
++ IPU_DEINTERLACE_FIELD_TOP;
++
++ if (t->input.deinterlace.field_fmt & IPU_DEINTERLACE_RATE_EN)
++ vdi_frame_idx = t->input.deinterlace.field_fmt &
++ IPU_DEINTERLACE_RATE_FRAME1;
++ }
++
++ if (t->set.mode & VDOA_MODE)
++ ipu->vdoa_en = 1;
++
++ /* init channels */
++ if (!(t->set.task & VDOA_ONLY)) {
++ ret = ipu_init_channel(ipu, t->set.ic_chan, &params);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_FAIL;
++ goto done;
++ }
++ }
++
++ if (deinterlace_3_field(t)) {
++ ret = ipu_init_channel(ipu, t->set.vdi_ic_p_chan, &params);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_FAIL;
++ goto done;
++ }
++ ret = ipu_init_channel(ipu, t->set.vdi_ic_n_chan, &params);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_FAIL;
++ goto done;
++ }
++ }
++
++ /* init channel bufs */
++ if ((IPU_PIX_FMT_TILED_NV12 == t->input.format) ||
++ (IPU_PIX_FMT_TILED_NV12F == t->input.format)) {
++ ret = init_tiled_ch_bufs(ipu, t);
++ if (ret < 0)
++ goto done;
++ } else {
++ if ((deinterlace_3_field(t)) &&
++ (IPU_PIX_FMT_TILED_NV12F != t->input.format)) {
++ if (params.mem_prp_vf_mem.field_fmt ==
++ IPU_DEINTERLACE_FIELD_TOP) {
++ if (vdi_frame_idx) {
++ inbuf_p = t->input.paddr + t->set.istride +
++ t->set.i_off;
++ inbuf = t->input.paddr_n + t->set.i_off;
++ inbuf_n = t->input.paddr_n + t->set.istride +
++ t->set.i_off;
++ params.mem_prp_vf_mem.field_fmt =
++ IPU_DEINTERLACE_FIELD_BOTTOM;
++ } else {
++ inbuf_p = t->input.paddr + t->set.i_off;
++ inbuf = t->input.paddr + t->set.istride + t->set.i_off;
++ inbuf_n = t->input.paddr_n + t->set.i_off;
++ }
++ } else {
++ if (vdi_frame_idx) {
++ inbuf_p = t->input.paddr + t->set.i_off;
++ inbuf = t->input.paddr_n + t->set.istride + t->set.i_off;
++ inbuf_n = t->input.paddr_n + t->set.i_off;
++ params.mem_prp_vf_mem.field_fmt =
++ IPU_DEINTERLACE_FIELD_TOP;
++ } else {
++ inbuf_p = t->input.paddr + t->set.istride +
++ t->set.i_off;
++ inbuf = t->input.paddr + t->set.i_off;
++ inbuf_n = t->input.paddr_n + t->set.istride +
++ t->set.i_off;
++ }
++ }
++ } else {
++ if (t->input.deinterlace.enable) {
++ if (params.mem_prp_vf_mem.field_fmt ==
++ IPU_DEINTERLACE_FIELD_TOP) {
++ if (vdi_frame_idx) {
++ inbuf = t->input.paddr + t->set.istride + t->set.i_off;
++ params.mem_prp_vf_mem.field_fmt =
++ IPU_DEINTERLACE_FIELD_BOTTOM;
++ } else
++ inbuf = t->input.paddr + t->set.i_off;
++ } else {
++ if (vdi_frame_idx) {
++ inbuf = t->input.paddr + t->set.i_off;
++ params.mem_prp_vf_mem.field_fmt =
++ IPU_DEINTERLACE_FIELD_TOP;
++ } else
++ inbuf = t->input.paddr + t->set.istride + t->set.i_off;
++ }
++ } else
++ inbuf = t->input.paddr + t->set.i_off;
++ }
++
++ if (t->overlay_en)
++ ovbuf = t->overlay.paddr + t->set.ov_off;
++ }
++ if (t->overlay_en && (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL))
++ ov_alp_buf = t->overlay.alpha.loc_alp_paddr
++ + t->set.ov_alpha_off;
++
++ if ((IPU_PIX_FMT_TILED_NV12 != t->input.format) &&
++ (IPU_PIX_FMT_TILED_NV12F != t->input.format)) {
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.ic_chan,
++ IPU_INPUT_BUFFER,
++ t->input.format,
++ t->input.crop.w,
++ t->input.crop.h,
++ t->set.istride,
++ IPU_ROTATE_NONE,
++ inbuf,
++ 0,
++ 0,
++ t->set.i_uoff,
++ t->set.i_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++ }
++ if (deinterlace_3_field(t) &&
++ (IPU_PIX_FMT_TILED_NV12F != t->input.format)) {
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.vdi_ic_p_chan,
++ IPU_INPUT_BUFFER,
++ t->input.format,
++ t->input.crop.w,
++ t->input.crop.h,
++ t->set.istride,
++ IPU_ROTATE_NONE,
++ inbuf_p,
++ 0,
++ 0,
++ t->set.i_uoff,
++ t->set.i_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.vdi_ic_n_chan,
++ IPU_INPUT_BUFFER,
++ t->input.format,
++ t->input.crop.w,
++ t->input.crop.h,
++ t->set.istride,
++ IPU_ROTATE_NONE,
++ inbuf_n,
++ 0,
++ 0,
++ t->set.i_uoff,
++ t->set.i_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++ }
++
++ if (t->overlay_en) {
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.ic_chan,
++ IPU_GRAPH_IN_BUFFER,
++ t->overlay.format,
++ t->overlay.crop.w,
++ t->overlay.crop.h,
++ t->set.ovstride,
++ IPU_ROTATE_NONE,
++ ovbuf,
++ 0,
++ 0,
++ t->set.ov_uoff,
++ t->set.ov_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++ }
++
++ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL) {
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.ic_chan,
++ IPU_ALPHA_IN_BUFFER,
++ IPU_PIX_FMT_GENERIC,
++ t->overlay.crop.w,
++ t->overlay.crop.h,
++ t->set.ov_alpha_stride,
++ IPU_ROTATE_NONE,
++ ov_alp_buf,
++ 0,
++ 0,
++ 0, 0);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++ }
++
++ if (!(t->set.task & VDOA_ONLY)) {
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.ic_chan,
++ IPU_OUTPUT_BUFFER,
++ out_fmt,
++ out_w,
++ out_h,
++ out_stride,
++ out_rot,
++ outbuf,
++ 0,
++ 0,
++ out_uoff,
++ out_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++ }
++
++ if ((t->set.mode & VDOA_BAND_MODE) && (t->set.task & VDI_VF)) {
++ ret = ipu_link_channels(ipu, MEM_VDOA_MEM, t->set.ic_chan);
++ CHECK_RETCODE(ret < 0, "ipu_link_ch vdoa_ic",
++ STATE_LINK_CHAN_FAIL, done, ret);
++ }
++
++done:
++ return ret;
++}
++
++static void uninit_ic(struct ipu_soc *ipu, struct ipu_task_entry *t)
++{
++ int ret;
++
++ if ((t->set.mode & VDOA_BAND_MODE) && (t->set.task & VDI_VF)) {
++ ret = ipu_unlink_channels(ipu, MEM_VDOA_MEM, t->set.ic_chan);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_unlink_ch vdoa_ic",
++ STATE_UNLINK_CHAN_FAIL, ret);
++ }
++ ipu_uninit_channel(ipu, t->set.ic_chan);
++ if (deinterlace_3_field(t)) {
++ ipu_uninit_channel(ipu, t->set.vdi_ic_p_chan);
++ ipu_uninit_channel(ipu, t->set.vdi_ic_n_chan);
++ }
++}
++
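++/*
++ * Initialize the rotation channel; its input is the IC intermediate
++ * buffer in ic+rot mode, or the task input in rot-only mode.
++ */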
++static int init_rot(struct ipu_soc *ipu, struct ipu_task_entry *t)
++{
++ int ret = 0;
++ dma_addr_t inbuf = 0, outbuf = 0;
++ int in_uoff = 0, in_voff = 0;
++ int in_fmt, in_width, in_height, in_stride;
++
++ /* init channel */
++ ret = ipu_init_channel(ipu, t->set.rot_chan, NULL);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_FAIL;
++ goto done;
++ }
++
++ /* init channel buf */
++ /* does it need to link to an IC channel? */
++ if (ic_and_rot(t->set.mode)) {
++ in_fmt = t->set.r_fmt;
++ in_width = t->set.r_width;
++ in_height = t->set.r_height;
++ in_stride = t->set.r_stride;
++ inbuf = t->set.r_paddr;
++ in_uoff = 0;
++ in_voff = 0;
++ } else {
++ in_fmt = t->input.format;
++ in_width = t->input.crop.w;
++ in_height = t->input.crop.h;
++ in_stride = t->set.istride;
++ inbuf = t->input.paddr + t->set.i_off;
++ in_uoff = t->set.i_uoff;
++ in_voff = t->set.i_voff;
++ }
++ outbuf = t->output.paddr + t->set.o_off;
++
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.rot_chan,
++ IPU_INPUT_BUFFER,
++ in_fmt,
++ in_width,
++ in_height,
++ in_stride,
++ t->output.rotate,
++ inbuf,
++ 0,
++ 0,
++ in_uoff,
++ in_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++
++ ret = ipu_init_channel_buffer(ipu,
++ t->set.rot_chan,
++ IPU_OUTPUT_BUFFER,
++ t->output.format,
++ t->output.crop.w,
++ t->output.crop.h,
++ t->set.ostride,
++ IPU_ROTATE_NONE,
++ outbuf,
++ 0,
++ 0,
++ t->set.o_uoff,
++ t->set.o_voff);
++ if (ret < 0) {
++ t->state = STATE_INIT_CHAN_BUF_FAIL;
++ goto done;
++ }
++
++done:
++ return ret;
++}
++
++static void uninit_rot(struct ipu_soc *ipu, struct ipu_task_entry *t)
++{
++ ipu_uninit_channel(ipu, t->set.rot_chan);
++}
++
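++/* Map the task's last channel (IC or ROT) to its end-of-frame IRQ. */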
++static int get_irq(struct ipu_task_entry *t)
++{
++ int irq;
++ ipu_channel_t chan;
++
++ if (only_ic(t->set.mode))
++ chan = t->set.ic_chan;
++ else
++ chan = t->set.rot_chan;
++
++ switch (chan) {
++ case MEM_ROT_VF_MEM:
++ irq = IPU_IRQ_PRP_VF_ROT_OUT_EOF;
++ break;
++ case MEM_ROT_PP_MEM:
++ irq = IPU_IRQ_PP_ROT_OUT_EOF;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ case MEM_PRP_VF_MEM:
++ irq = IPU_IRQ_PRP_VF_OUT_EOF;
++ break;
++ case MEM_PP_MEM:
++ irq = IPU_IRQ_PP_OUT_EOF;
++ break;
++ case MEM_VDI_MEM:
++ irq = IPU_IRQ_VDIC_OUT_EOF;
++ break;
++ default:
++ irq = -EINVAL;
++ }
++
++ return irq;
++}
++
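++/* EOF interrupt: take a perf timestamp and complete the waiting task. */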
++static irqreturn_t task_irq_handler(int irq, void *dev_id)
++{
++ struct ipu_task_entry *prev_tsk = dev_id;
++
++ CHECK_PERF(&prev_tsk->ts_inirq);
++ complete(&prev_tsk->irq_comp);
++ dev_dbg(prev_tsk->dev, "[0x%p] no-0x%x in-irq!",
++ prev_tsk, prev_tsk->task_no);
++
++ return IRQ_HANDLED;
++}
++
++/* Fix up the middle lines of up&down split mode deinterlacing output */
++static void vdi_split_process(struct ipu_soc *ipu, struct ipu_task_entry *t)
++{
++ u32 vdi_size;
++ u32 vdi_save_lines;
++ u32 stripe_mode;
++ u32 task_no;
++ u32 i, offset_addr;
++ u32 line_size;
++ unsigned char *base_off;
++ struct ipu_task_entry *parent = t->parent;
++ struct mutex *lock;
++
++ if (!parent) {
++ dev_err(t->dev, "ERR[0x%x] invalid parent\n", t->task_no);
++ return;
++ }
++ lock = &parent->vdic_lock;
++ mutex_lock(lock);
++ stripe_mode = t->task_no & 0xf;
++ task_no = t->task_no >> 4;
++
++ /* Save both the luma and chroma parts for interleaved YUV (e.g. YUYV).
++ * Save only the luma part for non-interleaved and partially
++ * interleaved YUV formats (e.g. NV12 and YV12). */
++ if (t->output.format == IPU_PIX_FMT_YUYV ||
++ t->output.format == IPU_PIX_FMT_UYVY)
++ line_size = t->output.crop.w * fmt_to_bpp(t->output.format)/8;
++ else
++ line_size = t->output.crop.w;
++
++ vdi_save_lines = (t->output.crop.h - t->set.sp_setting.ud_split_line)/2;
++ vdi_size = vdi_save_lines * line_size;
++ if (vdi_save_lines == 0) { /* u32, so zero is the only error case */
++ dev_err(t->dev, "[0x%p] vdi_save_lines error\n", (void *)t);
++ mutex_unlock(lock);
++ return;
++ }
++
++ /* check whether the vditmpbuf buffers have been allocated
++ * or the buffer size has changed */
++ if ((vdi_save_lines != parent->old_save_lines) ||
++ (vdi_size != parent->old_size)) {
++ if (parent->vditmpbuf[0] != NULL)
++ kfree(parent->vditmpbuf[0]);
++ if (parent->vditmpbuf[1] != NULL)
++ kfree(parent->vditmpbuf[1]);
++
++ parent->vditmpbuf[0] = kmalloc(vdi_size, GFP_KERNEL);
++ if (parent->vditmpbuf[0] == NULL) {
++ dev_err(t->dev,
++ "[0x%p]Falied Alloc vditmpbuf[0]\n", (void *)t);
++ mutex_unlock(lock);
++ return;
++ }
++ memset(parent->vditmpbuf[0], 0, vdi_size);
++
++ parent->vditmpbuf[1] = kmalloc(vdi_size, GFP_KERNEL);
++ if (parent->vditmpbuf[1] == NULL) {
++ dev_err(t->dev,
++ "[0x%p]Falied Alloc vditmpbuf[1]\n", (void *)t);
++ mutex_unlock(lock);
++ return;
++ }
++ memset(parent->vditmpbuf[1], 0, vdi_size);
++
++ parent->old_save_lines = vdi_save_lines;
++ parent->old_size = vdi_size;
++ }
++
++ if (pfn_valid(t->output.paddr >> PAGE_SHIFT)) {
++ base_off = page_address(pfn_to_page(t->output.paddr >> PAGE_SHIFT));
++ base_off += t->output.paddr & ((1 << PAGE_SHIFT) - 1);
++ } else {
++ base_off = (char *)ioremap_nocache(t->output.paddr,
++ t->output.width * t->output.height *
++ fmt_to_bpp(t->output.format)/8);
++ }
++ if (base_off == NULL) {
++ dev_err(t->dev, "ERR[0x%p]Failed get virtual address\n", t);
++ mutex_unlock(lock);
++ return;
++ }
++
++ /* UP stripe or UP&LEFT stripe */
++ if ((stripe_mode == UP_STRIPE) ||
++ (stripe_mode == (UP_STRIPE | LEFT_STRIPE))) {
++ if (!parent->buf0filled) {
++ offset_addr = t->set.o_off +
++ t->set.sp_setting.ud_split_line*t->set.ostride;
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + vdi_size);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + vdi_size);
++
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(parent->vditmpbuf[0] + i*line_size,
++ base_off + offset_addr +
++ i*t->set.ostride, line_size);
++ parent->buf0filled = true;
++ } else {
++ offset_addr = t->set.o_off + (t->output.crop.h -
++ vdi_save_lines) * t->set.ostride;
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(base_off + offset_addr + i*t->set.ostride,
++ parent->vditmpbuf[0] + i*line_size, line_size);
++
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + i*t->set.ostride);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + i*t->set.ostride);
++ parent->buf0filled = false;
++ }
++ }
++ /*Down stripe or Down&Left stripe*/
++ else if ((stripe_mode == DOWN_STRIPE) ||
++ (stripe_mode == (DOWN_STRIPE | LEFT_STRIPE))) {
++ if (!parent->buf0filled) {
++ offset_addr = t->set.o_off + vdi_save_lines*t->set.ostride;
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + vdi_size);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + vdi_size);
++
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(parent->vditmpbuf[0] + i*line_size,
++ base_off + offset_addr + i*t->set.ostride,
++ line_size);
++ parent->buf0filled = true;
++ } else {
++ offset_addr = t->set.o_off;
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(base_off + offset_addr + i*t->set.ostride,
++ parent->vditmpbuf[0] + i*line_size,
++ line_size);
++
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + i*t->set.ostride);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + i*t->set.ostride);
++ parent->buf0filled = false;
++ }
++ }
++ /*Up&Right stripe*/
++ else if (stripe_mode == (UP_STRIPE | RIGHT_STRIPE)) {
++ if (!parent->buf1filled) {
++ offset_addr = t->set.o_off +
++ t->set.sp_setting.ud_split_line*t->set.ostride;
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + vdi_size);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + vdi_size);
++
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(parent->vditmpbuf[1] + i*line_size,
++ base_off + offset_addr + i*t->set.ostride,
++ line_size);
++ parent->buf1filled = true;
++ } else {
++ offset_addr = t->set.o_off +
++ (t->output.crop.h - vdi_save_lines)*t->set.ostride;
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(base_off + offset_addr + i*t->set.ostride,
++ parent->vditmpbuf[1] + i*line_size,
++ line_size);
++
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + i*t->set.ostride);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + i*t->set.ostride);
++ parent->buf1filled = false;
++ }
++ }
++ /*Down stripe or Down&Right stript*/
++ else if (stripe_mode == (DOWN_STRIPE | RIGHT_STRIPE)) {
++ if (!parent->buf1filled) {
++ offset_addr = t->set.o_off + vdi_save_lines*t->set.ostride;
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + vdi_save_lines*t->set.ostride);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + vdi_save_lines*t->set.ostride);
++
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(parent->vditmpbuf[1] + i*line_size,
++ base_off + offset_addr + i*t->set.ostride,
++ line_size);
++ parent->buf1filled = true;
++ } else {
++ offset_addr = t->set.o_off;
++ for (i = 0; i < vdi_save_lines; i++)
++ memcpy(base_off + offset_addr + i*t->set.ostride,
++ parent->vditmpbuf[1] + i*line_size,
++ line_size);
++
++ dmac_flush_range(base_off + offset_addr,
++ base_off + offset_addr + vdi_save_lines*t->set.ostride);
++ outer_flush_range(t->output.paddr + offset_addr,
++ t->output.paddr + offset_addr + vdi_save_lines*t->set.ostride);
++ parent->buf1filled = false;
++ }
++ }
++ if (!pfn_valid(t->output.paddr >> PAGE_SHIFT))
++ iounmap(base_off);
++ mutex_unlock(lock);
++}
++
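++/*
++ * Tear down a finished (or failed) task: fix up the VDI up/down split
++ * middle lines if needed, free the IRQ and the VDOA DMA buffer, then
++ * disable, unlink and uninitialize all channels used by the task.
++ */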
++static void do_task_release(struct ipu_task_entry *t, int fail)
++{
++ int ret;
++ struct ipu_soc *ipu = t->ipu;
++
++ if (t->input.deinterlace.enable && !fail &&
++ (t->task_no & (UP_STRIPE | DOWN_STRIPE)))
++ vdi_split_process(ipu, t);
++
++ ipu_free_irq(ipu, t->irq, t);
++
++ if (t->vdoa_dma.vaddr)
++ dma_free_coherent(t->dev,
++ t->vdoa_dma.size,
++ t->vdoa_dma.vaddr,
++ t->vdoa_dma.paddr);
++
++ if (only_ic(t->set.mode)) {
++ ret = ipu_disable_channel(ipu, t->set.ic_chan, true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch only_ic",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ if (deinterlace_3_field(t)) {
++ ret = ipu_disable_channel(ipu, t->set.vdi_ic_p_chan,
++ true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch only_ic_p",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ ret = ipu_disable_channel(ipu, t->set.vdi_ic_n_chan,
++ true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch only_ic_n",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ }
++ } else if (only_rot(t->set.mode)) {
++ ret = ipu_disable_channel(ipu, t->set.rot_chan, true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch only_rot",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ } else if (ic_and_rot(t->set.mode)) {
++ ret = ipu_unlink_channels(ipu, t->set.ic_chan, t->set.rot_chan);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_unlink_ch",
++ STATE_UNLINK_CHAN_FAIL, ret);
++ ret = ipu_disable_channel(ipu, t->set.rot_chan, true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch ic_and_rot-rot",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ ret = ipu_disable_channel(ipu, t->set.ic_chan, true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch ic_and_rot-ic",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ if (deinterlace_3_field(t)) {
++ ret = ipu_disable_channel(ipu, t->set.vdi_ic_p_chan,
++ true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch icrot-ic-p",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ ret = ipu_disable_channel(ipu, t->set.vdi_ic_n_chan,
++ true);
++ CHECK_RETCODE_CONT(ret < 0, "ipu_disable_ch icrot-ic-n",
++ STATE_DISABLE_CHAN_FAIL, ret);
++ }
++ }
++
++ if (only_ic(t->set.mode))
++ uninit_ic(ipu, t);
++ else if (only_rot(t->set.mode))
++ uninit_rot(ipu, t);
++ else if (ic_and_rot(t->set.mode)) {
++ uninit_ic(ipu, t);
++ uninit_rot(ipu, t);
++ }
++
++ t->state = STATE_OK;
++ CHECK_PERF(&t->ts_rel);
++ return;
++}
++
++static void do_task_vdoa_only(struct ipu_task_entry *t)
++{
++ int ret;
++
++ ret = init_tiled_ch_bufs(NULL, t);
++ CHECK_RETCODE(ret < 0, "do_vdoa_only", STATE_ERR, out, ret);
++ ret = vdoa_start(t->vdoa_handle, VDOA_DEF_TIMEOUT_MS);
++ vdoa_stop(t->vdoa_handle);
++ CHECK_RETCODE(ret < 0, "vdoa_wait4complete, do_vdoa_only",
++ STATE_VDOA_IRQ_TIMEOUT, out, ret);
++
++ t->state = STATE_OK;
++out:
++ return;
++}
++
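++/*
++ * Run a single task on the IPU: choose the IC/rotation channels, set
++ * them up, request the EOF interrupt, enable the channels and select
++ * their buffers, then wait for completion with a timeout slightly
++ * shorter than the caller's.
++ */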
++static void do_task(struct ipu_task_entry *t)
++{
++ int r_size;
++ int irq;
++ int ret;
++ uint32_t busy;
++ struct ipu_soc *ipu = t->ipu;
++
++ CHECK_PERF(&t->ts_dotask);
++
++ if (!ipu) {
++ t->state = STATE_NO_IPU;
++ return;
++ }
++
++ init_completion(&t->irq_comp);
++ dev_dbg(ipu->dev, "[0x%p]Do task no:0x%x: id %d\n", (void *)t,
++ t->task_no, t->task_id);
++ dump_task_info(t);
++
++ if (t->set.task & IC_PP) {
++ t->set.ic_chan = MEM_PP_MEM;
++ dev_dbg(ipu->dev, "[0x%p]ic channel MEM_PP_MEM\n", (void *)t);
++ } else if (t->set.task & IC_VF) {
++ t->set.ic_chan = MEM_PRP_VF_MEM;
++ dev_dbg(ipu->dev, "[0x%p]ic channel MEM_PRP_VF_MEM\n", (void *)t);
++ } else if (t->set.task & VDI_VF) {
++ if (t->set.mode & VDOA_BAND_MODE) {
++ t->set.ic_chan = MEM_VDI_MEM;
++ if (deinterlace_3_field(t)) {
++ t->set.vdi_ic_p_chan = MEM_VDI_MEM_P;
++ t->set.vdi_ic_n_chan = MEM_VDI_MEM_N;
++ }
++ dev_dbg(ipu->dev, "[0x%p]ic ch MEM_VDI_MEM\n",
++ (void *)t);
++ } else {
++ t->set.ic_chan = MEM_VDI_PRP_VF_MEM;
++ if (deinterlace_3_field(t)) {
++ t->set.vdi_ic_p_chan = MEM_VDI_PRP_VF_MEM_P;
++ t->set.vdi_ic_n_chan = MEM_VDI_PRP_VF_MEM_N;
++ }
++ dev_dbg(ipu->dev,
++ "[0x%p]ic ch MEM_VDI_PRP_VF_MEM\n", t);
++ }
++ }
++
++ if (t->set.task & ROT_PP) {
++ t->set.rot_chan = MEM_ROT_PP_MEM;
++ dev_dbg(ipu->dev, "[0x%p]rot channel MEM_ROT_PP_MEM\n", (void *)t);
++ } else if (t->set.task & ROT_VF) {
++ t->set.rot_chan = MEM_ROT_VF_MEM;
++ dev_dbg(ipu->dev, "[0x%p]rot channel MEM_ROT_VF_MEM\n", (void *)t);
++ }
++
++ if (t->task_id == IPU_TASK_ID_VF)
++ busy = ic_vf_pp_is_busy(ipu, true);
++ else if (t->task_id == IPU_TASK_ID_PP)
++ busy = ic_vf_pp_is_busy(ipu, false);
++ else {
++ dev_err(ipu->dev, "ERR[no:0x%x]ipu task_id:%d invalid!\n",
++ t->task_no, t->task_id);
++ return;
++ }
++ if (busy) {
++ dev_err(ipu->dev, "ERR[0x%p-no:0x%x]ipu task_id:%d busy!\n",
++ (void *)t, t->task_no, t->task_id);
++ t->state = STATE_IPU_BUSY;
++ return;
++ }
++
++ irq = get_irq(t);
++ if (irq < 0) {
++ t->state = STATE_NO_IRQ;
++ return;
++ }
++ t->irq = irq;
++
++ /* channel setup */
++ if (only_ic(t->set.mode)) {
++ dev_dbg(t->dev, "[0x%p]only ic mode\n", (void *)t);
++ ret = init_ic(ipu, t);
++ CHECK_RETCODE(ret < 0, "init_ic only_ic",
++ t->state, chan_setup, ret);
++ } else if (only_rot(t->set.mode)) {
++ dev_dbg(t->dev, "[0x%p]only rot mode\n", (void *)t);
++ ret = init_rot(ipu, t);
++ CHECK_RETCODE(ret < 0, "init_rot only_rot",
++ t->state, chan_setup, ret);
++ } else if (ic_and_rot(t->set.mode)) {
++ int rot_idx = (t->task_id == IPU_TASK_ID_VF) ? 0 : 1;
++
++ dev_dbg(t->dev, "[0x%p]ic + rot mode\n", (void *)t);
++ t->set.r_fmt = t->output.format;
++ if (t->output.rotate >= IPU_ROTATE_90_RIGHT) {
++ t->set.r_width = t->output.crop.h;
++ t->set.r_height = t->output.crop.w;
++ } else {
++ t->set.r_width = t->output.crop.w;
++ t->set.r_height = t->output.crop.h;
++ }
++ t->set.r_stride = t->set.r_width *
++ bytes_per_pixel(t->set.r_fmt);
++ r_size = PAGE_ALIGN(t->set.r_width * t->set.r_height
++ * fmt_to_bpp(t->set.r_fmt)/8);
++
++ if (r_size > ipu->rot_dma[rot_idx].size) {
++ dev_dbg(t->dev, "[0x%p]realloc rot buffer\n", (void *)t);
++
++ if (ipu->rot_dma[rot_idx].vaddr)
++ dma_free_coherent(t->dev,
++ ipu->rot_dma[rot_idx].size,
++ ipu->rot_dma[rot_idx].vaddr,
++ ipu->rot_dma[rot_idx].paddr);
++
++ ipu->rot_dma[rot_idx].size = r_size;
++ ipu->rot_dma[rot_idx].vaddr = dma_zalloc_coherent(t->dev,
++ r_size,
++ &ipu->rot_dma[rot_idx].paddr,
++ GFP_DMA | GFP_KERNEL);
++ CHECK_RETCODE(ipu->rot_dma[rot_idx].vaddr == NULL,
++ "ic_and_rot", STATE_SYS_NO_MEM,
++ chan_setup, -ENOMEM);
++ }
++ t->set.r_paddr = ipu->rot_dma[rot_idx].paddr;
++
++ dev_dbg(t->dev, "[0x%p]rotation:\n", (void *)t);
++ dev_dbg(t->dev, "[0x%p]\tformat = 0x%x\n", (void *)t, t->set.r_fmt);
++ dev_dbg(t->dev, "[0x%p]\twidth = %d\n", (void *)t, t->set.r_width);
++ dev_dbg(t->dev, "[0x%p]\theight = %d\n", (void *)t, t->set.r_height);
++ dev_dbg(t->dev, "[0x%p]\tpaddr = 0x%x\n", (void *)t, t->set.r_paddr);
++ dev_dbg(t->dev, "[0x%p]\trstride = %d\n", (void *)t, t->set.r_stride);
++
++ ret = init_ic(ipu, t);
++ CHECK_RETCODE(ret < 0, "init_ic ic_and_rot",
++ t->state, chan_setup, ret);
++ ret = init_rot(ipu, t);
++ CHECK_RETCODE(ret < 0, "init_rot ic_and_rot",
++ t->state, chan_setup, ret);
++ ret = ipu_link_channels(ipu, t->set.ic_chan,
++ t->set.rot_chan);
++ CHECK_RETCODE(ret < 0, "ipu_link_ch ic_and_rot",
++ STATE_LINK_CHAN_FAIL, chan_setup, ret);
++ } else {
++ dev_err(t->dev, "ERR [0x%p]do task: should not be here\n", t);
++ t->state = STATE_ERR;
++ return;
++ }
++
++ ret = ipu_request_irq(ipu, irq, task_irq_handler, 0, NULL, t);
++ CHECK_RETCODE(ret < 0, "ipu_req_irq",
++ STATE_IRQ_FAIL, chan_setup, ret);
++
++ /* enable/start channel */
++ if (only_ic(t->set.mode)) {
++ ret = ipu_enable_channel(ipu, t->set.ic_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch only_ic",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ if (deinterlace_3_field(t)) {
++ ret = ipu_enable_channel(ipu, t->set.vdi_ic_p_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch only_ic_p",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ ret = ipu_enable_channel(ipu, t->set.vdi_ic_n_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch only_ic_n",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ }
++
++ ret = ipu_select_buffer(ipu, t->set.ic_chan, IPU_OUTPUT_BUFFER,
++ 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_ic",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ if (t->overlay_en) {
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_GRAPH_IN_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_ic_g",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL) {
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_ALPHA_IN_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_ic_a",
++ STATE_SEL_BUF_FAIL, chan_buf,
++ ret);
++ }
++ }
++ if (!(t->set.mode & VDOA_BAND_MODE)) {
++ if (deinterlace_3_field(t))
++ ipu_select_multi_vdi_buffer(ipu, 0);
++ else {
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_INPUT_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_ic_i",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ }
++ }
++ } else if (only_rot(t->set.mode)) {
++ ret = ipu_enable_channel(ipu, t->set.rot_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch only_rot",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ ret = ipu_select_buffer(ipu, t->set.rot_chan,
++ IPU_OUTPUT_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_rot_o",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ ret = ipu_select_buffer(ipu, t->set.rot_chan,
++ IPU_INPUT_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf only_rot_i",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ } else if (ic_and_rot(t->set.mode)) {
++ ret = ipu_enable_channel(ipu, t->set.rot_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch ic_and_rot-rot",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ ret = ipu_enable_channel(ipu, t->set.ic_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch ic_and_rot-ic",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ if (deinterlace_3_field(t)) {
++ ret = ipu_enable_channel(ipu, t->set.vdi_ic_p_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch ic_and_rot-p",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ ret = ipu_enable_channel(ipu, t->set.vdi_ic_n_chan);
++ CHECK_RETCODE(ret < 0, "ipu_enable_ch ic_and_rot-n",
++ STATE_ENABLE_CHAN_FAIL, chan_en, ret);
++ }
++
++ ret = ipu_select_buffer(ipu, t->set.rot_chan,
++ IPU_OUTPUT_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf ic_and_rot-rot-o",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ if (t->overlay_en) {
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_GRAPH_IN_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf ic_and_rot-ic-g",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ if (t->overlay.alpha.mode == IPU_ALPHA_MODE_LOCAL) {
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_ALPHA_IN_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf icrot-ic-a",
++ STATE_SEL_BUF_FAIL,
++ chan_buf, ret);
++ }
++ }
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_OUTPUT_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf ic_and_rot-ic-o",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ if (deinterlace_3_field(t))
++ ipu_select_multi_vdi_buffer(ipu, 0);
++ else {
++ ret = ipu_select_buffer(ipu, t->set.ic_chan,
++ IPU_INPUT_BUFFER, 0);
++ CHECK_RETCODE(ret < 0, "ipu_sel_buf ic_and_rot-ic-i",
++ STATE_SEL_BUF_FAIL, chan_buf, ret);
++ }
++ }
++
++ if (need_split(t))
++ t->state = STATE_IN_PROGRESS;
++
++ if (t->set.mode & VDOA_BAND_MODE) {
++ ret = vdoa_start(t->vdoa_handle, VDOA_DEF_TIMEOUT_MS);
++ CHECK_RETCODE(ret < 0, "vdoa_wait4complete, do_vdoa_band",
++ STATE_VDOA_IRQ_TIMEOUT, chan_rel, ret);
++ }
++
++ CHECK_PERF(&t->ts_waitirq);
++ ret = wait_for_completion_timeout(&t->irq_comp,
++ msecs_to_jiffies(t->timeout - DEF_DELAY_MS));
++ CHECK_PERF(&t->ts_wakeup);
++ CHECK_RETCODE(ret == 0, "wait_for_comp_timeout",
++ STATE_IRQ_TIMEOUT, chan_rel, ret);
++ dev_dbg(t->dev, "[0x%p] no-0x%x ipu irq done!", t, t->task_no);
++
++chan_rel:
++chan_buf:
++chan_en:
++chan_setup:
++ if (t->set.mode & VDOA_BAND_MODE)
++ vdoa_stop(t->vdoa_handle);
++ do_task_release(t, t->state >= STATE_ERR);
++ return;
++}
++
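++/*
++ * The VDI cannot take the full input width here, so process the frame
++ * as two half-width stripes, recomputing crop and offsets per pass.
++ */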
++static void do_task_vdoa_vdi(struct ipu_task_entry *t)
++{
++ int i;
++ int ret;
++ u32 stripe_width;
++
++ /* FIXME: crop mode not supported now */
++ stripe_width = t->input.width >> 1;
++ t->input.crop.pos.x = 0;
++ t->input.crop.pos.y = 0;
++ t->input.crop.w = stripe_width;
++ t->input.crop.h = t->input.height;
++ t->output.crop.w = stripe_width;
++ t->output.crop.h = t->input.height;
++
++ for (i = 0; i < 2; i++) {
++ t->input.crop.pos.x = t->input.crop.pos.x + i * stripe_width;
++ t->output.crop.pos.x = t->output.crop.pos.x + i * stripe_width;
++ /* check input */
++ ret = set_crop(&t->input.crop, t->input.width, t->input.height,
++ t->input.format);
++ if (ret < 0) {
++ ret = STATE_ERR;
++ goto done;
++ } else
++ update_offset(t->input.format,
++ t->input.width, t->input.height,
++ t->input.crop.pos.x,
++ t->input.crop.pos.y,
++ &t->set.i_off, &t->set.i_uoff,
++ &t->set.i_voff, &t->set.istride);
++ dev_dbg(t->dev, "i_off:0x%x, i_uoff:0x%x, istride:%d.\n",
++ t->set.i_off, t->set.i_uoff, t->set.istride);
++ /* check output */
++ ret = set_crop(&t->output.crop, t->input.width,
++ t->output.height, t->output.format);
++ if (ret < 0) {
++ ret = STATE_ERR;
++ goto done;
++ } else
++ update_offset(t->output.format,
++ t->output.width, t->output.height,
++ t->output.crop.pos.x,
++ t->output.crop.pos.y,
++ &t->set.o_off, &t->set.o_uoff,
++ &t->set.o_voff, &t->set.ostride);
++
++ dev_dbg(t->dev, "o_off:0x%x, o_uoff:0x%x, ostride:%d.\n",
++ t->set.o_off, t->set.o_uoff, t->set.ostride);
++
++ do_task(t);
++ }
++
++ return;
++done:
++ dev_err(t->dev, "ERR %s set_crop.\n", __func__);
++ t->state = ret;
++ return;
++}
++
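++/*
++ * Acquire the VDOA/IPU resources, dispatch the task to the proper
++ * handler (VDOA-only, striped VDI or the normal path) and, for split
++ * children, mark the stripe done and wake the waiting parent.
++ */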
++static void get_res_do_task(struct ipu_task_entry *t)
++{
++ uint32_t found;
++ uint32_t split_child;
++ struct mutex *lock;
++
++ found = get_vdoa_ipu_res(t);
++ if (!found) {
++ dev_err(t->dev, "ERR:[0x%p] no-0x%x can not get res\n",
++ t, t->task_no);
++ return;
++ } else {
++ if (t->set.task & VDOA_ONLY)
++ do_task_vdoa_only(t);
++ else if ((IPU_PIX_FMT_TILED_NV12F == t->input.format) &&
++ (t->set.mode & VDOA_BAND_MODE) &&
++ (t->input.crop.w > soc_max_vdi_in_width()))
++ do_task_vdoa_vdi(t);
++ else
++ do_task(t);
++ put_vdoa_ipu_res(t, 0);
++ }
++ if (t->state != STATE_OK) {
++ dev_err(t->dev, "ERR:[0x%p] no-0x%x state: %s\n",
++ t, t->task_no, state_msg[t->state].msg);
++ }
++
++ split_child = need_split(t) && t->parent;
++ if (split_child) {
++ lock = &t->parent->split_lock;
++ mutex_lock(lock);
++ t->split_done = 1;
++ mutex_unlock(lock);
++ wake_up(&t->parent->split_waitq);
++ }
++
++ return;
++}
++
++static void wait_split_task_complete(struct ipu_task_entry *parent,
++ struct ipu_split_task *sp_task, uint32_t size)
++{
++ struct ipu_task_entry *tsk = NULL;
++ int ret = 0, rc;
++ int j, idx = -1;
++ unsigned long flags;
++ struct mutex *lock = &parent->split_lock;
++ int k, busy_vf, busy_pp;
++ struct ipu_soc *ipu;
++ DECLARE_PERF_VAR;
++
++ for (j = 0; j < size; j++) {
++ rc = wait_event_timeout(
++ parent->split_waitq,
++ sp_task_check_done(sp_task, parent, size, &idx),
++ msecs_to_jiffies(parent->timeout - DEF_DELAY_MS));
++ if (!rc) {
++ dev_err(parent->dev,
++ "ERR:[0x%p] no-0x%x, split_task timeout,j:%d,"
++ "size:%d.\n",
++ parent, parent->task_no, j, size);
++ ret = -ETIMEDOUT;
++ goto out;
++ } else {
++ if (idx < 0) {
++ dev_err(parent->dev,
++ "ERR:[0x%p] no-0x%x, invalid task idx:%d\n",
++ parent, parent->task_no, idx);
++ continue;
++ }
++ tsk = sp_task[idx].child_task;
++ mutex_lock(lock);
++ if (!tsk->split_done || !tsk->ipu)
++ dev_err(tsk->dev,
++ "ERR:no-0x%x,split not done:%d/null ipu:0x%p\n",
++ tsk->task_no, tsk->split_done, tsk->ipu);
++ tsk->split_done = 0;
++ mutex_unlock(lock);
++
++ dev_dbg(tsk->dev,
++ "[0x%p] no-0x%x sp_tsk[%d] done,state:%d.\n",
++ tsk, tsk->task_no, idx, tsk->state);
++ #ifdef DBG_IPU_PERF
++ CHECK_PERF(&tsk->ts_rel);
++ PRINT_TASK_STATISTICS;
++ #endif
++ }
++ }
++
++out:
++ if (ret == -ETIMEDOUT) {
++ /* debug */
++ for (k = 0; k < max_ipu_no; k++) {
++ ipu = ipu_get_soc(k);
++ if (IS_ERR(ipu)) {
++ dev_err(parent->dev, "no:0x%x, null ipu:%d\n",
++ parent->task_no, k);
++ } else {
++ busy_vf = ic_vf_pp_is_busy(ipu, true);
++ busy_pp = ic_vf_pp_is_busy(ipu, false);
++ dev_err(parent->dev,
++ "ERR:ipu[%d] busy_vf:%d, busy_pp:%d.\n",
++ k, busy_vf, busy_pp);
++ }
++ }
++ for (k = 0; k < size; k++) {
++ tsk = sp_task[k].child_task;
++ if (!tsk)
++ continue;
++ dev_err(parent->dev,
++ "ERR: sp_task[%d][0x%p] no-0x%x done:%d,"
++ "state:%s,on_list:%d, ipu:0x%p,timeout!\n",
++ k, tsk, tsk->task_no, tsk->split_done,
++ state_msg[tsk->state].msg, tsk->task_in_list,
++ tsk->ipu);
++ }
++ }
++
++ for (j = 0; j < size; j++) {
++ tsk = sp_task[j].child_task;
++ if (!tsk)
++ continue;
++ spin_lock_irqsave(&ipu_task_list_lock, flags);
++ if (tsk->task_in_list) {
++ list_del(&tsk->node);
++ tsk->task_in_list = 0;
++ dev_dbg(tsk->dev,
++ "[0x%p] no-0x%x,id:%d sp_tsk timeout list_del.\n",
++ tsk, tsk->task_no, tsk->task_id);
++ }
++ spin_unlock_irqrestore(&ipu_task_list_lock, flags);
++ if (!tsk->ipu)
++ continue;
++ if (tsk->state != STATE_OK) {
++ dev_err(tsk->dev,
++ "ERR:[0x%p] no-0x%x,id:%d, sp_tsk state: %s\n",
++ tsk, tsk->task_no, tsk->task_id,
++ state_msg[tsk->state].msg);
++ }
++ kref_put(&tsk->refcount, task_mem_free);
++ }
++
++ kfree(parent->vditmpbuf[0]);
++ kfree(parent->vditmpbuf[1]);
++
++ if (ret < 0)
++ parent->state = STATE_TIMEOUT;
++ else
++ parent->state = STATE_OK;
++ return;
++}
++
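++/* Pop the first queued task off the global list, taking a reference. */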
++static inline int find_task(struct ipu_task_entry **t, int thread_id)
++{
++ int found;
++ unsigned long flags;
++ struct ipu_task_entry *tsk;
++ struct list_head *task_list = &ipu_task_list;
++
++ *t = NULL;
++ spin_lock_irqsave(&ipu_task_list_lock, flags);
++ found = !list_empty(task_list);
++ if (found) {
++ tsk = list_first_entry(task_list, struct ipu_task_entry, node);
++ if (tsk->task_in_list) {
++ list_del(&tsk->node);
++ tsk->task_in_list = 0;
++ *t = tsk;
++ kref_get(&tsk->refcount);
++ dev_dbg(tsk->dev,
++ "thread_id:%d,[0x%p] task_no:0x%x,mode:0x%x list_del\n",
++ thread_id, tsk, tsk->task_no, tsk->set.mode);
++ } else
++ dev_err(tsk->dev,
++ "thread_id:%d,task_no:0x%x,mode:0x%x not on list_del\n",
++ thread_id, tsk->task_no, tsk->set.mode);
++ }
++ spin_unlock_irqrestore(&ipu_task_list_lock, flags);
++
++ return found;
++}
++
++static int ipu_task_thread(void *argv)
++{
++ struct ipu_task_entry *tsk;
++ struct ipu_task_entry *sp_tsk0;
++ struct ipu_split_task sp_task[4];
++ /* priority lower than irq_thread */
++ const struct sched_param param = {
++ .sched_priority = MAX_USER_RT_PRIO/2 - 1,
++ };
++ int ret;
++ int curr_thread_id;
++ uint32_t size;
++ unsigned long flags;
++ unsigned int cpu;
++ struct cpumask cpu_mask;
++ struct ipu_thread_data *data = (struct ipu_thread_data *)argv;
++
++ thread_id++;
++ curr_thread_id = thread_id;
++ sched_setscheduler(current, SCHED_FIFO, &param);
++
++ if (!data->is_vdoa) {
++ cpu = cpumask_first(cpu_online_mask);
++ cpumask_set_cpu(cpu, &cpu_mask);
++ ret = sched_setaffinity(data->ipu->thread[data->id]->pid,
++ &cpu_mask);
++ if (ret < 0) {
++ pr_err("%s: sched_setaffinity fail:%d.\n", __func__, ret);
++ }
++ pr_debug("%s: sched_setaffinity cpu:%d.\n", __func__, cpu);
++ }
++
++ while (!kthread_should_stop()) {
++ int split_fail = 0;
++ int split_parent;
++ int split_child;
++
++ wait_event_interruptible(thread_waitq, find_task(&tsk, curr_thread_id));
++
++ if (!tsk) {
++ pr_err("thread:%d can not find task.\n",
++ curr_thread_id);
++ continue;
++ }
++
++ /* note: other threads run split child task */
++ split_parent = need_split(tsk) && !tsk->parent;
++ split_child = need_split(tsk) && tsk->parent;
++ if (split_parent) {
++ if ((tsk->set.split_mode == RL_SPLIT) ||
++ (tsk->set.split_mode == UD_SPLIT))
++ size = 2;
++ else
++ size = 4;
++ ret = queue_split_task(tsk, sp_task, size);
++ if (ret < 0) {
++ split_fail = 1;
++ } else {
++ struct list_head *pos;
++
++ spin_lock_irqsave(&ipu_task_list_lock, flags);
++
++ sp_tsk0 = list_first_entry(&tsk->split_list,
++ struct ipu_task_entry, node);
++ list_del(&sp_tsk0->node);
++
++ list_for_each(pos, &tsk->split_list) {
++ struct ipu_task_entry *tmp;
++
++ tmp = list_entry(pos,
++ struct ipu_task_entry, node);
++ tmp->task_in_list = 1;
++ dev_dbg(tmp->dev,
++ "[0x%p] no-0x%x,id:%d sp_tsk "
++ "add_to_list.\n", tmp,
++ tmp->task_no, tmp->task_id);
++ }
++ /* add to global list */
++ list_splice(&tsk->split_list, &ipu_task_list);
++
++ spin_unlock_irqrestore(&ipu_task_list_lock,
++ flags);
++ /* let the parent thread do the first sp_task */
++ /* FIXME: ensure the correct sequence for split
++ 4size: 5/6->9/a*/
++ if (!sp_tsk0)
++ dev_err(tsk->dev,
++ "ERR: no-0x%x,can not get split_tsk0\n",
++ tsk->task_no);
++ wake_up_interruptible(&thread_waitq);
++ get_res_do_task(sp_tsk0);
++ dev_dbg(sp_tsk0->dev,
++ "thread:%d complete tsk no:0x%x.\n",
++ curr_thread_id, sp_tsk0->task_no);
++ ret = atomic_read(&req_cnt);
++ if (ret > 0) {
++ wake_up(&res_waitq);
++ dev_dbg(sp_tsk0->dev,
++ "sp_tsk0 sche thread:%d no:0x%x,"
++ "req_cnt:%d\n", curr_thread_id,
++ sp_tsk0->task_no, ret);
++ /* For other threads to get_res */
++ schedule();
++ }
++ }
++ } else
++ get_res_do_task(tsk);
++
++ /* wait here for all split sub-tasks to finish (or time out)
++ and then release all resources */
++ if (split_parent && !split_fail)
++ wait_split_task_complete(tsk, sp_task, size);
++
++ if (!split_child) {
++ atomic_inc(&tsk->done);
++ wake_up(&tsk->task_waitq);
++ }
++
++ dev_dbg(tsk->dev, "thread:%d complete tsk no:0x%x-[0x%p].\n",
++ curr_thread_id, tsk->task_no, tsk);
++ ret = atomic_read(&req_cnt);
++ if (ret > 0) {
++ wake_up(&res_waitq);
++ dev_dbg(tsk->dev, "sche thread:%d no:0x%x,req_cnt:%d\n",
++ curr_thread_id, tsk->task_no, ret);
++ /* note: give cpu to other threads to get_res */
++ schedule();
++ }
++
++ kref_put(&tsk->refcount, task_mem_free);
++ }
++
++ pr_info("ERR %s exit.\n", __func__);
++ return 0;
++}
++
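++/*
++ * Validate a task description without running it; the possibly
++ * adjusted input/output/overlay settings are copied back to the user.
++ */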
++int ipu_check_task(struct ipu_task *task)
++{
++ struct ipu_task_entry *tsk;
++ int ret = 0;
++
++ tsk = create_task_entry(task);
++ if (IS_ERR(tsk))
++ return PTR_ERR(tsk);
++
++ ret = check_task(tsk);
++
++ task->input = tsk->input;
++ task->output = tsk->output;
++ task->overlay = tsk->overlay;
++ dump_task_info(tsk);
++
++ kref_put(&tsk->refcount, task_mem_free);
++ if (ret != 0)
++ pr_debug("%s ret:%d.\n", __func__, ret);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(ipu_check_task);
++
++int ipu_queue_task(struct ipu_task *task)
++{
++ struct ipu_task_entry *tsk;
++ unsigned long flags;
++ int ret;
++ u32 tmp_task_no;
++ DECLARE_PERF_VAR;
++
++ tsk = create_task_entry(task);
++ if (IS_ERR(tsk))
++ return PTR_ERR(tsk);
++
++ CHECK_PERF(&tsk->ts_queue);
++ ret = prepare_task(tsk);
++ if (ret < 0)
++ goto done;
++
++ if (need_split(tsk)) {
++ CHECK_PERF(&tsk->ts_dotask);
++ CHECK_PERF(&tsk->ts_waitirq);
++ CHECK_PERF(&tsk->ts_inirq);
++ CHECK_PERF(&tsk->ts_wakeup);
++ }
++
++ /* the last four bits of task_no encode the split task type */
++ tmp_task_no = atomic_inc_return(&frame_no);
++ tsk->task_no = tmp_task_no << 4;
++ init_waitqueue_head(&tsk->task_waitq);
++
++ spin_lock_irqsave(&ipu_task_list_lock, flags);
++ list_add_tail(&tsk->node, &ipu_task_list);
++ tsk->task_in_list = 1;
++ dev_dbg(tsk->dev, "[0x%p,no-0x%x] list_add_tail\n", tsk, tsk->task_no);
++ spin_unlock_irqrestore(&ipu_task_list_lock, flags);
++ wake_up_interruptible(&thread_waitq);
++
++ ret = wait_event_timeout(tsk->task_waitq, atomic_read(&tsk->done),
++ msecs_to_jiffies(tsk->timeout));
++ if (0 == ret) {
++ /* note: the timeout should be larger than the internal timeout! */
++ ret = -ETIMEDOUT;
++ dev_err(tsk->dev, "ERR: [0x%p] no-0x%x, timeout:%dms!\n",
++ tsk, tsk->task_no, tsk->timeout);
++ } else {
++ if (STATE_OK != tsk->state) {
++ dev_err(tsk->dev, "ERR: [0x%p] no-0x%x,state %d: %s\n",
++ tsk, tsk->task_no, tsk->state,
++ state_msg[tsk->state].msg);
++ ret = -ECANCELED;
++ } else
++ ret = 0;
++ }
++
++ spin_lock_irqsave(&ipu_task_list_lock, flags);
++ if (tsk->task_in_list) {
++ list_del(&tsk->node);
++ tsk->task_in_list = 0;
++ dev_dbg(tsk->dev, "[0x%p] no:0x%x list_del\n",
++ tsk, tsk->task_no);
++ }
++ spin_unlock_irqrestore(&ipu_task_list_lock, flags);
++
++#ifdef DBG_IPU_PERF
++ CHECK_PERF(&tsk->ts_rel);
++ PRINT_TASK_STATISTICS;
++ if (ts_frame_avg == 0)
++ ts_frame_avg = ts_frame.tv_nsec / NSEC_PER_USEC +
++ ts_frame.tv_sec * USEC_PER_SEC;
++ else
++ ts_frame_avg = (ts_frame_avg + ts_frame.tv_nsec / NSEC_PER_USEC
++ + ts_frame.tv_sec * USEC_PER_SEC)/2;
++ if (timespec_compare(&ts_frame, &ts_frame_max) > 0)
++ ts_frame_max = ts_frame;
++
++ atomic_inc(&frame_cnt);
++
++ if ((atomic_read(&frame_cnt) % 1000) == 0)
++ pr_debug("ipu_dev: max frame time:%ldus, avg frame time:%dus,"
++ "frame_cnt:%d\n", ts_frame_max.tv_nsec / NSEC_PER_USEC
++ + ts_frame_max.tv_sec * USEC_PER_SEC,
++ ts_frame_avg, atomic_read(&frame_cnt));
++#endif
++done:
++ if (ret < 0)
++ dev_err(tsk->dev, "ERR: no-0x%x,ipu_queue_task err:%d\n",
++ tsk->task_no, ret);
++
++ kref_put(&tsk->refcount, task_mem_free);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(ipu_queue_task);
++
++static int mxc_ipu_open(struct inode *inode, struct file *file)
++{
++ file->private_data = (void *)atomic_inc_return(&file_index);
++ return 0;
++}
++
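++/*
++ * Char-device ioctls: check/queue a task and allocate/free DMA memory
++ * that user space can mmap through this device.
++ */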
++static long mxc_ipu_ioctl(struct file *file,
++ unsigned int cmd, unsigned long arg)
++{
++ int __user *argp = (void __user *)arg;
++ int ret = 0;
++
++ switch (cmd) {
++ case IPU_CHECK_TASK:
++ {
++ struct ipu_task task;
++
++ if (copy_from_user
++ (&task, (struct ipu_task *) arg,
++ sizeof(struct ipu_task)))
++ return -EFAULT;
++ ret = ipu_check_task(&task);
++ if (copy_to_user((struct ipu_task *) arg,
++ &task, sizeof(struct ipu_task)))
++ return -EFAULT;
++ break;
++ }
++ case IPU_QUEUE_TASK:
++ {
++ struct ipu_task task;
++
++ if (copy_from_user
++ (&task, (struct ipu_task *) arg,
++ sizeof(struct ipu_task)))
++ return -EFAULT;
++ ret = ipu_queue_task(&task);
++ break;
++ }
++ case IPU_ALLOC:
++ {
++ int size;
++ struct ipu_alloc_list *mem;
++
++ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
++ if (mem == NULL)
++ return -ENOMEM;
++
++ if (get_user(size, argp))
++ return -EFAULT;
++
++ mem->size = PAGE_ALIGN(size);
++
++ mem->cpu_addr = dma_zalloc_coherent(ipu_dev, size,
++ &mem->phy_addr,
++ GFP_DMA | GFP_KERNEL);
++ if (mem->cpu_addr == NULL) {
++ kfree(mem);
++ return -ENOMEM;
++ }
++ mem->file_index = file->private_data;
++ mutex_lock(&ipu_alloc_lock);
++ list_add(&mem->list, &ipu_alloc_list);
++ mutex_unlock(&ipu_alloc_lock);
++
++ dev_dbg(ipu_dev, "allocated %d bytes @ 0x%08X\n",
++ mem->size, mem->phy_addr);
++
++ if (put_user(mem->phy_addr, argp))
++ return -EFAULT;
++
++ break;
++ }
++ case IPU_FREE:
++ {
++ unsigned long offset;
++ struct ipu_alloc_list *mem;
++
++ if (get_user(offset, argp))
++ return -EFAULT;
++
++ ret = -EINVAL;
++ mutex_lock(&ipu_alloc_lock);
++ list_for_each_entry(mem, &ipu_alloc_list, list) {
++ if (mem->phy_addr == offset) {
++ list_del(&mem->list);
++ dma_free_coherent(ipu_dev,
++ mem->size,
++ mem->cpu_addr,
++ mem->phy_addr);
++ kfree(mem);
++ ret = 0;
++ break;
++ }
++ }
++ mutex_unlock(&ipu_alloc_lock);
++ if (0 == ret)
++ dev_dbg(ipu_dev, "free %d bytes @ 0x%08X\n",
++ mem->size, mem->phy_addr);
++
++ break;
++ }
++ default:
++ break;
++ }
++ return ret;
++}
++
++static int mxc_ipu_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ bool found = false;
++ u32 len;
++ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
++ struct ipu_alloc_list *mem;
++
++ mutex_lock(&ipu_alloc_lock);
++ list_for_each_entry(mem, &ipu_alloc_list, list) {
++ if (offset == mem->phy_addr) {
++ found = true;
++ len = mem->size;
++ break;
++ }
++ }
++ mutex_unlock(&ipu_alloc_lock);
++ if (!found)
++ return -EINVAL;
++
++ if (vma->vm_end - vma->vm_start > len)
++ return -EINVAL;
++
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++
++ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ vma->vm_end - vma->vm_start,
++ vma->vm_page_prot)) {
++ printk(KERN_ERR "mmap failed!\n");
++ return -ENOBUFS;
++ }
++ return 0;
++}
++
++static int mxc_ipu_release(struct inode *inode, struct file *file)
++{
++ struct ipu_alloc_list *mem;
++ struct ipu_alloc_list *n;
++
++ mutex_lock(&ipu_alloc_lock);
++ list_for_each_entry_safe(mem, n, &ipu_alloc_list, list) {
++ if ((mem->cpu_addr != 0) &&
++ (file->private_data == mem->file_index)) {
++ list_del(&mem->list);
++ dma_free_coherent(ipu_dev,
++ mem->size,
++ mem->cpu_addr,
++ mem->phy_addr);
++ dev_dbg(ipu_dev, "rel-free %d bytes @ 0x%08X\n",
++ mem->size, mem->phy_addr);
++ kfree(mem);
++ }
++ }
++ mutex_unlock(&ipu_alloc_lock);
++ atomic_dec(&file_index);
++
++ return 0;
++}
++
++static struct file_operations mxc_ipu_fops = {
++ .owner = THIS_MODULE,
++ .open = mxc_ipu_open,
++ .mmap = mxc_ipu_mmap,
++ .release = mxc_ipu_release,
++ .unlocked_ioctl = mxc_ipu_ioctl,
++};
++
++int register_ipu_device(struct ipu_soc *ipu, int id)
++{
++ int ret = 0;
++ static int idx;
++ static struct ipu_thread_data thread_data[5];
++
++ if (!major) {
++ major = register_chrdev(0, "mxc_ipu", &mxc_ipu_fops);
++ if (major < 0) {
++ printk(KERN_ERR "Unable to register mxc_ipu as a char device\n");
++ ret = major;
++ goto register_cdev_fail;
++ }
++
++ ipu_class = class_create(THIS_MODULE, "mxc_ipu");
++ if (IS_ERR(ipu_class)) {
++ ret = PTR_ERR(ipu_class);
++ goto ipu_class_fail;
++ }
++
++ ipu_dev = device_create(ipu_class, NULL, MKDEV(major, 0),
++ NULL, "mxc_ipu");
++ if (IS_ERR(ipu_dev)) {
++ ret = PTR_ERR(ipu_dev);
++ goto dev_create_fail;
++ }
++ ipu_dev->dma_mask = kmalloc(sizeof(*ipu_dev->dma_mask), GFP_KERNEL);
++ *ipu_dev->dma_mask = DMA_BIT_MASK(32);
++ ipu_dev->coherent_dma_mask = DMA_BIT_MASK(32);
++
++ mutex_init(&ipu_ch_tbl.lock);
++ }
++ max_ipu_no = ++id;
++ ipu->rot_dma[0].size = 0;
++ ipu->rot_dma[1].size = 0;
++
++ thread_data[idx].ipu = ipu;
++ thread_data[idx].id = 0;
++ thread_data[idx].is_vdoa = 0;
++ ipu->thread[0] = kthread_run(ipu_task_thread, &thread_data[idx++],
++ "ipu%d_task", id);
++ if (IS_ERR(ipu->thread[0])) {
++ ret = PTR_ERR(ipu->thread[0]);
++ goto kthread0_fail;
++ }
++
++ thread_data[idx].ipu = ipu;
++ thread_data[idx].id = 1;
++ thread_data[idx].is_vdoa = 0;
++ ipu->thread[1] = kthread_run(ipu_task_thread, &thread_data[idx++],
++ "ipu%d_task", id);
++ if (IS_ERR(ipu->thread[1])) {
++ ret = PTR_ERR(ipu->thread[1]);
++ goto kthread1_fail;
++ }
++
++
++ return ret;
++
++kthread1_fail:
++ kthread_stop(ipu->thread[0]);
++kthread0_fail:
++ if (id == 0)
++ device_destroy(ipu_class, MKDEV(major, 0));
++dev_create_fail:
++ if (id == 0) {
++ class_destroy(ipu_class);
++ }
++ipu_class_fail:
++ if (id == 0)
++ unregister_chrdev(major, "mxc_ipu");
++register_cdev_fail:
++ return ret;
++}
++
++void unregister_ipu_device(struct ipu_soc *ipu, int id)
++{
++ int i;
++
++ kthread_stop(ipu->thread[0]);
++ kthread_stop(ipu->thread[1]);
++ for (i = 0; i < 2; i++) {
++ if (ipu->rot_dma[i].vaddr)
++ dma_free_coherent(ipu_dev,
++ ipu->rot_dma[i].size,
++ ipu->rot_dma[i].vaddr,
++ ipu->rot_dma[i].paddr);
++ }
++
++ if (major) {
++ device_destroy(ipu_class, MKDEV(major, 0));
++ class_destroy(ipu_class);
++ unregister_chrdev(major, "mxc_ipu");
++ major = 0;
++ }
++}
+diff -Nur linux-4.1.3/drivers/mxc/ipu3/ipu_disp.c linux-xbian-imx6/drivers/mxc/ipu3/ipu_disp.c
+--- linux-4.1.3/drivers/mxc/ipu3/ipu_disp.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/ipu3/ipu_disp.c 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,1956 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_disp.c
++ *
++ * @brief IPU display submodule API functions
++ *
++ * @ingroup IPU
++ */
++
++#include <linux/clk.h>
++#include <linux/clk-provider.h>
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/errno.h>
++#include <linux/io.h>
++#include <linux/ipu-v3.h>
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <linux/types.h>
++
++#include <asm/atomic.h>
++
++#include "ipu_param_mem.h"
++#include "ipu_regs.h"
++
++struct dp_csc_param_t {
++ int mode;
++ void *coeff;
++};
++
++#define SYNC_WAVE 0
++#define NULL_WAVE (-1)
++#define ASYNC_SER_WAVE 6
++
++/* DC display ID assignments */
++#define DC_DISP_ID_SYNC(di) (di)
++#define DC_DISP_ID_SERIAL 2
++#define DC_DISP_ID_ASYNC 3
++
++int dmfc_type_setup;
++
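++/*
++ * Partition the DMFC FIFO segments between the DC write channel (1)
++ * and the DP channels (5B/5F) according to the selected layout.
++ */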
++void _ipu_dmfc_init(struct ipu_soc *ipu, int dmfc_type, int first)
++{
++ u32 dmfc_wr_chan, dmfc_dp_chan;
++
++ if (first) {
++ if (dmfc_type_setup > dmfc_type)
++ dmfc_type = dmfc_type_setup;
++ else
++ dmfc_type_setup = dmfc_type;
++
++ /* disable DMFC-IC channel*/
++ ipu_dmfc_write(ipu, 0x2, DMFC_IC_CTRL);
++ } else if (dmfc_type_setup >= DMFC_HIGH_RESOLUTION_DC) {
++ dev_dbg(ipu->dev, "DMFC high resolution has set, will not change\n");
++ return;
++ } else
++ dmfc_type_setup = dmfc_type;
++
++ if (dmfc_type == DMFC_HIGH_RESOLUTION_DC) {
++ /* 1 - segment 0~3;
++ * 5B - segment 4, 5;
++ * 5F - segment 6, 7;
++ * 1C, 2C and 6B, 6F unused;
++ */
++ dev_info(ipu->dev, "IPU DMFC DC HIGH RESOLUTION: 1(0~3), 5B(4,5), 5F(6,7)\n");
++ dmfc_wr_chan = 0x00000088;
++ dmfc_dp_chan = 0x00009694;
++ ipu->dmfc_size_28 = 256*4;
++ ipu->dmfc_size_29 = 0;
++ ipu->dmfc_size_24 = 0;
++ ipu->dmfc_size_27 = 128*4;
++ ipu->dmfc_size_23 = 128*4;
++ } else if (dmfc_type == DMFC_HIGH_RESOLUTION_DP) {
++ /* 1 - segment 0, 1;
++ * 5B - segment 2~5;
++ * 5F - segment 6,7;
++ * 1C, 2C and 6B, 6F unused;
++ */
++ dev_info(ipu->dev, "IPU DMFC DP HIGH RESOLUTION: 1(0,1), 5B(2~5), 5F(6,7)\n");
++ dmfc_wr_chan = 0x00000090;
++ dmfc_dp_chan = 0x0000968a;
++ ipu->dmfc_size_28 = 128*4;
++ ipu->dmfc_size_29 = 0;
++ ipu->dmfc_size_24 = 0;
++ ipu->dmfc_size_27 = 128*4;
++ ipu->dmfc_size_23 = 256*4;
++ } else if (dmfc_type == DMFC_HIGH_RESOLUTION_ONLY_DP) {
++ /* 5B - segment 0~3;
++ * 5F - segment 4~7;
++ * 1, 1C, 2C and 6B, 6F unused;
++ */
++ dev_info(ipu->dev, "IPU DMFC ONLY-DP HIGH RESOLUTION: 5B(0~3), 5F(4~7)\n");
++ dmfc_wr_chan = 0x00000000;
++ dmfc_dp_chan = 0x00008c88;
++ ipu->dmfc_size_28 = 0;
++ ipu->dmfc_size_29 = 0;
++ ipu->dmfc_size_24 = 0;
++ ipu->dmfc_size_27 = 256*4;
++ ipu->dmfc_size_23 = 256*4;
++ } else {
++ /* 1 - segment 0, 1;
++ * 5B - segment 4, 5;
++ * 5F - segment 6, 7;
++ * 1C, 2C and 6B, 6F unused;
++ */
++ dev_info(ipu->dev, "IPU DMFC NORMAL mode: 1(0~1), 5B(4,5), 5F(6,7)\n");
++ dmfc_wr_chan = 0x00000090;
++ dmfc_dp_chan = 0x00009694;
++ ipu->dmfc_size_28 = 128*4;
++ ipu->dmfc_size_29 = 0;
++ ipu->dmfc_size_24 = 0;
++ ipu->dmfc_size_27 = 128*4;
++ ipu->dmfc_size_23 = 128*4;
++ }
++ ipu_dmfc_write(ipu, dmfc_wr_chan, DMFC_WR_CHAN);
++ ipu_dmfc_write(ipu, 0x202020F6, DMFC_WR_CHAN_DEF);
++ ipu_dmfc_write(ipu, dmfc_dp_chan, DMFC_DP_CHAN);
++ /* Enable chan 5 watermark set at 5 bursts and clear at 7 bursts */
++ ipu_dmfc_write(ipu, 0x2020F6F6, DMFC_DP_CHAN_DEF);
++}
++
++static int __init dmfc_setup(char *options)
++{
++ get_option(&options, &dmfc_type_setup);
++ if (dmfc_type_setup > DMFC_HIGH_RESOLUTION_ONLY_DP)
++ dmfc_type_setup = DMFC_HIGH_RESOLUTION_ONLY_DP;
++ return 1;
++}
++__setup("dmfc=", dmfc_setup);
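++/*
++ * Usage sketch: passing e.g. "dmfc=1" on the kernel command line raises the
++ * minimum DMFC layout type selected above (values beyond
++ * DMFC_HIGH_RESOLUTION_ONLY_DP are clamped); the numeric values of the
++ * DMFC_* type constants are defined in the IPU headers, not in this file.
++ */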
++
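++/*
++ * Set the per-channel "wait for end of transfer" bit in DMFC_GENERAL1:
++ * waiting is enabled only while the channel's FIFO allocation from
++ * _ipu_dmfc_init() is large relative to the line width, hence the
++ * size/width ratio checks below.
++ */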
++void _ipu_dmfc_set_wait4eot(struct ipu_soc *ipu, int dma_chan, int width)
++{
++ u32 dmfc_gen1 = ipu_dmfc_read(ipu, DMFC_GENERAL1);
++
++ if (width >= HIGH_RESOLUTION_WIDTH) {
++ if (dma_chan == 23)
++ _ipu_dmfc_init(ipu, DMFC_HIGH_RESOLUTION_DP, 0);
++ else if (dma_chan == 28)
++ _ipu_dmfc_init(ipu, DMFC_HIGH_RESOLUTION_DC, 0);
++ }
++
++ if (dma_chan == 23) { /*5B*/
++ if (ipu->dmfc_size_23/width > 3)
++ dmfc_gen1 |= 1UL << 20;
++ else
++ dmfc_gen1 &= ~(1UL << 20);
++ } else if (dma_chan == 24) { /*6B*/
++ if (ipu->dmfc_size_24/width > 1)
++ dmfc_gen1 |= 1UL << 22;
++ else
++ dmfc_gen1 &= ~(1UL << 22);
++ } else if (dma_chan == 27) { /*5F*/
++ if (ipu->dmfc_size_27/width > 2)
++ dmfc_gen1 |= 1UL << 21;
++ else
++ dmfc_gen1 &= ~(1UL << 21);
++ } else if (dma_chan == 28) { /*1*/
++ if (ipu->dmfc_size_28/width > 2)
++ dmfc_gen1 |= 1UL << 16;
++ else
++ dmfc_gen1 &= ~(1UL << 16);
++ } else if (dma_chan == 29) { /*6F*/
++ if (ipu->dmfc_size_29/width > 1)
++ dmfc_gen1 |= 1UL << 23;
++ else
++ dmfc_gen1 &= ~(1UL << 23);
++ }
++
++ ipu_dmfc_write(ipu, dmfc_gen1, DMFC_GENERAL1);
++}
++
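++/*
++ * The burst size is encoded in the top two bits of the per-channel byte in
++ * DMFC_WR_CHAN/DMFC_DP_CHAN (0x40 = 64, 0x80 = 32 or 20, 0xc0 = 16 pixels
++ * per burst); only channels 23, 27 and 28 are handled here.
++ */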
++void _ipu_dmfc_set_burst_size(struct ipu_soc *ipu, int dma_chan, int burst_size)
++{
++ u32 dmfc_wr_chan = ipu_dmfc_read(ipu, DMFC_WR_CHAN);
++ u32 dmfc_dp_chan = ipu_dmfc_read(ipu, DMFC_DP_CHAN);
++ int dmfc_bs = 0;
++
++ switch (burst_size) {
++ case 64:
++ dmfc_bs = 0x40;
++ break;
++ case 32:
++ case 20:
++ dmfc_bs = 0x80;
++ break;
++ case 16:
++ dmfc_bs = 0xc0;
++ break;
++ default:
++ dev_err(ipu->dev, "Unsupported burst size %d\n",
++ burst_size);
++ return;
++ }
++
++ if (dma_chan == 23) { /*5B*/
++ dmfc_dp_chan &= ~(0xc0);
++ dmfc_dp_chan |= dmfc_bs;
++ } else if (dma_chan == 27) { /*5F*/
++ dmfc_dp_chan &= ~(0xc000);
++ dmfc_dp_chan |= (dmfc_bs << 8);
++ } else if (dma_chan == 28) { /*1*/
++ dmfc_wr_chan &= ~(0xc0);
++ dmfc_wr_chan |= dmfc_bs;
++ }
++
++ ipu_dmfc_write(ipu, dmfc_wr_chan, DMFC_WR_CHAN);
++ ipu_dmfc_write(ipu, dmfc_dp_chan, DMFC_DP_CHAN);
++}
++
++static void _ipu_di_data_wave_config(struct ipu_soc *ipu,
++ int di, int wave_gen,
++ int access_size, int component_size)
++{
++ u32 reg;
++ reg = (access_size << DI_DW_GEN_ACCESS_SIZE_OFFSET) |
++ (component_size << DI_DW_GEN_COMPONENT_SIZE_OFFSET);
++ ipu_di_write(ipu, di, reg, DI_DW_GEN(wave_gen));
++}
++
++static void _ipu_di_data_pin_config(struct ipu_soc *ipu,
++ int di, int wave_gen, int di_pin, int set,
++ int up, int down)
++{
++ u32 reg;
++
++ reg = ipu_di_read(ipu, di, DI_DW_GEN(wave_gen));
++ reg &= ~(0x3 << (di_pin * 2));
++ reg |= set << (di_pin * 2);
++ ipu_di_write(ipu, di, reg, DI_DW_GEN(wave_gen));
++
++ ipu_di_write(ipu, di, (down << 16) | up, DI_DW_SET(wave_gen, set));
++}
++
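++/*
++ * Note on the pre-increments below: the DI_SYNC_* selector constants are
++ * apparently one less than the hardware encoding, so every source selector
++ * is incremented before being written (DI_SYNC_NONE then presumably encodes
++ * as 0, i.e. "no source").
++ */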
++static void _ipu_di_sync_config(struct ipu_soc *ipu,
++ int di, int wave_gen,
++ int run_count, int run_src,
++ int offset_count, int offset_src,
++ int repeat_count, int cnt_clr_src,
++ int cnt_polarity_gen_en,
++ int cnt_polarity_clr_src,
++ int cnt_polarity_trigger_src,
++ int cnt_up, int cnt_down)
++{
++ u32 reg;
++
++ if ((run_count >= 0x1000) || (offset_count >= 0x1000) || (repeat_count >= 0x1000) ||
++ (cnt_up >= 0x400) || (cnt_down >= 0x400)) {
++ dev_err(ipu->dev, "DI%d counters out of range.\n", di);
++ return;
++ }
++
++ reg = (run_count << 19) | (++run_src << 16) |
++ (offset_count << 3) | ++offset_src;
++ ipu_di_write(ipu, di, reg, DI_SW_GEN0(wave_gen));
++ reg = (cnt_polarity_gen_en << 29) | (++cnt_clr_src << 25) |
++ (++cnt_polarity_trigger_src << 12) | (++cnt_polarity_clr_src << 9);
++ reg |= (cnt_down << 16) | cnt_up;
++ if (repeat_count == 0) {
++ /* Enable auto reload */
++ reg |= 0x10000000;
++ }
++ ipu_di_write(ipu, di, reg, DI_SW_GEN1(wave_gen));
++ reg = ipu_di_read(ipu, di, DI_STP_REP(wave_gen));
++ reg &= ~(0xFFFF << (16 * ((wave_gen - 1) & 0x1)));
++ reg |= repeat_count << (16 * ((wave_gen - 1) & 0x1));
++ ipu_di_write(ipu, di, reg, DI_STP_REP(wave_gen));
++}
++
++static void _ipu_dc_map_link(struct ipu_soc *ipu,
++ int current_map,
++ int base_map_0, int buf_num_0,
++ int base_map_1, int buf_num_1,
++ int base_map_2, int buf_num_2)
++{
++ int ptr_0 = base_map_0 * 3 + buf_num_0;
++ int ptr_1 = base_map_1 * 3 + buf_num_1;
++ int ptr_2 = base_map_2 * 3 + buf_num_2;
++ int ptr;
++ u32 reg;
++ ptr = (ptr_2 << 10) + (ptr_1 << 5) + ptr_0;
++
++ reg = ipu_dc_read(ipu, DC_MAP_CONF_PTR(current_map));
++ reg &= ~(0x1F << ((16 * (current_map & 0x1))));
++ reg |= ptr << ((16 * (current_map & 0x1)));
++ ipu_dc_write(ipu, reg, DC_MAP_CONF_PTR(current_map));
++}
++
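++/*
++ * Each DC map places one pixel on the display data bus using three byte
++ * lanes. Per lane, "offset" appears to be the bus bit position of the
++ * component's MSB and "mask" selects which bits of the 8-bit component are
++ * driven (e.g. 0xFC for a 6-bit bus lane). Map pointers are 5 bits wide,
++ * packed three lanes per map and two maps per DC_MAP_CONF_PTR register.
++ */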
++static void _ipu_dc_map_config(struct ipu_soc *ipu,
++ int map, int byte_num, int offset, int mask)
++{
++ int ptr = map * 3 + byte_num;
++ u32 reg;
++
++ reg = ipu_dc_read(ipu, DC_MAP_CONF_VAL(ptr));
++ reg &= ~(0xFFFF << (16 * (ptr & 0x1)));
++ reg |= ((offset << 8) | mask) << (16 * (ptr & 0x1));
++ ipu_dc_write(ipu, reg, DC_MAP_CONF_VAL(ptr));
++
++ reg = ipu_dc_read(ipu, DC_MAP_CONF_PTR(map));
++ reg &= ~(0x1F << ((16 * (map & 0x1)) + (5 * byte_num)));
++ reg |= ptr << ((16 * (map & 0x1)) + (5 * byte_num));
++ ipu_dc_write(ipu, reg, DC_MAP_CONF_PTR(map));
++}
++
++static void _ipu_dc_map_clear(struct ipu_soc *ipu, int map)
++{
++ u32 reg = ipu_dc_read(ipu, DC_MAP_CONF_PTR(map));
++ ipu_dc_write(ipu, reg & ~(0xFFFF << (16 * (map & 0x1))),
++ DC_MAP_CONF_PTR(map));
++}
++
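++/*
++ * Each DC template microcode entry is 64 bits wide, stored as two 32-bit
++ * writes at byte offsets word*8 and word*8 + 4. WRG carries a 24-bit
++ * literal operand and no map, so its field layout differs from the
++ * mapped-data opcodes handled in the else branch.
++ */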
++static void _ipu_dc_write_tmpl(struct ipu_soc *ipu,
++ int word, u32 opcode, u32 operand, int map,
++ int wave, int glue, int sync, int stop)
++{
++ u32 reg;
++
++ if (opcode == WRG) {
++ reg = sync;
++ reg |= (glue << 4);
++ reg |= (++wave << 11);
++ reg |= ((operand & 0x1FFFF) << 15);
++ ipu_dc_tmpl_write(ipu, reg, word * 8);
++
++ reg = (operand >> 17);
++ reg |= opcode << 7;
++ reg |= (stop << 9);
++ ipu_dc_tmpl_write(ipu, reg, word * 8 + 4);
++ } else {
++ reg = sync;
++ reg |= (glue << 4);
++ reg |= (++wave << 11);
++ reg |= (++map << 15);
++ reg |= (operand << 20) & 0xFFF00000;
++ ipu_dc_tmpl_write(ipu, reg, word * 8);
++
++ reg = (operand >> 12);
++ reg |= opcode << 4;
++ reg |= (stop << 9);
++ ipu_dc_tmpl_write(ipu, reg, word * 8 + 4);
++ }
++}
++
++static void _ipu_dc_link_event(struct ipu_soc *ipu,
++ int chan, int event, int addr, int priority)
++{
++ u32 reg;
++ u32 address_shift;
++ if (event < DC_EVEN_UGDE0) {
++ reg = ipu_dc_read(ipu, DC_RL_CH(chan, event));
++ reg &= ~(0xFFFF << (16 * (event & 0x1)));
++ reg |= ((addr << 8) | priority) << (16 * (event & 0x1));
++ ipu_dc_write(ipu, reg, DC_RL_CH(chan, event));
++ } else {
++ reg = ipu_dc_read(ipu, DC_UGDE_0((event - DC_EVEN_UGDE0) / 2));
++ if ((event - DC_EVEN_UGDE0) & 0x1) {
++ reg &= ~(0x2FF << 16);
++ reg |= (addr << 16);
++ reg |= priority ? (2 << 24) : 0x0;
++ } else {
++ reg &= ~0xFC00FFFF;
++ if (priority)
++ chan = (chan >> 1) +
++ ((((chan & 0x1) + ((chan & 0x2) >> 1))) | (chan >> 3));
++ else
++ chan = 0x7;
++ address_shift = ((event - DC_EVEN_UGDE0) >> 1) ? 7 : 8;
++ reg |= (addr << address_shift) | (priority << 3) | chan;
++ }
++ ipu_dc_write(ipu, reg, DC_UGDE_0((event - DC_EVEN_UGDE0) / 2));
++ }
++}
++
++/* Y = R * 1.200 + G * 2.343 + B * .453 + 0.250;
++   U = R * -.672 + G * -1.328 + B * 2.000 + 512.250;
++   V = R * 2.000 + G * -1.672 + B * -.328 + 512.250; */
++static const int rgb2ycbcr_coeff[5][3] = {
++ {0x4D, 0x96, 0x1D},
++ {-0x2B, -0x55, 0x80},
++ {0x80, -0x6B, -0x15},
++ {0x0000, 0x0200, 0x0200}, /* B0, B1, B2 */
++ {0x2, 0x2, 0x2}, /* S0, S1, S2 */
++};
++
++/* R = (1.164 * (Y - 16)) + (1.596 * (Cr - 128));
++ G = (1.164 * (Y - 16)) - (0.392 * (Cb - 128)) - (0.813 * (Cr - 128));
++   B = (1.164 * (Y - 16)) + (2.017 * (Cb - 128)); */
++static const int ycbcr2rgb_coeff[5][3] = {
++ {0x095, 0x000, 0x0CC},
++ {0x095, 0x3CE, 0x398},
++ {0x095, 0x0FF, 0x000},
++ {0x3E42, 0x010A, 0x3DD6}, /*B0,B1,B2 */
++ {0x1, 0x1, 0x1}, /*S0,S1,S2 */
++};
++
++#define mask_a(a) ((u32)(a) & 0x3FF)
++#define mask_b(b) ((u32)(b) & 0x3FFF)
++
++/* Please keep S0, S1 and S2 as 0x2 when using this conversion */
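++/*
++ * This helper applies the rgb2ycbcr_coeff table in software (coefficients
++ * in 1/256 units, offsets in 1/4 units, rounded to nearest) so that color
++ * keys converted here match what the DP CSC produces. Worked example:
++ * white (255, 255, 255) gives Y = (255*(0x4D+0x96+0x1D)/16 + 0*4 + 8)/16
++ * = 255 and U = V = (0/16 + 0x200*4 + 8)/16 = 128.
++ */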
++static int _rgb_to_yuv(int n, int red, int green, int blue)
++{
++ int c;
++ c = red * rgb2ycbcr_coeff[n][0];
++ c += green * rgb2ycbcr_coeff[n][1];
++ c += blue * rgb2ycbcr_coeff[n][2];
++ c /= 16;
++ c += rgb2ycbcr_coeff[3][n] * 4;
++ c += 8;
++ c /= 16;
++ if (c < 0)
++ c = 0;
++ if (c > 255)
++ c = 255;
++ return c;
++}
++
++/*
++ * Row is for BG: RGB2YUV YUV2RGB RGB2RGB YUV2YUV CSC_NONE
++ * Column is for FG: RGB2YUV YUV2RGB RGB2RGB YUV2YUV CSC_NONE
++ */
++static struct dp_csc_param_t dp_csc_array[CSC_NUM][CSC_NUM] = {
++{{DP_COM_CONF_CSC_DEF_BOTH, &rgb2ycbcr_coeff}, {0, 0}, {0, 0}, {DP_COM_CONF_CSC_DEF_BG, &rgb2ycbcr_coeff}, {DP_COM_CONF_CSC_DEF_BG, &rgb2ycbcr_coeff} },
++{{0, 0}, {DP_COM_CONF_CSC_DEF_BOTH, &ycbcr2rgb_coeff}, {DP_COM_CONF_CSC_DEF_BG, &ycbcr2rgb_coeff}, {0, 0}, {DP_COM_CONF_CSC_DEF_BG, &ycbcr2rgb_coeff} },
++{{0, 0}, {DP_COM_CONF_CSC_DEF_FG, &ycbcr2rgb_coeff}, {0, 0}, {0, 0}, {0, 0} },
++{{DP_COM_CONF_CSC_DEF_FG, &rgb2ycbcr_coeff}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
++{{DP_COM_CONF_CSC_DEF_FG, &rgb2ycbcr_coeff}, {DP_COM_CONF_CSC_DEF_FG, &ycbcr2rgb_coeff}, {0, 0}, {0, 0}, {0, 0} }
++};
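++/*
++ * A {0, 0} entry writes mode 0 (clearing the CSC mode field, presumably
++ * disabling conversion) and leaves the coefficient registers untouched.
++ */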
++
++void __ipu_dp_csc_setup(struct ipu_soc *ipu,
++ int dp, struct dp_csc_param_t dp_csc_param,
++ bool srm_mode_update)
++{
++ u32 reg;
++ const int (*coeff)[5][3];
++
++ if (dp_csc_param.mode >= 0) {
++ reg = ipu_dp_read(ipu, DP_COM_CONF(dp));
++ reg &= ~DP_COM_CONF_CSC_DEF_MASK;
++ reg |= dp_csc_param.mode;
++ ipu_dp_write(ipu, reg, DP_COM_CONF(dp));
++ }
++
++ coeff = dp_csc_param.coeff;
++
++ if (coeff) {
++ ipu_dp_write(ipu, mask_a((*coeff)[0][0]) |
++ (mask_a((*coeff)[0][1]) << 16), DP_CSC_A_0(dp));
++ ipu_dp_write(ipu, mask_a((*coeff)[0][2]) |
++ (mask_a((*coeff)[1][0]) << 16), DP_CSC_A_1(dp));
++ ipu_dp_write(ipu, mask_a((*coeff)[1][1]) |
++ (mask_a((*coeff)[1][2]) << 16), DP_CSC_A_2(dp));
++ ipu_dp_write(ipu, mask_a((*coeff)[2][0]) |
++ (mask_a((*coeff)[2][1]) << 16), DP_CSC_A_3(dp));
++ ipu_dp_write(ipu, mask_a((*coeff)[2][2]) |
++ (mask_b((*coeff)[3][0]) << 16) |
++ ((*coeff)[4][0] << 30), DP_CSC_0(dp));
++ ipu_dp_write(ipu, mask_b((*coeff)[3][1]) | ((*coeff)[4][1] << 14) |
++ (mask_b((*coeff)[3][2]) << 16) |
++ ((*coeff)[4][2] << 30), DP_CSC_1(dp));
++ }
++
++ if (srm_mode_update) {
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++ }
++}
++
++int _ipu_dp_init(struct ipu_soc *ipu,
++ ipu_channel_t channel, uint32_t in_pixel_fmt,
++ uint32_t out_pixel_fmt)
++{
++ int in_fmt, out_fmt;
++ int dp;
++	bool partial = false;
++ uint32_t reg;
++
++ if (channel == MEM_FG_SYNC) {
++ dp = DP_SYNC;
++ partial = true;
++ } else if (channel == MEM_BG_SYNC) {
++ dp = DP_SYNC;
++ partial = false;
++ } else if (channel == MEM_BG_ASYNC0) {
++ dp = DP_ASYNC0;
++ partial = false;
++ } else {
++ return -EINVAL;
++ }
++
++ in_fmt = format_to_colorspace(in_pixel_fmt);
++ out_fmt = format_to_colorspace(out_pixel_fmt);
++
++ if (partial) {
++ if (in_fmt == RGB) {
++ if (out_fmt == RGB)
++ ipu->fg_csc_type = RGB2RGB;
++ else
++ ipu->fg_csc_type = RGB2YUV;
++ } else {
++ if (out_fmt == RGB)
++ ipu->fg_csc_type = YUV2RGB;
++ else
++ ipu->fg_csc_type = YUV2YUV;
++ }
++ } else {
++ if (in_fmt == RGB) {
++ if (out_fmt == RGB)
++ ipu->bg_csc_type = RGB2RGB;
++ else
++ ipu->bg_csc_type = RGB2YUV;
++ } else {
++ if (out_fmt == RGB)
++ ipu->bg_csc_type = YUV2RGB;
++ else
++ ipu->bg_csc_type = YUV2YUV;
++ }
++ }
++
++ /* Transform color key from rgb to yuv if CSC is enabled */
++ reg = ipu_dp_read(ipu, DP_COM_CONF(dp));
++ if (ipu->color_key_4rgb && (reg & DP_COM_CONF_GWCKE) &&
++ (((ipu->fg_csc_type == RGB2YUV) && (ipu->bg_csc_type == YUV2YUV)) ||
++ ((ipu->fg_csc_type == YUV2YUV) && (ipu->bg_csc_type == RGB2YUV)) ||
++ ((ipu->fg_csc_type == YUV2YUV) && (ipu->bg_csc_type == YUV2YUV)) ||
++ ((ipu->fg_csc_type == YUV2RGB) && (ipu->bg_csc_type == YUV2RGB)))) {
++ int red, green, blue;
++ int y, u, v;
++ uint32_t color_key = ipu_dp_read(ipu, DP_GRAPH_WIND_CTRL(dp)) & 0xFFFFFFL;
++
++ dev_dbg(ipu->dev, "_ipu_dp_init color key 0x%x need change to yuv fmt!\n", color_key);
++
++ red = (color_key >> 16) & 0xFF;
++ green = (color_key >> 8) & 0xFF;
++ blue = color_key & 0xFF;
++
++ y = _rgb_to_yuv(0, red, green, blue);
++ u = _rgb_to_yuv(1, red, green, blue);
++ v = _rgb_to_yuv(2, red, green, blue);
++ color_key = (y << 16) | (u << 8) | v;
++
++ reg = ipu_dp_read(ipu, DP_GRAPH_WIND_CTRL(dp)) & 0xFF000000L;
++ ipu_dp_write(ipu, reg | color_key, DP_GRAPH_WIND_CTRL(dp));
++ ipu->color_key_4rgb = false;
++
++ dev_dbg(ipu->dev, "_ipu_dp_init color key change to yuv fmt 0x%x!\n", color_key);
++ }
++
++ __ipu_dp_csc_setup(ipu, dp, dp_csc_array[ipu->bg_csc_type][ipu->fg_csc_type], true);
++
++ return 0;
++}
++
++void _ipu_dp_uninit(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ int dp;
++	bool partial = false;
++
++ if (channel == MEM_FG_SYNC) {
++ dp = DP_SYNC;
++ partial = true;
++ } else if (channel == MEM_BG_SYNC) {
++ dp = DP_SYNC;
++ partial = false;
++ } else if (channel == MEM_BG_ASYNC0) {
++ dp = DP_ASYNC0;
++ partial = false;
++ } else {
++ return;
++ }
++
++ if (partial)
++ ipu->fg_csc_type = CSC_NONE;
++ else
++ ipu->bg_csc_type = CSC_NONE;
++
++ __ipu_dp_csc_setup(ipu, dp, dp_csc_array[ipu->bg_csc_type][ipu->fg_csc_type], false);
++}
++
++void _ipu_dc_init(struct ipu_soc *ipu, int dc_chan, int di, bool interlaced, uint32_t pixel_fmt)
++{
++ u32 reg = 0;
++
++ if ((dc_chan == 1) || (dc_chan == 5)) {
++ if (interlaced) {
++ if (di) {
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NL, 1, 3);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOL, 1, 2);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA, 1, 1);
++ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
++ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
++ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
++ (pixel_fmt == IPU_PIX_FMT_VYUY)) {
++ _ipu_dc_link_event(ipu, dc_chan, DC_ODD_UGDE1, 9, 5);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVEN_UGDE1, 8, 5);
++ }
++ } else {
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NL, 0, 3);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOL, 0, 2);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA, 0, 1);
++ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
++ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
++ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
++ (pixel_fmt == IPU_PIX_FMT_VYUY)) {
++ _ipu_dc_link_event(ipu, dc_chan, DC_ODD_UGDE0, 10, 5);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVEN_UGDE0, 11, 5);
++ }
++ }
++ } else {
++ if (di) {
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NL, 2, 3);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOL, 3, 2);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA, 1, 1);
++ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
++ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
++ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
++ (pixel_fmt == IPU_PIX_FMT_VYUY)) {
++ _ipu_dc_link_event(ipu, dc_chan, DC_ODD_UGDE1, 9, 5);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVEN_UGDE1, 8, 5);
++ }
++ } else {
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NL, 5, 3);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOL, 6, 2);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA, 12, 1);
++ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
++ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
++ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
++ (pixel_fmt == IPU_PIX_FMT_VYUY)) {
++ _ipu_dc_link_event(ipu, dc_chan, DC_ODD_UGDE0, 10, 5);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVEN_UGDE0, 11, 5);
++ }
++ }
++ }
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NF, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NFIELD, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOF, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOFIELD, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR, 0, 0);
++
++ reg = 0x2;
++ reg |= DC_DISP_ID_SYNC(di) << DC_WR_CH_CONF_PROG_DISP_ID_OFFSET;
++ reg |= di << 2;
++ if (interlaced)
++ reg |= DC_WR_CH_CONF_FIELD_MODE;
++ } else if ((dc_chan == 8) || (dc_chan == 9)) {
++ /* async channels */
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_W_0, 0x64, 1);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_W_1, 0x64, 1);
++
++ reg = 0x3;
++ reg |= DC_DISP_ID_SERIAL << DC_WR_CH_CONF_PROG_DISP_ID_OFFSET;
++ }
++ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(dc_chan));
++
++ ipu_dc_write(ipu, 0x00000000, DC_WR_CH_ADDR(dc_chan));
++
++ ipu_dc_write(ipu, 0x00000084, DC_GEN);
++}
++
++void _ipu_dc_uninit(struct ipu_soc *ipu, int dc_chan)
++{
++ if ((dc_chan == 1) || (dc_chan == 5)) {
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NL, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOL, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NF, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NFIELD, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOF, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_EOFIELD, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_ODD_UGDE0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVEN_UGDE0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_ODD_UGDE1, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVEN_UGDE1, 0, 0);
++ } else if ((dc_chan == 8) || (dc_chan == 9)) {
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR_W_0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR_W_1, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN_W_0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN_W_1, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_W_0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_W_1, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR_R_0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_ADDR_R_1, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN_R_0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_CHAN_R_1, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_R_0, 0, 0);
++ _ipu_dc_link_event(ipu, dc_chan, DC_EVT_NEW_DATA_R_1, 0, 0);
++ }
++}
++
++int _ipu_disp_chan_is_interlaced(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ if (channel == MEM_DC_SYNC)
++ return !!(ipu_dc_read(ipu, DC_WR_CH_CONF_1) &
++ DC_WR_CH_CONF_FIELD_MODE);
++ else if ((channel == MEM_BG_SYNC) || (channel == MEM_FG_SYNC))
++ return !!(ipu_dc_read(ipu, DC_WR_CH_CONF_5) &
++ DC_WR_CH_CONF_FIELD_MODE);
++ return 0;
++}
++
++void _ipu_dp_dc_enable(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ int di;
++ uint32_t reg;
++ uint32_t dc_chan;
++ int irq = 0;
++
++ if (channel == MEM_FG_SYNC)
++ irq = IPU_IRQ_DP_SF_END;
++ else if (channel == MEM_DC_SYNC)
++ dc_chan = 1;
++ else if (channel == MEM_BG_SYNC)
++ dc_chan = 5;
++ else
++ return;
++
++ if (channel == MEM_FG_SYNC) {
++ /* Enable FG channel */
++ reg = ipu_dp_read(ipu, DP_COM_CONF(DP_SYNC));
++ ipu_dp_write(ipu, reg | DP_COM_CONF_FG_EN, DP_COM_CONF(DP_SYNC));
++
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++ return;
++ } else if (channel == MEM_BG_SYNC) {
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++ }
++
++ di = ipu->dc_di_assignment[dc_chan];
++
++	/* Make sure the other DC sync channel is not assigned the same DI */
++ reg = ipu_dc_read(ipu, DC_WR_CH_CONF(6 - dc_chan));
++ if ((di << 2) == (reg & DC_WR_CH_CONF_PROG_DI_ID)) {
++ reg &= ~DC_WR_CH_CONF_PROG_DI_ID;
++ reg |= di ? 0 : DC_WR_CH_CONF_PROG_DI_ID;
++ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(6 - dc_chan));
++ }
++
++ reg = ipu_dc_read(ipu, DC_WR_CH_CONF(dc_chan));
++ reg |= 4 << DC_WR_CH_CONF_PROG_TYPE_OFFSET;
++ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(dc_chan));
++
++ clk_prepare_enable(ipu->pixel_clk[di]);
++}
++
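++/*
++ * End-of-frame IRQ handler used by _ipu_dp_dc_disable(): unless a channel
++ * swap was requested it stops the DC write channel, releases the DI
++ * counter, and then wakes the waiter.
++ */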
++static irqreturn_t dc_irq_handler(int irq, void *dev_id)
++{
++ struct ipu_soc *ipu = dev_id;
++ struct completion *comp = &ipu->dc_comp;
++ uint32_t reg;
++ uint32_t dc_chan;
++
++ if (irq == IPU_IRQ_DC_FC_1)
++ dc_chan = 1;
++ else
++ dc_chan = 5;
++
++ if (!ipu->dc_swap) {
++ reg = ipu_dc_read(ipu, DC_WR_CH_CONF(dc_chan));
++ reg &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
++ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(dc_chan));
++
++ reg = ipu_cm_read(ipu, IPU_DISP_GEN);
++ if (ipu->dc_di_assignment[dc_chan])
++ reg &= ~DI1_COUNTER_RELEASE;
++ else
++ reg &= ~DI0_COUNTER_RELEASE;
++ ipu_cm_write(ipu, reg, IPU_DISP_GEN);
++ }
++
++ complete(comp);
++ return IRQ_HANDLED;
++}
++
++void _ipu_dp_dc_disable(struct ipu_soc *ipu, ipu_channel_t channel, bool swap)
++{
++ int ret;
++ uint32_t reg;
++ uint32_t csc;
++ uint32_t dc_chan;
++ int irq = 0;
++ int timeout = 50;
++
++ ipu->dc_swap = swap;
++
++ if (channel == MEM_DC_SYNC) {
++ dc_chan = 1;
++ irq = IPU_IRQ_DC_FC_1;
++ } else if (channel == MEM_BG_SYNC) {
++ dc_chan = 5;
++ irq = IPU_IRQ_DP_SF_END;
++ } else if (channel == MEM_FG_SYNC) {
++ /* Disable FG channel */
++ dc_chan = 5;
++
++ reg = ipu_dp_read(ipu, DP_COM_CONF(DP_SYNC));
++ csc = reg & DP_COM_CONF_CSC_DEF_MASK;
++ if (csc == DP_COM_CONF_CSC_DEF_FG)
++ reg &= ~DP_COM_CONF_CSC_DEF_MASK;
++
++ reg &= ~DP_COM_CONF_FG_EN;
++ ipu_dp_write(ipu, reg, DP_COM_CONF(DP_SYNC));
++
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++
++ if (ipu_is_channel_busy(ipu, MEM_BG_SYNC)) {
++ ipu_cm_write(ipu, IPUIRQ_2_MASK(IPU_IRQ_DP_SF_END),
++ IPUIRQ_2_STATREG(IPU_IRQ_DP_SF_END));
++ while ((ipu_cm_read(ipu, IPUIRQ_2_STATREG(IPU_IRQ_DP_SF_END)) &
++ IPUIRQ_2_MASK(IPU_IRQ_DP_SF_END)) == 0) {
++ msleep(2);
++ timeout -= 2;
++ if (timeout <= 0)
++ break;
++ }
++ }
++ return;
++ } else {
++ return;
++ }
++
++ init_completion(&ipu->dc_comp);
++ ret = ipu_request_irq(ipu, irq, dc_irq_handler, 0, NULL, ipu);
++ if (ret < 0) {
++ dev_err(ipu->dev, "DC irq %d in use\n", irq);
++ return;
++ }
++ ret = wait_for_completion_timeout(&ipu->dc_comp, msecs_to_jiffies(50));
++ ipu_free_irq(ipu, irq, ipu);
++ dev_dbg(ipu->dev, "DC stop timeout - %d * 10ms\n", 5 - ret);
++
++ if (ipu->dc_swap) {
++ /* Swap DC channel 1 and 5 settings, and disable old dc chan */
++ reg = ipu_dc_read(ipu, DC_WR_CH_CONF(dc_chan));
++ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(6 - dc_chan));
++ reg &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
++ reg ^= DC_WR_CH_CONF_PROG_DI_ID;
++ ipu_dc_write(ipu, reg, DC_WR_CH_CONF(dc_chan));
++ }
++}
++
++void _ipu_init_dc_mappings(struct ipu_soc *ipu)
++{
++ /* IPU_PIX_FMT_RGB24 */
++ _ipu_dc_map_clear(ipu, 0);
++ _ipu_dc_map_config(ipu, 0, 0, 7, 0xFF);
++ _ipu_dc_map_config(ipu, 0, 1, 15, 0xFF);
++ _ipu_dc_map_config(ipu, 0, 2, 23, 0xFF);
++
++ /* IPU_PIX_FMT_RGB666 */
++ _ipu_dc_map_clear(ipu, 1);
++ _ipu_dc_map_config(ipu, 1, 0, 5, 0xFC);
++ _ipu_dc_map_config(ipu, 1, 1, 11, 0xFC);
++ _ipu_dc_map_config(ipu, 1, 2, 17, 0xFC);
++
++ /* IPU_PIX_FMT_YUV444 */
++ _ipu_dc_map_clear(ipu, 2);
++ _ipu_dc_map_config(ipu, 2, 0, 15, 0xFF);
++ _ipu_dc_map_config(ipu, 2, 1, 23, 0xFF);
++ _ipu_dc_map_config(ipu, 2, 2, 7, 0xFF);
++
++ /* IPU_PIX_FMT_RGB565 */
++ _ipu_dc_map_clear(ipu, 3);
++ _ipu_dc_map_config(ipu, 3, 0, 4, 0xF8);
++ _ipu_dc_map_config(ipu, 3, 1, 10, 0xFC);
++ _ipu_dc_map_config(ipu, 3, 2, 15, 0xF8);
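++	/* e.g. RGB565 above: the three lanes put their MSBs at bus bits 4, 10
++	 * and 15 (blue, green, red on a 16-bit bus), driving only the top
++	 * 5/6/5 bits of each 8-bit component */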
++
++ /* IPU_PIX_FMT_LVDS666 */
++ _ipu_dc_map_clear(ipu, 4);
++ _ipu_dc_map_config(ipu, 4, 0, 5, 0xFC);
++ _ipu_dc_map_config(ipu, 4, 1, 13, 0xFC);
++ _ipu_dc_map_config(ipu, 4, 2, 21, 0xFC);
++
++ /* IPU_PIX_FMT_VYUY 16bit width */
++ _ipu_dc_map_clear(ipu, 5);
++ _ipu_dc_map_config(ipu, 5, 0, 7, 0xFF);
++ _ipu_dc_map_config(ipu, 5, 1, 0, 0x0);
++ _ipu_dc_map_config(ipu, 5, 2, 15, 0xFF);
++ _ipu_dc_map_clear(ipu, 6);
++ _ipu_dc_map_config(ipu, 6, 0, 0, 0x0);
++ _ipu_dc_map_config(ipu, 6, 1, 7, 0xFF);
++ _ipu_dc_map_config(ipu, 6, 2, 15, 0xFF);
++
++ /* IPU_PIX_FMT_UYUV 16bit width */
++ _ipu_dc_map_clear(ipu, 7);
++ _ipu_dc_map_link(ipu, 7, 6, 0, 6, 1, 6, 2);
++ _ipu_dc_map_clear(ipu, 8);
++ _ipu_dc_map_link(ipu, 8, 5, 0, 5, 1, 5, 2);
++
++ /* IPU_PIX_FMT_YUYV 16bit width */
++ _ipu_dc_map_clear(ipu, 9);
++ _ipu_dc_map_link(ipu, 9, 5, 2, 5, 1, 5, 0);
++ _ipu_dc_map_clear(ipu, 10);
++ _ipu_dc_map_link(ipu, 10, 5, 1, 5, 2, 5, 0);
++
++ /* IPU_PIX_FMT_YVYU 16bit width */
++ _ipu_dc_map_clear(ipu, 11);
++ _ipu_dc_map_link(ipu, 11, 5, 1, 5, 2, 5, 0);
++ _ipu_dc_map_clear(ipu, 12);
++ _ipu_dc_map_link(ipu, 12, 5, 2, 5, 1, 5, 0);
++
++ /* IPU_PIX_FMT_GBR24 */
++ /* IPU_PIX_FMT_VYU444 */
++ _ipu_dc_map_clear(ipu, 13);
++ _ipu_dc_map_link(ipu, 13, 0, 2, 0, 0, 0, 1);
++
++ /* IPU_PIX_FMT_BGR24 */
++ _ipu_dc_map_clear(ipu, 14);
++ _ipu_dc_map_link(ipu, 14, 0, 2, 0, 1, 0, 0);
++}
++
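++/*
++ * For the interleaved YUV formats this returns one map of a pair; the
++ * template microcode in ipu_init_sync_panel() programs both map and
++ * map - 1, one per line parity (hence the paired map definitions in
++ * _ipu_init_dc_mappings()).
++ */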
++int _ipu_pixfmt_to_map(uint32_t fmt)
++{
++ switch (fmt) {
++ case IPU_PIX_FMT_GENERIC:
++ case IPU_PIX_FMT_RGB24:
++ return 0;
++ case IPU_PIX_FMT_RGB666:
++ return 1;
++ case IPU_PIX_FMT_YUV444:
++ return 2;
++ case IPU_PIX_FMT_RGB565:
++ return 3;
++ case IPU_PIX_FMT_LVDS666:
++ return 4;
++ case IPU_PIX_FMT_VYUY:
++ return 6;
++ case IPU_PIX_FMT_UYVY:
++ return 8;
++ case IPU_PIX_FMT_YUYV:
++ return 10;
++ case IPU_PIX_FMT_YVYU:
++ return 12;
++ case IPU_PIX_FMT_GBR24:
++ case IPU_PIX_FMT_VYU444:
++ return 13;
++ case IPU_PIX_FMT_BGR24:
++ return 14;
++ }
++
++ return -1;
++}
++
++/*!
++ * This function sets the color space conversion of the DP.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param param If it's not NULL, update the csc table
++ * with this parameter.
++ *
++ * @return N/A
++ */
++void _ipu_dp_set_csc_coefficients(struct ipu_soc *ipu, ipu_channel_t channel, int32_t param[][3])
++{
++ int dp;
++ struct dp_csc_param_t dp_csc_param;
++
++ if (channel == MEM_FG_SYNC)
++ dp = DP_SYNC;
++ else if (channel == MEM_BG_SYNC)
++ dp = DP_SYNC;
++ else if (channel == MEM_BG_ASYNC0)
++ dp = DP_ASYNC0;
++ else
++ return;
++
++ dp_csc_param.mode = -1;
++ dp_csc_param.coeff = param;
++ __ipu_dp_csc_setup(ipu, dp, dp_csc_param, true);
++}
++
++void ipu_set_csc_coefficients(struct ipu_soc *ipu, ipu_channel_t channel, int32_t param[][3])
++{
++ _ipu_dp_set_csc_coefficients(ipu, channel, param);
++}
++EXPORT_SYMBOL(ipu_set_csc_coefficients);
++
++/*!
++ * This function is called to adapt a synchronous LCD panel's timings to IPU
++ * restrictions: at least two end blank lines are enforced, borrowed from the
++ * start blank or the VSYNC width when necessary.
++ *
++ */
++void adapt_panel_to_ipu_restricitions(struct ipu_soc *ipu, uint16_t *v_start_width,
++ uint16_t *v_sync_width,
++ uint16_t *v_end_width)
++{
++ if (*v_end_width < 2) {
++ uint16_t diff = 2 - *v_end_width;
++ if (*v_start_width >= diff) {
++ *v_end_width = 2;
++ *v_start_width = *v_start_width - diff;
++ } else if (*v_sync_width > diff) {
++ *v_end_width = 2;
++ *v_sync_width = *v_sync_width - diff;
++ } else
++			dev_err(ipu->dev, "WARNING: tried to adapt timing, but failed\n");
++		dev_err(ipu->dev, "WARNING: adapted panel end blank lines\n");
++ }
++}
++
++/*!
++ * This function is called to initialize a synchronous LCD panel.
++ *
++ * @param ipu ipu handler
++ * @param disp The DI the panel is attached to.
++ *
++ * @param pixel_clk Desired pixel clock frequency in Hz.
++ *
++ * @param pixel_fmt Input parameter for pixel format of buffer.
++ * Pixel format is a FOURCC ASCII code.
++ *
++ * @param width The width of panel in pixels.
++ *
++ * @param height The height of panel in pixels.
++ *
++ * @param hStartWidth The number of pixel clocks between the HSYNC
++ * signal pulse and the start of valid data.
++ *
++ * @param hSyncWidth The width of the HSYNC signal in units of pixel
++ * clocks.
++ *
++ * @param hEndWidth The number of pixel clocks between the end of
++ * valid data and the HSYNC signal for next line.
++ *
++ * @param vStartWidth The number of lines between the VSYNC
++ * signal pulse and the start of valid data.
++ *
++ * @param vSyncWidth The width of the VSYNC signal in units of lines
++ *
++ * @param vEndWidth The number of lines between the end of valid
++ * data and the VSYNC signal for next frame.
++ *
++ * @param sig Bitfield of signal polarities for LCD interface.
++ *
++ * @return This function returns 0 on success or negative error code on
++ * fail.
++ */
++int32_t ipu_init_sync_panel(struct ipu_soc *ipu, int disp, uint32_t pixel_clk,
++ uint16_t width, uint16_t height,
++ uint32_t pixel_fmt,
++ uint16_t h_start_width, uint16_t h_sync_width,
++ uint16_t h_end_width, uint16_t v_start_width,
++ uint16_t v_sync_width, uint16_t v_end_width,
++ uint32_t v_to_h_sync, ipu_di_signal_cfg_t sig)
++{
++ uint32_t field0_offset = 0;
++ uint32_t field1_offset;
++ uint32_t reg;
++ uint32_t di_gen, vsync_cnt;
++ uint32_t div, rounded_pixel_clk;
++ uint32_t h_total, v_total;
++ int map;
++ int ret;
++ struct clk *ldb_di0_clk, *ldb_di1_clk;
++ struct clk *di_parent;
++
++ dev_dbg(ipu->dev, "panel size = %d x %d\n", width, height);
++
++ if ((v_sync_width == 0) || (h_sync_width == 0))
++ return -EINVAL;
++
++ adapt_panel_to_ipu_restricitions(ipu, &v_start_width, &v_sync_width, &v_end_width);
++ h_total = width + h_sync_width + h_start_width + h_end_width;
++ v_total = height + v_sync_width + v_start_width + v_end_width;
++
++ /* Init clocking */
++ dev_dbg(ipu->dev, "pixel clk = %d\n", pixel_clk);
++
++ di_parent = clk_get_parent(ipu->di_clk_sel[disp]);
++ if (!di_parent) {
++ dev_err(ipu->dev, "get di clk parent fail\n");
++ return -EINVAL;
++ }
++ ldb_di0_clk = clk_get(ipu->dev, "ldb_di0");
++ if (IS_ERR(ldb_di0_clk)) {
++ dev_err(ipu->dev, "clk_get di0 failed");
++ return PTR_ERR(ldb_di0_clk);
++ }
++ ldb_di1_clk = clk_get(ipu->dev, "ldb_di1");
++ if (IS_ERR(ldb_di1_clk)) {
++ dev_err(ipu->dev, "clk_get di1 failed");
++ return PTR_ERR(ldb_di1_clk);
++ }
++
++ if (ldb_di0_clk == di_parent || ldb_di1_clk == di_parent) {
++		/* if the di clk parent is tve/ldb, keep it */
++ dev_dbg(ipu->dev, "use special clk parent\n");
++ ret = clk_set_parent(ipu->pixel_clk_sel[disp], ipu->di_clk[disp]);
++ if (ret) {
++ dev_err(ipu->dev, "set pixel clk error:%d\n", ret);
++ return ret;
++ }
++ clk_put(ldb_di0_clk);
++ clk_put(ldb_di1_clk);
++ } else {
++ /* try ipu clk first*/
++ dev_dbg(ipu->dev, "try ipu internal clk\n");
++ ret = clk_set_parent(ipu->pixel_clk_sel[disp], ipu->ipu_clk);
++ if (ret) {
++ dev_err(ipu->dev, "set pixel clk error:%d\n", ret);
++ return ret;
++ }
++ rounded_pixel_clk = clk_round_rate(ipu->pixel_clk[disp], pixel_clk);
++ dev_dbg(ipu->dev, "rounded pix clk:%d\n", rounded_pixel_clk);
++ /*
++		 * we will only use a 1/2 fractional divider for the ipu clk,
++		 * so if the clock rate does not fit, try the external clock.
++ */
++ if (!sig.int_clk &&
++ ((rounded_pixel_clk >= pixel_clk + pixel_clk/200) ||
++ (rounded_pixel_clk <= pixel_clk - pixel_clk/200))) {
++ dev_dbg(ipu->dev, "try ipu ext di clk\n");
++
++ rounded_pixel_clk =
++ clk_round_rate(ipu->di_clk[disp], pixel_clk);
++ ret = clk_set_rate(ipu->di_clk[disp],
++ rounded_pixel_clk);
++ if (ret) {
++ dev_err(ipu->dev,
++ "set di clk rate error:%d\n", ret);
++ return ret;
++ }
++ dev_dbg(ipu->dev, "di clk:%d\n", rounded_pixel_clk);
++ ret = clk_set_parent(ipu->pixel_clk_sel[disp],
++ ipu->di_clk[disp]);
++ if (ret) {
++ dev_err(ipu->dev,
++ "set pixel clk parent error:%d\n", ret);
++ return ret;
++ }
++ }
++ }
++ rounded_pixel_clk = clk_round_rate(ipu->pixel_clk[disp], pixel_clk);
++ dev_dbg(ipu->dev, "round pixel clk:%d\n", rounded_pixel_clk);
++ ret = clk_set_rate(ipu->pixel_clk[disp], rounded_pixel_clk);
++ if (ret) {
++ dev_err(ipu->dev, "set pixel clk rate error:%d\n", ret);
++ return ret;
++ }
++ msleep(5);
++ /* Get integer portion of divider */
++ div = clk_get_rate(clk_get_parent(ipu->pixel_clk_sel[disp])) / rounded_pixel_clk;
++ dev_dbg(ipu->dev, "div:%d\n", div);
++ if (!div) {
++ dev_err(ipu->dev, "invalid pixel clk div = 0\n");
++ return -EINVAL;
++ }
++
++
++ mutex_lock(&ipu->mutex_lock);
++
++ _ipu_di_data_wave_config(ipu, disp, SYNC_WAVE, div - 1, div - 1);
++ _ipu_di_data_pin_config(ipu, disp, SYNC_WAVE, DI_PIN15, 3, 0, div * 2);
++
++ map = _ipu_pixfmt_to_map(pixel_fmt);
++ if (map < 0) {
++ dev_dbg(ipu->dev, "IPU_DISP: No MAP\n");
++ mutex_unlock(&ipu->mutex_lock);
++ return -EINVAL;
++ }
++
++ /*clear DI*/
++ di_gen = ipu_di_read(ipu, disp, DI_GENERAL);
++ di_gen &= (0x3 << 20);
++ ipu_di_write(ipu, disp, di_gen, DI_GENERAL);
++
++ if (sig.interlaced) {
++ if (g_ipu_hw_rev >= IPU_V3DEX) {
++ /* Internal VSYNC for each frame */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ DI_SYNC_COUNT_1, /* counter */
++ v_total*2 - 1, /* run count */
++				(3 - 1), 	/* run_resolution, counter 1 can reference counter 6,7,8 with run_resolution=2,3,4 */
++ 1, /* offset */
++ (3 - 1), /* offset resolution, 3=counter 7 */
++ 0, /* repeat count */
++ DI_SYNC_NONE, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 0 /* COUNT DOWN */
++ );
++
++ /* HSYNC waveform on DI_PIN02 */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ DI_SYNC_HSYNC, /* counter */
++ h_total - 1, /* run count */
++				DI_SYNC_CLK,	/* run_resolution, counter 2 can reference counter 5,7 with run_resolution=3,4 */
++ 0, /* offset */
++ DI_SYNC_NONE, /* offset resolution */
++ 0, /* repeat count */
++ DI_SYNC_NONE, /* CNT_CLR_SEL */
++ 1, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_CLK, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 2*h_sync_width /* COUNT DOWN */
++ );
++
++ /* VSYNC waveform on DI_PIN03 */
++ vsync_cnt = DI_SYNC_VSYNC;
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ DI_SYNC_VSYNC, /* counter */
++ v_total - 1, /* run count */
++				(4 - 1), 	/* run_resolution, counter 3 can reference counter 7 with run_resolution=4 */
++ 1, /* offset */
++ (4 - 1), /* offset resolution, 4=counter 7 */
++ 2, /* repeat count */
++ DI_SYNC_COUNT_1, /* CNT_CLR_SEL */
++ 1, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ (4 - 1), /* CNT_POLARITY_TRIGGER_SEL, 4=counter 7 */
++ 0, /* COUNT UP */
++ 2*v_sync_width /* COUNT DOWN */
++ );
++
++ /* Active Field */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ DI_SYNC_AFIELD, /* counter */
++ (v_total/2 + 1) - 1, /* run count */
++ DI_SYNC_HSYNC, /* run_resolution */
++ h_total/2, /* offset */
++ DI_SYNC_CLK, /* offset resolution */
++ 2, /* repeat count */
++ DI_SYNC_COUNT_1, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 0 /* COUNT DOWN */
++ );
++
++ /* Active Line */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ DI_SYNC_ALINE, /* counter */
++ 0, /* run count */
++ DI_SYNC_HSYNC, /* run_resolution */
++ (v_start_width + v_sync_width) / 2, /* offset */
++ DI_SYNC_HSYNC, /* offset resolution */
++ height/2, /* repeat count */
++ DI_SYNC_AFIELD, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 0 /* COUNT DOWN */
++ );
++
++ /* Active Pixel */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ DI_SYNC_APIXEL, /* counter */
++ 0, /* run count */
++ DI_SYNC_CLK, /* run_resolution */
++ h_start_width + h_sync_width, /* offset */
++ DI_SYNC_CLK, /* offset resolution */
++ width, /* repeat count */
++ DI_SYNC_ALINE, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 0 /* COUNT DOWN */
++ );
++
++ /* Half line HSYNC */
++ _ipu_di_sync_config(ipu,
++ disp, /* display */
++ DI_SYNC_COUNT_7, /* counter */
++ h_total/2 - 1, /* run count */
++ DI_SYNC_CLK, /* run_resolution */
++ 0, /* offset */
++ DI_SYNC_NONE, /* offset resolution */
++ 0, /* repeat count */
++ DI_SYNC_NONE, /* CNT_CLR_SEL */
++ 0, /* CNT_POLARITY_GEN_EN */
++ DI_SYNC_NONE, /* CNT_POLARITY_CLR_SEL */
++ DI_SYNC_NONE, /* CNT_POLARITY_TRIGGER_SEL */
++ 0, /* COUNT UP */
++ 0 /* COUNT DOWN */
++ );
++
++ ipu_di_write(ipu, disp, v_total / 2 - 1, DI_SCR_CONF);
++
++ /* set y_sel = 1 */
++ di_gen |= ((DI_SYNC_HSYNC-1)<<28);
++ } else {
++ /* Internal HSYNC waveform */
++ _ipu_di_sync_config(ipu, disp, DI_SYNC_INT_HSYNC, h_total - 1, DI_SYNC_CLK,
++ 0, DI_SYNC_NONE, 0, DI_SYNC_NONE, 0, DI_SYNC_NONE,
++ DI_SYNC_NONE, 0, 0);
++
++ field1_offset = v_sync_width + v_start_width + height / 2 +
++ v_end_width;
++ if (sig.odd_field_first) {
++ field0_offset = field1_offset - 1;
++ field1_offset = 0;
++ }
++ v_total += v_start_width + v_end_width;
++
++ /* HSYNC waveform */
++ _ipu_di_sync_config(ipu, disp, DI_SYNC_HSYNC, h_total - 1, DI_SYNC_CLK,
++ 0, DI_SYNC_NONE, 0, DI_SYNC_NONE, 0,
++ DI_SYNC_NONE, DI_SYNC_NONE, 0, 4);
++
++ /* Field 1 VSYNC waveform */
++ _ipu_di_sync_config(ipu, disp, DI_SYNC_VSYNC, v_total - 1, DI_SYNC_INT_HSYNC,
++ field0_offset,
++ field0_offset ? DI_SYNC_INT_HSYNC : DI_SYNC_NONE,
++ 0, DI_SYNC_NONE, 0,
++ DI_SYNC_NONE, DI_SYNC_NONE, 0, 4);
++
++ /* Active Field */
++ _ipu_di_sync_config(ipu, disp, DI_SYNC_AFIELD,
++ field0_offset ?
++ field0_offset : field1_offset - 2,
++ DI_SYNC_INT_HSYNC, v_start_width + v_sync_width, DI_SYNC_INT_HSYNC,
++ 2, DI_SYNC_VSYNC, 0, DI_SYNC_NONE, DI_SYNC_NONE, 0, 0);
++
++ /* Active Line */
++ _ipu_di_sync_config(ipu, disp, DI_SYNC_ALINE, 0, DI_SYNC_INT_HSYNC,
++ 0, DI_SYNC_NONE,
++ height / 2, DI_SYNC_AFIELD, 0, DI_SYNC_NONE,
++ DI_SYNC_NONE, 0, 0);
++
++ /* Active Pixel */
++ _ipu_di_sync_config(ipu, disp, DI_SYNC_APIXEL, 0, DI_SYNC_CLK,
++ h_sync_width + h_start_width, DI_SYNC_CLK,
++ width, DI_SYNC_ALINE, 0, DI_SYNC_NONE, DI_SYNC_NONE,
++ 0, 0);
++
++ /* DC VSYNC waveform */
++ vsync_cnt = DI_SYNC_COUNT_7;
++ _ipu_di_sync_config(ipu, disp, DI_SYNC_COUNT_7, 0, DI_SYNC_INT_HSYNC,
++ field1_offset,
++ field1_offset ? DI_SYNC_INT_HSYNC : DI_SYNC_NONE,
++ 1, DI_SYNC_VSYNC, 0, DI_SYNC_NONE, DI_SYNC_NONE, 0, 0);
++
++ /* Field 0 VSYNC waveform */
++ _ipu_di_sync_config(ipu, disp, DI_SYNC_COUNT_8, v_total - 1, DI_SYNC_INT_HSYNC,
++ 0, DI_SYNC_NONE,
++ 0, DI_SYNC_NONE, 0, DI_SYNC_NONE,
++ DI_SYNC_NONE, 0, 0);
++
++ /* ??? */
++ _ipu_di_sync_config(ipu, disp, DI_SYNC_COUNT_9, v_total - 1, (DI_SYNC_HSYNC - 1),
++ 0, DI_SYNC_NONE,
++ 0, DI_SYNC_NONE, 6, DI_SYNC_NONE,
++ DI_SYNC_NONE, 0, 0);
++
++ reg = ipu_di_read(ipu, disp, DI_SW_GEN1(9));
++ reg |= 0x8000;
++ ipu_di_write(ipu, disp, reg, DI_SW_GEN1(9));
++
++ ipu_di_write(ipu, disp, v_sync_width + v_start_width +
++ v_end_width + height / 2 - 1, DI_SCR_CONF);
++ }
++
++ /* Init template microcode */
++ if (disp) {
++ _ipu_dc_write_tmpl(ipu, 1, WROD(0), 0, map, SYNC_WAVE, 0, DI_SYNC_APIXEL, 1);
++ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
++ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
++ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
++ (pixel_fmt == IPU_PIX_FMT_VYUY)) {
++ _ipu_dc_write_tmpl(ipu, 8, WROD(0), 0, (map - 1), SYNC_WAVE, 0, DI_SYNC_APIXEL, 1);
++ _ipu_dc_write_tmpl(ipu, 9, WROD(0), 0, map, SYNC_WAVE, 0, DI_SYNC_APIXEL, 1);
++ /* configure user events according to DISP NUM */
++ ipu_dc_write(ipu, (width - 1), DC_UGDE_3(disp));
++ }
++ } else {
++ _ipu_dc_write_tmpl(ipu, 0, WROD(0), 0, map, SYNC_WAVE, 0, DI_SYNC_APIXEL, 1);
++ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
++ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
++ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
++ (pixel_fmt == IPU_PIX_FMT_VYUY)) {
++ _ipu_dc_write_tmpl(ipu, 10, WROD(0), 0, (map - 1), SYNC_WAVE, 0, DI_SYNC_APIXEL, 1);
++ _ipu_dc_write_tmpl(ipu, 11, WROD(0), 0, map, SYNC_WAVE, 0, DI_SYNC_APIXEL, 1);
++ /* configure user events according to DISP NUM */
++ ipu_dc_write(ipu, width - 1, DC_UGDE_3(disp));
++ }
++ }
++
++ if (sig.Hsync_pol)
++ di_gen |= DI_GEN_POLARITY_2;
++ if (sig.Vsync_pol)
++ di_gen |= DI_GEN_POLARITY_3;
++ } else {
++ /* Setup internal HSYNC waveform */
++ _ipu_di_sync_config(ipu, disp, DI_SYNC_INT_HSYNC, h_total - 1, DI_SYNC_CLK,
++ 0, DI_SYNC_NONE, 0, DI_SYNC_NONE, 0, DI_SYNC_NONE,
++ DI_SYNC_NONE, 0, 0);
++
++ /* Setup external (delayed) HSYNC waveform */
++ _ipu_di_sync_config(ipu, disp, DI_SYNC_HSYNC, h_total - 1,
++ DI_SYNC_CLK, div * v_to_h_sync, DI_SYNC_CLK,
++ 0, DI_SYNC_NONE, 1, DI_SYNC_NONE,
++ DI_SYNC_CLK, 0, h_sync_width * 2);
++ /* Setup VSYNC waveform */
++ vsync_cnt = DI_SYNC_VSYNC;
++ _ipu_di_sync_config(ipu, disp, DI_SYNC_VSYNC, v_total - 1,
++ DI_SYNC_INT_HSYNC, 0, DI_SYNC_NONE, 0,
++ DI_SYNC_NONE, 1, DI_SYNC_NONE,
++ DI_SYNC_INT_HSYNC, 0, v_sync_width * 2);
++ ipu_di_write(ipu, disp, v_total - 1, DI_SCR_CONF);
++
++ /* Setup active data waveform to sync with DC */
++ _ipu_di_sync_config(ipu, disp, DI_SYNC_ALINE, 0, DI_SYNC_HSYNC,
++ v_sync_width + v_start_width, DI_SYNC_HSYNC, height,
++ DI_SYNC_VSYNC, 0, DI_SYNC_NONE,
++ DI_SYNC_NONE, 0, 0);
++ _ipu_di_sync_config(ipu, disp, DI_SYNC_APIXEL, 0, DI_SYNC_CLK,
++ h_sync_width + h_start_width, DI_SYNC_CLK,
++ width, DI_SYNC_ALINE, 0, DI_SYNC_NONE, DI_SYNC_NONE, 0,
++ 0);
++
++		/* set the VGA delayed hsync/vsync whether or not VGA is enabled */
++ if (disp) {
++			/* counter 7 for VGA delayed HSYNC */
++ _ipu_di_sync_config(ipu, disp, DI_SYNC_COUNT_7,
++ h_total - 1, DI_SYNC_CLK,
++ 18, DI_SYNC_CLK,
++ 0, DI_SYNC_NONE,
++ 1, DI_SYNC_NONE, DI_SYNC_CLK,
++ 0, h_sync_width * 2);
++
++			/* counter 8 for VGA delayed VSYNC */
++ _ipu_di_sync_config(ipu, disp, DI_SYNC_COUNT_8,
++ v_total - 1, DI_SYNC_INT_HSYNC,
++ 1, DI_SYNC_INT_HSYNC,
++ 0, DI_SYNC_NONE,
++ 1, DI_SYNC_NONE, DI_SYNC_INT_HSYNC,
++ 0, v_sync_width * 2);
++ }
++
++ /* reset all unused counters */
++ if (!disp) {
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN0(7));
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN1(7));
++ ipu_di_write(ipu, disp, 0, DI_STP_REP(7));
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN0(8));
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN1(8));
++ ipu_di_write(ipu, disp, 0, DI_STP_REP(8));
++ }
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN0(9));
++ ipu_di_write(ipu, disp, 0, DI_SW_GEN1(9));
++ ipu_di_write(ipu, disp, 0, DI_STP_REP(9));
++
++ /* Init template microcode */
++ if (disp) {
++ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
++ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
++ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
++ (pixel_fmt == IPU_PIX_FMT_VYUY)) {
++ _ipu_dc_write_tmpl(ipu, 8, WROD(0), 0, (map - 1), SYNC_WAVE, 0, DI_SYNC_APIXEL, 1);
++ _ipu_dc_write_tmpl(ipu, 9, WROD(0), 0, map, SYNC_WAVE, 0, DI_SYNC_APIXEL, 1);
++ /* configure user events according to DISP NUM */
++ ipu_dc_write(ipu, (width - 1), DC_UGDE_3(disp));
++ }
++ _ipu_dc_write_tmpl(ipu, 2, WROD(0), 0, map, SYNC_WAVE, 8, DI_SYNC_APIXEL, 1);
++ _ipu_dc_write_tmpl(ipu, 3, WROD(0), 0, map, SYNC_WAVE, 4, DI_SYNC_APIXEL, 0);
++ _ipu_dc_write_tmpl(ipu, 4, WRG, 0, map, NULL_WAVE, 0, DI_SYNC_CLK, 1);
++ _ipu_dc_write_tmpl(ipu, 1, WROD(0), 0, map, SYNC_WAVE, 0, DI_SYNC_APIXEL, 1);
++
++ } else {
++ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
++ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
++ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
++ (pixel_fmt == IPU_PIX_FMT_VYUY)) {
++ _ipu_dc_write_tmpl(ipu, 10, WROD(0), 0, (map - 1), SYNC_WAVE, 0, DI_SYNC_APIXEL, 1);
++ _ipu_dc_write_tmpl(ipu, 11, WROD(0), 0, map, SYNC_WAVE, 0, DI_SYNC_APIXEL, 1);
++ /* configure user events according to DISP NUM */
++ ipu_dc_write(ipu, width - 1, DC_UGDE_3(disp));
++ }
++ _ipu_dc_write_tmpl(ipu, 5, WROD(0), 0, map, SYNC_WAVE, 8, DI_SYNC_APIXEL, 1);
++ _ipu_dc_write_tmpl(ipu, 6, WROD(0), 0, map, SYNC_WAVE, 4, DI_SYNC_APIXEL, 0);
++ _ipu_dc_write_tmpl(ipu, 7, WRG, 0, map, NULL_WAVE, 0, DI_SYNC_CLK, 1);
++ _ipu_dc_write_tmpl(ipu, 12, WROD(0), 0, map, SYNC_WAVE, 0, DI_SYNC_APIXEL, 1);
++ }
++
++ if (sig.Hsync_pol) {
++ di_gen |= DI_GEN_POLARITY_2;
++ if (disp)
++ di_gen |= DI_GEN_POLARITY_7;
++ }
++ if (sig.Vsync_pol) {
++ di_gen |= DI_GEN_POLARITY_3;
++ if (disp)
++ di_gen |= DI_GEN_POLARITY_8;
++ }
++ }
++	/* changing DISP_CLK polarity: it can be wrong for some applications */
++ if ((pixel_fmt == IPU_PIX_FMT_YUYV) ||
++ (pixel_fmt == IPU_PIX_FMT_UYVY) ||
++ (pixel_fmt == IPU_PIX_FMT_YVYU) ||
++ (pixel_fmt == IPU_PIX_FMT_VYUY))
++ di_gen |= 0x00020000;
++
++ if (!sig.clk_pol)
++ di_gen |= DI_GEN_POLARITY_DISP_CLK;
++
++ ipu_di_write(ipu, disp, di_gen, DI_GENERAL);
++
++ ipu_di_write(ipu, disp, (--vsync_cnt << DI_VSYNC_SEL_OFFSET) |
++ 0x00000002, DI_SYNC_AS_GEN);
++ reg = ipu_di_read(ipu, disp, DI_POL);
++ reg &= ~(DI_POL_DRDY_DATA_POLARITY | DI_POL_DRDY_POLARITY_15);
++ if (sig.enable_pol)
++ reg |= DI_POL_DRDY_POLARITY_15;
++ if (sig.data_pol)
++ reg |= DI_POL_DRDY_DATA_POLARITY;
++ ipu_di_write(ipu, disp, reg, DI_POL);
++
++ ipu_dc_write(ipu, width, DC_DISP_CONF2(DC_DISP_ID_SYNC(disp)));
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_init_sync_panel);
++
++void ipu_uninit_sync_panel(struct ipu_soc *ipu, int disp)
++{
++ uint32_t reg;
++ uint32_t di_gen;
++
++	if ((disp != 0) && (disp != 1))
++ return;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ di_gen = ipu_di_read(ipu, disp, DI_GENERAL);
++ di_gen |= 0x3ff | DI_GEN_POLARITY_DISP_CLK;
++ ipu_di_write(ipu, disp, di_gen, DI_GENERAL);
++
++ reg = ipu_di_read(ipu, disp, DI_POL);
++ reg |= 0x3ffffff;
++ ipu_di_write(ipu, disp, reg, DI_POL);
++
++ mutex_unlock(&ipu->mutex_lock);
++}
++EXPORT_SYMBOL(ipu_uninit_sync_panel);
++
++int ipu_init_async_panel(struct ipu_soc *ipu, int disp, int type, uint32_t cycle_time,
++ uint32_t pixel_fmt, ipu_adc_sig_cfg_t sig)
++{
++ int map;
++ u32 ser_conf = 0;
++ u32 div;
++ u32 di_clk = clk_get_rate(ipu->ipu_clk);
++
++	/* round up cycle_time, then calculate the divider using scaled math */
++ cycle_time += (1000000000UL / di_clk) - 1;
++ div = (cycle_time * (di_clk / 256UL)) / (1000000000UL / 256UL);
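++	/* i.e. div ~= ceil(cycle_time_ns * di_clk / 1e9); dividing both di_clk
++	   and the nanosecond base by 256 keeps the products within 32 bits */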
++
++ map = _ipu_pixfmt_to_map(pixel_fmt);
++ if (map < 0)
++ return -EINVAL;
++
++ mutex_lock(&ipu->mutex_lock);
++
++ if (type == IPU_PANEL_SERIAL) {
++ ipu_di_write(ipu, disp, (div << 24) | ((sig.ifc_width - 1) << 4),
++ DI_DW_GEN(ASYNC_SER_WAVE));
++
++ _ipu_di_data_pin_config(ipu, disp, ASYNC_SER_WAVE, DI_PIN_CS,
++ 0, 0, (div * 2) + 1);
++ _ipu_di_data_pin_config(ipu, disp, ASYNC_SER_WAVE, DI_PIN_SER_CLK,
++ 1, div, div * 2);
++ _ipu_di_data_pin_config(ipu, disp, ASYNC_SER_WAVE, DI_PIN_SER_RS,
++ 2, 0, 0);
++
++ _ipu_dc_write_tmpl(ipu, 0x64, WROD(0), 0, map, ASYNC_SER_WAVE, 0, 0, 1);
++
++ /* Configure DC for serial panel */
++ ipu_dc_write(ipu, 0x14, DC_DISP_CONF1(DC_DISP_ID_SERIAL));
++
++ if (sig.clk_pol)
++ ser_conf |= DI_SER_CONF_SERIAL_CLK_POL;
++ if (sig.data_pol)
++ ser_conf |= DI_SER_CONF_SERIAL_DATA_POL;
++ if (sig.rs_pol)
++ ser_conf |= DI_SER_CONF_SERIAL_RS_POL;
++ if (sig.cs_pol)
++ ser_conf |= DI_SER_CONF_SERIAL_CS_POL;
++ ipu_di_write(ipu, disp, ser_conf, DI_SER_CONF);
++ }
++
++ mutex_unlock(&ipu->mutex_lock);
++ return 0;
++}
++EXPORT_SYMBOL(ipu_init_async_panel);
++
++/*!
++ * This function sets the global alpha blending mode of the foreground and
++ * background planes. It also selects which plane acts as the DP graphic
++ * plane, based on the IPUv3 DP channel passed in.
++ *
++ * @param ipu ipu handler
++ * @param channel IPUv3 DP channel
++ *
++ * @param enable Boolean to enable or disable global alpha
++ * blending. If disabled, local blending is used.
++ *
++ * @param alpha Global alpha value.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t ipu_disp_set_global_alpha(struct ipu_soc *ipu, ipu_channel_t channel,
++ bool enable, uint8_t alpha)
++{
++ uint32_t reg;
++ uint32_t flow;
++ bool bg_chan;
++
++ if (channel == MEM_BG_SYNC || channel == MEM_FG_SYNC)
++ flow = DP_SYNC;
++ else if (channel == MEM_BG_ASYNC0 || channel == MEM_FG_ASYNC0)
++ flow = DP_ASYNC0;
++ else if (channel == MEM_BG_ASYNC1 || channel == MEM_FG_ASYNC1)
++ flow = DP_ASYNC1;
++ else
++ return -EINVAL;
++
++ if (channel == MEM_BG_SYNC || channel == MEM_BG_ASYNC0 ||
++ channel == MEM_BG_ASYNC1)
++ bg_chan = true;
++ else
++ bg_chan = false;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ if (bg_chan) {
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ ipu_dp_write(ipu, reg & ~DP_COM_CONF_GWSEL, DP_COM_CONF(flow));
++ } else {
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ ipu_dp_write(ipu, reg | DP_COM_CONF_GWSEL, DP_COM_CONF(flow));
++ }
++
++ if (enable) {
++ reg = ipu_dp_read(ipu, DP_GRAPH_WIND_CTRL(flow)) & 0x00FFFFFFL;
++ ipu_dp_write(ipu, reg | ((uint32_t) alpha << 24),
++ DP_GRAPH_WIND_CTRL(flow));
++
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ ipu_dp_write(ipu, reg | DP_COM_CONF_GWAM, DP_COM_CONF(flow));
++ } else {
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ ipu_dp_write(ipu, reg & ~DP_COM_CONF_GWAM, DP_COM_CONF(flow));
++ }
++
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_disp_set_global_alpha);
++
++/*!
++ * This function sets the transparent color key for the SDC graphic plane.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param enable Boolean to enable or disable color key
++ *
++ * @param colorKey 24-bit RGB color for transparent color key.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t ipu_disp_set_color_key(struct ipu_soc *ipu, ipu_channel_t channel,
++ bool enable, uint32_t color_key)
++{
++ uint32_t reg, flow;
++ int y, u, v;
++ int red, green, blue;
++
++ if (channel == MEM_BG_SYNC || channel == MEM_FG_SYNC)
++ flow = DP_SYNC;
++ else if (channel == MEM_BG_ASYNC0 || channel == MEM_FG_ASYNC0)
++ flow = DP_ASYNC0;
++ else if (channel == MEM_BG_ASYNC1 || channel == MEM_FG_ASYNC1)
++ flow = DP_ASYNC1;
++ else
++ return -EINVAL;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
++ ipu->color_key_4rgb = true;
++ /* Transform color key from rgb to yuv if CSC is enabled */
++ if (((ipu->fg_csc_type == RGB2YUV) && (ipu->bg_csc_type == YUV2YUV)) ||
++ ((ipu->fg_csc_type == YUV2YUV) && (ipu->bg_csc_type == RGB2YUV)) ||
++ ((ipu->fg_csc_type == YUV2YUV) && (ipu->bg_csc_type == YUV2YUV)) ||
++ ((ipu->fg_csc_type == YUV2RGB) && (ipu->bg_csc_type == YUV2RGB))) {
++
++ dev_dbg(ipu->dev, "color key 0x%x need change to yuv fmt\n", color_key);
++
++ red = (color_key >> 16) & 0xFF;
++ green = (color_key >> 8) & 0xFF;
++ blue = color_key & 0xFF;
++
++ y = _rgb_to_yuv(0, red, green, blue);
++ u = _rgb_to_yuv(1, red, green, blue);
++ v = _rgb_to_yuv(2, red, green, blue);
++ color_key = (y << 16) | (u << 8) | v;
++
++ ipu->color_key_4rgb = false;
++
++ dev_dbg(ipu->dev, "color key change to yuv fmt 0x%x\n", color_key);
++ }
++
++ if (enable) {
++ reg = ipu_dp_read(ipu, DP_GRAPH_WIND_CTRL(flow)) & 0xFF000000L;
++ ipu_dp_write(ipu, reg | color_key, DP_GRAPH_WIND_CTRL(flow));
++
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ ipu_dp_write(ipu, reg | DP_COM_CONF_GWCKE, DP_COM_CONF(flow));
++ } else {
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ ipu_dp_write(ipu, reg & ~DP_COM_CONF_GWCKE, DP_COM_CONF(flow));
++ }
++
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_disp_set_color_key);
++
++/*!
++ * This function sets the gamma correction for DP output.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param enable Boolean to enable or disable gamma correction.
++ *
++ * @param constk Gamma piecewise linear approximation constk coeff.
++ *
++ * @param slopek Gamma piecewise linear approximation slopek coeff.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t ipu_disp_set_gamma_correction(struct ipu_soc *ipu, ipu_channel_t channel, bool enable, int constk[], int slopek[])
++{
++ uint32_t reg, flow, i;
++
++ if (channel == MEM_BG_SYNC || channel == MEM_FG_SYNC)
++ flow = DP_SYNC;
++ else if (channel == MEM_BG_ASYNC0 || channel == MEM_FG_ASYNC0)
++ flow = DP_ASYNC0;
++ else if (channel == MEM_BG_ASYNC1 || channel == MEM_FG_ASYNC1)
++ flow = DP_ASYNC1;
++ else
++ return -EINVAL;
++
++ _ipu_get(ipu);
++
++ mutex_lock(&ipu->mutex_lock);
++
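++	/* constk[0..15] are packed two 9-bit values per DP_GAMMA_C register and
++	   slopek[0..15] four 8-bit values per DP_GAMMA_S register, so callers
++	   supply 16 constants and 16 slopes for the piecewise-linear curve */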
++ for (i = 0; i < 8; i++)
++ ipu_dp_write(ipu, (constk[2*i] & 0x1ff) | ((constk[2*i+1] & 0x1ff) << 16), DP_GAMMA_C(flow, i));
++ for (i = 0; i < 4; i++)
++ ipu_dp_write(ipu, (slopek[4*i] & 0xff) | ((slopek[4*i+1] & 0xff) << 8) |
++ ((slopek[4*i+2] & 0xff) << 16) | ((slopek[4*i+3] & 0xff) << 24), DP_GAMMA_S(flow, i));
++
++ reg = ipu_dp_read(ipu, DP_COM_CONF(flow));
++ if (enable) {
++ if ((ipu->bg_csc_type == RGB2YUV) || (ipu->bg_csc_type == YUV2YUV))
++ reg |= DP_COM_CONF_GAMMA_YUV_EN;
++ else
++ reg &= ~DP_COM_CONF_GAMMA_YUV_EN;
++ ipu_dp_write(ipu, reg | DP_COM_CONF_GAMMA_EN, DP_COM_CONF(flow));
++ } else
++ ipu_dp_write(ipu, reg & ~DP_COM_CONF_GAMMA_EN, DP_COM_CONF(flow));
++
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) | 0x8;
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++
++ mutex_unlock(&ipu->mutex_lock);
++
++ _ipu_put(ipu);
++
++ return 0;
++}
++EXPORT_SYMBOL(ipu_disp_set_gamma_correction);
++
++/*!
++ * This function sets the window position of the foreground or background
++ * plane.
++ *
++ * @param ipu ipu handler
++ * @param channel Input parameter for the logical channel ID.
++ *
++ * @param x_pos The X coordinate position to place window at.
++ * The position is relative to the top left corner.
++ *
++ * @param y_pos The Y coordinate position to place window at.
++ * The position is relative to the top left corner.
++ *
++ * @return Returns 0 on success or negative error code on fail
++ */
++int32_t _ipu_disp_set_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
++ int16_t x_pos, int16_t y_pos)
++{
++ u32 reg;
++ uint32_t flow = 0;
++ uint32_t dp_srm_shift;
++
++ if ((channel == MEM_FG_SYNC) || (channel == MEM_BG_SYNC)) {
++ flow = DP_SYNC;
++ dp_srm_shift = 3;
++ } else if (channel == MEM_FG_ASYNC0) {
++ flow = DP_ASYNC0;
++ dp_srm_shift = 5;
++ } else if (channel == MEM_FG_ASYNC1) {
++ flow = DP_ASYNC1;
++ dp_srm_shift = 7;
++ } else
++ return -EINVAL;
++
++ ipu_dp_write(ipu, (x_pos << 16) | y_pos, DP_FG_POS(flow));
++
++ if (ipu_is_channel_busy(ipu, channel)) {
++		/* controlled by the FSU if the channel is enabled */
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) & (~(0x3 << dp_srm_shift));
++ reg |= (0x1 << dp_srm_shift);
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++ } else {
++		/* disable auto swap; controlled by the MCU if the channel is disabled */
++ reg = ipu_cm_read(ipu, IPU_SRM_PRI2) & (~(0x3 << dp_srm_shift));
++ ipu_cm_write(ipu, reg, IPU_SRM_PRI2);
++ }
++
++ return 0;
++}
++
++int32_t ipu_disp_set_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
++ int16_t x_pos, int16_t y_pos)
++{
++ int ret;
++
++ _ipu_get(ipu);
++ mutex_lock(&ipu->mutex_lock);
++ ret = _ipu_disp_set_window_pos(ipu, channel, x_pos, y_pos);
++ mutex_unlock(&ipu->mutex_lock);
++ _ipu_put(ipu);
++ return ret;
++}
++EXPORT_SYMBOL(ipu_disp_set_window_pos);
++
++int32_t _ipu_disp_get_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
++ int16_t *x_pos, int16_t *y_pos)
++{
++ u32 reg;
++ uint32_t flow = 0;
++
++ if (channel == MEM_FG_SYNC)
++ flow = DP_SYNC;
++ else if (channel == MEM_FG_ASYNC0)
++ flow = DP_ASYNC0;
++ else if (channel == MEM_FG_ASYNC1)
++ flow = DP_ASYNC1;
++ else
++ return -EINVAL;
++
++ reg = ipu_dp_read(ipu, DP_FG_POS(flow));
++
++ *x_pos = (reg >> 16) & 0x7FF;
++ *y_pos = reg & 0x7FF;
++
++ return 0;
++}
++int32_t ipu_disp_get_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
++ int16_t *x_pos, int16_t *y_pos)
++{
++ int ret;
++
++ _ipu_get(ipu);
++ mutex_lock(&ipu->mutex_lock);
++ ret = _ipu_disp_get_window_pos(ipu, channel, x_pos, y_pos);
++ mutex_unlock(&ipu->mutex_lock);
++ _ipu_put(ipu);
++ return ret;
++}
++EXPORT_SYMBOL(ipu_disp_get_window_pos);
++
++void ipu_disp_direct_write(struct ipu_soc *ipu, ipu_channel_t channel, u32 value, u32 offset)
++{
++ if (channel == DIRECT_ASYNC0)
++ writel(value, ipu->disp_base[0] + offset);
++ else if (channel == DIRECT_ASYNC1)
++ writel(value, ipu->disp_base[1] + offset);
++}
++EXPORT_SYMBOL(ipu_disp_direct_write);
++
++void ipu_reset_disp_panel(struct ipu_soc *ipu)
++{
++ uint32_t tmp;
++
++ tmp = ipu_di_read(ipu, 1, DI_GENERAL);
++ ipu_di_write(ipu, 1, tmp | 0x08, DI_GENERAL);
++ msleep(10); /* tRES >= 100us */
++ tmp = ipu_di_read(ipu, 1, DI_GENERAL);
++ ipu_di_write(ipu, 1, tmp & ~0x08, DI_GENERAL);
++ msleep(60);
++
++ return;
++}
++EXPORT_SYMBOL(ipu_reset_disp_panel);
++
++void ipu_disp_init(struct ipu_soc *ipu)
++{
++ ipu->fg_csc_type = ipu->bg_csc_type = CSC_NONE;
++ ipu->color_key_4rgb = true;
++ _ipu_init_dc_mappings(ipu);
++ _ipu_dmfc_init(ipu, DMFC_NORMAL, 1);
++}
+diff -Nur linux-4.1.3/drivers/mxc/ipu3/ipu_ic.c linux-xbian-imx6/drivers/mxc/ipu3/ipu_ic.c
+--- linux-4.1.3/drivers/mxc/ipu3/ipu_ic.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/ipu3/ipu_ic.c 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,924 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*
++ * @file ipu_ic.c
++ *
++ * @brief IPU IC functions
++ *
++ * @ingroup IPU
++ */
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/ipu-v3.h>
++#include <linux/spinlock.h>
++#include <linux/types.h>
++#include <linux/videodev2.h>
++
++#include "ipu_param_mem.h"
++#include "ipu_regs.h"
++
++enum {
++ IC_TASK_VIEWFINDER,
++ IC_TASK_ENCODER,
++ IC_TASK_POST_PROCESSOR
++};
++
++static void _init_csc(struct ipu_soc *ipu, uint8_t ic_task, ipu_color_space_t in_format,
++ ipu_color_space_t out_format, int csc_index);
++
++static int _calc_resize_coeffs(struct ipu_soc *ipu,
++ uint32_t inSize, uint32_t outSize,
++ uint32_t *resizeCoeff,
++ uint32_t *downsizeCoeff);
++
++void _ipu_vdi_set_top_field_man(struct ipu_soc *ipu, bool top_field_0)
++{
++ uint32_t reg;
++
++ reg = ipu_vdi_read(ipu, VDI_C);
++ if (top_field_0)
++ reg &= ~VDI_C_TOP_FIELD_MAN_1;
++ else
++ reg |= VDI_C_TOP_FIELD_MAN_1;
++ ipu_vdi_write(ipu, reg, VDI_C);
++}
++
++void _ipu_vdi_set_motion(struct ipu_soc *ipu, ipu_motion_sel motion_sel)
++{
++ uint32_t reg;
++
++ reg = ipu_vdi_read(ipu, VDI_C);
++ reg &= ~(VDI_C_MOT_SEL_FULL | VDI_C_MOT_SEL_MED | VDI_C_MOT_SEL_LOW);
++ if (motion_sel == HIGH_MOTION)
++ reg |= VDI_C_MOT_SEL_FULL;
++ else if (motion_sel == MED_MOTION)
++ reg |= VDI_C_MOT_SEL_MED;
++ else
++ reg |= VDI_C_MOT_SEL_LOW;
++
++ ipu_vdi_write(ipu, reg, VDI_C);
++ dev_dbg(ipu->dev, "VDI_C = \t0x%08X\n", reg);
++}
++
++void ic_dump_register(struct ipu_soc *ipu)
++{
++ printk(KERN_DEBUG "IC_CONF = \t0x%08X\n", ipu_ic_read(ipu, IC_CONF));
++ printk(KERN_DEBUG "IC_PRP_ENC_RSC = \t0x%08X\n",
++ ipu_ic_read(ipu, IC_PRP_ENC_RSC));
++ printk(KERN_DEBUG "IC_PRP_VF_RSC = \t0x%08X\n",
++ ipu_ic_read(ipu, IC_PRP_VF_RSC));
++ printk(KERN_DEBUG "IC_PP_RSC = \t0x%08X\n", ipu_ic_read(ipu, IC_PP_RSC));
++ printk(KERN_DEBUG "IC_IDMAC_1 = \t0x%08X\n", ipu_ic_read(ipu, IC_IDMAC_1));
++ printk(KERN_DEBUG "IC_IDMAC_2 = \t0x%08X\n", ipu_ic_read(ipu, IC_IDMAC_2));
++ printk(KERN_DEBUG "IC_IDMAC_3 = \t0x%08X\n", ipu_ic_read(ipu, IC_IDMAC_3));
++}
++
++void _ipu_ic_enable_task(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t ic_conf;
++
++ ic_conf = ipu_ic_read(ipu, IC_CONF);
++ switch (channel) {
++ case CSI_PRP_VF_MEM:
++ case MEM_PRP_VF_MEM:
++ ic_conf |= IC_CONF_PRPVF_EN;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ ic_conf |= IC_CONF_PRPVF_EN;
++ break;
++ case MEM_VDI_MEM:
++ ic_conf |= IC_CONF_PRPVF_EN | IC_CONF_RWS_EN ;
++ break;
++ case MEM_ROT_VF_MEM:
++ ic_conf |= IC_CONF_PRPVF_ROT_EN;
++ break;
++ case CSI_PRP_ENC_MEM:
++ case MEM_PRP_ENC_MEM:
++ ic_conf |= IC_CONF_PRPENC_EN;
++ break;
++ case MEM_ROT_ENC_MEM:
++ ic_conf |= IC_CONF_PRPENC_ROT_EN;
++ break;
++ case MEM_PP_MEM:
++ ic_conf |= IC_CONF_PP_EN;
++ break;
++ case MEM_ROT_PP_MEM:
++ ic_conf |= IC_CONF_PP_ROT_EN;
++ break;
++ default:
++ break;
++ }
++ ipu_ic_write(ipu, ic_conf, IC_CONF);
++}
++
++void _ipu_ic_disable_task(struct ipu_soc *ipu, ipu_channel_t channel)
++{
++ uint32_t ic_conf;
++
++ ic_conf = ipu_ic_read(ipu, IC_CONF);
++ switch (channel) {
++ case CSI_PRP_VF_MEM:
++ case MEM_PRP_VF_MEM:
++ ic_conf &= ~IC_CONF_PRPVF_EN;
++ break;
++ case MEM_VDI_PRP_VF_MEM:
++ ic_conf &= ~IC_CONF_PRPVF_EN;
++ break;
++ case MEM_VDI_MEM:
++ ic_conf &= ~(IC_CONF_PRPVF_EN | IC_CONF_RWS_EN);
++ break;
++ case MEM_ROT_VF_MEM:
++ ic_conf &= ~IC_CONF_PRPVF_ROT_EN;
++ break;
++ case CSI_PRP_ENC_MEM:
++ case MEM_PRP_ENC_MEM:
++ ic_conf &= ~IC_CONF_PRPENC_EN;
++ break;
++ case MEM_ROT_ENC_MEM:
++ ic_conf &= ~IC_CONF_PRPENC_ROT_EN;
++ break;
++ case MEM_PP_MEM:
++ ic_conf &= ~IC_CONF_PP_EN;
++ break;
++ case MEM_ROT_PP_MEM:
++ ic_conf &= ~IC_CONF_PP_ROT_EN;
++ break;
++ default:
++ break;
++ }
++ ipu_ic_write(ipu, ic_conf, IC_CONF);
++}
++
++void _ipu_vdi_init(struct ipu_soc *ipu, ipu_channel_t channel, ipu_channel_params_t *params)
++{
++ uint32_t reg;
++ uint32_t pixel_fmt;
++ uint32_t pix_per_burst;
++
++ reg = ((params->mem_prp_vf_mem.in_height-1) << 16) |
++ (params->mem_prp_vf_mem.in_width-1);
++ ipu_vdi_write(ipu, reg, VDI_FSIZE);
++
++	/* Full motion: only the vertical filter is used.
++	   Burst size is 4 accesses. */
++ if (params->mem_prp_vf_mem.in_pixel_fmt ==
++ IPU_PIX_FMT_UYVY ||
++ params->mem_prp_vf_mem.in_pixel_fmt ==
++ IPU_PIX_FMT_YUYV) {
++ pixel_fmt = VDI_C_CH_422;
++ pix_per_burst = 32;
++ } else {
++ pixel_fmt = VDI_C_CH_420;
++ pix_per_burst = 64;
++ }
++
++ reg = ipu_vdi_read(ipu, VDI_C);
++ reg |= pixel_fmt;
++ switch (channel) {
++ case MEM_VDI_PRP_VF_MEM:
++ reg |= VDI_C_BURST_SIZE2_4;
++ break;
++ case MEM_VDI_PRP_VF_MEM_P:
++ reg |= VDI_C_BURST_SIZE1_4 | VDI_C_VWM1_SET_1 | VDI_C_VWM1_CLR_2;
++ break;
++ case MEM_VDI_PRP_VF_MEM_N:
++ reg |= VDI_C_BURST_SIZE3_4 | VDI_C_VWM3_SET_1 | VDI_C_VWM3_CLR_2;
++ break;
++
++ case MEM_VDI_MEM:
++ reg |= (((pix_per_burst >> 2) - 1) & VDI_C_BURST_SIZE_MASK)
++ << VDI_C_BURST_SIZE2_OFFSET;
++ break;
++ case MEM_VDI_MEM_P:
++ reg |= (((pix_per_burst >> 2) - 1) & VDI_C_BURST_SIZE_MASK)
++ << VDI_C_BURST_SIZE1_OFFSET;
++ reg |= VDI_C_VWM1_SET_2 | VDI_C_VWM1_CLR_2;
++ break;
++ case MEM_VDI_MEM_N:
++ reg |= (((pix_per_burst >> 2) - 1) & VDI_C_BURST_SIZE_MASK)
++ << VDI_C_BURST_SIZE3_OFFSET;
++ reg |= VDI_C_VWM3_SET_2 | VDI_C_VWM3_CLR_2;
++ break;
++ default:
++ break;
++ }
++ ipu_vdi_write(ipu, reg, VDI_C);
++
++ if (params->mem_prp_vf_mem.field_fmt == IPU_DEINTERLACE_FIELD_TOP)
++ _ipu_vdi_set_top_field_man(ipu, true);
++ else if (params->mem_prp_vf_mem.field_fmt == IPU_DEINTERLACE_FIELD_BOTTOM)
++ _ipu_vdi_set_top_field_man(ipu, false);
++
++ _ipu_vdi_set_motion(ipu, params->mem_prp_vf_mem.motion_sel);
++
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~IC_CONF_RWS_EN;
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++void _ipu_vdi_uninit(struct ipu_soc *ipu)
++{
++ ipu_vdi_write(ipu, 0, VDI_FSIZE);
++ ipu_vdi_write(ipu, 0, VDI_C);
++}
++
++int _ipu_ic_init_prpvf(struct ipu_soc *ipu, ipu_channel_params_t *params,
++ bool src_is_csi)
++{
++ uint32_t reg, ic_conf;
++ uint32_t downsizeCoeff, resizeCoeff;
++ ipu_color_space_t in_fmt, out_fmt;
++ int ret = 0;
++
++ /* Setup vertical resizing */
++ if (!params->mem_prp_vf_mem.outv_resize_ratio) {
++ ret = _calc_resize_coeffs(ipu, params->mem_prp_vf_mem.in_height,
++ params->mem_prp_vf_mem.out_height,
++ &resizeCoeff, &downsizeCoeff);
++ if (ret < 0) {
++ dev_err(ipu->dev, "failed to calculate prpvf height "
++ "scaling coefficients\n");
++ return ret;
++ }
++
++ reg = (downsizeCoeff << 30) | (resizeCoeff << 16);
++ } else
++ reg = (params->mem_prp_vf_mem.outv_resize_ratio) << 16;
++
++ /* Setup horizontal resizing */
++ if (!params->mem_prp_vf_mem.outh_resize_ratio) {
++ ret = _calc_resize_coeffs(ipu, params->mem_prp_vf_mem.in_width,
++ params->mem_prp_vf_mem.out_width,
++ &resizeCoeff, &downsizeCoeff);
++ if (ret < 0) {
++ dev_err(ipu->dev, "failed to calculate prpvf width "
++ "scaling coefficients\n");
++ return ret;
++ }
++
++ reg |= (downsizeCoeff << 14) | resizeCoeff;
++ } else
++ reg |= params->mem_prp_vf_mem.outh_resize_ratio;
++
++ ipu_ic_write(ipu, reg, IC_PRP_VF_RSC);
++
++ ic_conf = ipu_ic_read(ipu, IC_CONF);
++
++ /* Setup color space conversion */
++ in_fmt = format_to_colorspace(params->mem_prp_vf_mem.in_pixel_fmt);
++ out_fmt = format_to_colorspace(params->mem_prp_vf_mem.out_pixel_fmt);
++ if (in_fmt == RGB) {
++ if ((out_fmt == YCbCr) || (out_fmt == YUV)) {
++ /* Enable RGB->YCBCR CSC1 */
++ _init_csc(ipu, IC_TASK_VIEWFINDER, RGB, out_fmt, 1);
++ ic_conf |= IC_CONF_PRPVF_CSC1;
++ }
++ }
++ if ((in_fmt == YCbCr) || (in_fmt == YUV)) {
++ if (out_fmt == RGB) {
++ /* Enable YCBCR->RGB CSC1 */
++ _init_csc(ipu, IC_TASK_VIEWFINDER, YCbCr, RGB, 1);
++ ic_conf |= IC_CONF_PRPVF_CSC1;
++ } else {
++ /* TODO: Support YUV<->YCbCr conversion? */
++ }
++ }
++
++ if (params->mem_prp_vf_mem.graphics_combine_en) {
++ ic_conf |= IC_CONF_PRPVF_CMB;
++
++ if (!(ic_conf & IC_CONF_PRPVF_CSC1)) {
++ /* need transparent CSC1 conversion */
++ _init_csc(ipu, IC_TASK_VIEWFINDER, RGB, RGB, 1);
++ ic_conf |= IC_CONF_PRPVF_CSC1; /* Enable RGB->RGB CSC */
++ }
++ in_fmt = format_to_colorspace(params->mem_prp_vf_mem.in_g_pixel_fmt);
++ out_fmt = format_to_colorspace(params->mem_prp_vf_mem.out_pixel_fmt);
++ if (in_fmt == RGB) {
++ if ((out_fmt == YCbCr) || (out_fmt == YUV)) {
++ /* Enable RGB->YCBCR CSC2 */
++ _init_csc(ipu, IC_TASK_VIEWFINDER, RGB, out_fmt, 2);
++ ic_conf |= IC_CONF_PRPVF_CSC2;
++ }
++ }
++ if ((in_fmt == YCbCr) || (in_fmt == YUV)) {
++ if (out_fmt == RGB) {
++ /* Enable YCBCR->RGB CSC2 */
++ _init_csc(ipu, IC_TASK_VIEWFINDER, YCbCr, RGB, 2);
++ ic_conf |= IC_CONF_PRPVF_CSC2;
++ } else {
++ /* TODO: Support YUV<->YCbCr conversion? */
++ }
++ }
++
++ if (params->mem_prp_vf_mem.global_alpha_en) {
++ ic_conf |= IC_CONF_IC_GLB_LOC_A;
++ reg = ipu_ic_read(ipu, IC_CMBP_1);
++ reg &= ~(0xff);
++ reg |= params->mem_prp_vf_mem.alpha;
++ ipu_ic_write(ipu, reg, IC_CMBP_1);
++ } else
++ ic_conf &= ~IC_CONF_IC_GLB_LOC_A;
++
++ if (params->mem_prp_vf_mem.key_color_en) {
++ ic_conf |= IC_CONF_KEY_COLOR_EN;
++ ipu_ic_write(ipu, params->mem_prp_vf_mem.key_color,
++ IC_CMBP_2);
++ } else
++ ic_conf &= ~IC_CONF_KEY_COLOR_EN;
++ } else {
++ ic_conf &= ~IC_CONF_PRPVF_CMB;
++ }
++
++ if (src_is_csi)
++ ic_conf &= ~IC_CONF_RWS_EN;
++ else
++ ic_conf |= IC_CONF_RWS_EN;
++
++ ipu_ic_write(ipu, ic_conf, IC_CONF);
++
++ return ret;
++}
++
++void _ipu_ic_uninit_prpvf(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~(IC_CONF_PRPVF_EN | IC_CONF_PRPVF_CMB |
++ IC_CONF_PRPVF_CSC2 | IC_CONF_PRPVF_CSC1);
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++void _ipu_ic_init_rotate_vf(struct ipu_soc *ipu, ipu_channel_params_t *params)
++{
++}
++
++void _ipu_ic_uninit_rotate_vf(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~IC_CONF_PRPVF_ROT_EN;
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++int _ipu_ic_init_prpenc(struct ipu_soc *ipu, ipu_channel_params_t *params,
++ bool src_is_csi)
++{
++ uint32_t reg, ic_conf;
++ uint32_t downsizeCoeff, resizeCoeff;
++ ipu_color_space_t in_fmt, out_fmt;
++ int ret = 0;
++
++ /* Setup vertical resizing */
++ if (!params->mem_prp_enc_mem.outv_resize_ratio) {
++ ret = _calc_resize_coeffs(ipu,
++ params->mem_prp_enc_mem.in_height,
++ params->mem_prp_enc_mem.out_height,
++ &resizeCoeff, &downsizeCoeff);
++ if (ret < 0) {
++ dev_err(ipu->dev, "failed to calculate prpenc height "
++ "scaling coefficients\n");
++ return ret;
++ }
++
++ reg = (downsizeCoeff << 30) | (resizeCoeff << 16);
++ } else
++ reg = (params->mem_prp_enc_mem.outv_resize_ratio) << 16;
++
++ /* Setup horizontal resizing */
++ if (!params->mem_prp_enc_mem.outh_resize_ratio) {
++ ret = _calc_resize_coeffs(ipu, params->mem_prp_enc_mem.in_width,
++ params->mem_prp_enc_mem.out_width,
++ &resizeCoeff, &downsizeCoeff);
++ if (ret < 0) {
++ dev_err(ipu->dev, "failed to calculate prpenc width "
++ "scaling coefficients\n");
++ return ret;
++ }
++
++ reg |= (downsizeCoeff << 14) | resizeCoeff;
++ } else
++ reg |= params->mem_prp_enc_mem.outh_resize_ratio;
++
++ ipu_ic_write(ipu, reg, IC_PRP_ENC_RSC);
++
++ ic_conf = ipu_ic_read(ipu, IC_CONF);
++
++ /* Setup color space conversion */
++ in_fmt = format_to_colorspace(params->mem_prp_enc_mem.in_pixel_fmt);
++ out_fmt = format_to_colorspace(params->mem_prp_enc_mem.out_pixel_fmt);
++ if (in_fmt == RGB) {
++ if ((out_fmt == YCbCr) || (out_fmt == YUV)) {
++ /* Enable RGB->YCBCR CSC1 */
++ _init_csc(ipu, IC_TASK_ENCODER, RGB, out_fmt, 1);
++ ic_conf |= IC_CONF_PRPENC_CSC1;
++ }
++ }
++ if ((in_fmt == YCbCr) || (in_fmt == YUV)) {
++ if (out_fmt == RGB) {
++ /* Enable YCBCR->RGB CSC1 */
++ _init_csc(ipu, IC_TASK_ENCODER, YCbCr, RGB, 1);
++ ic_conf |= IC_CONF_PRPENC_CSC1;
++ } else {
++ /* TODO: Support YUV<->YCbCr conversion? */
++ }
++ }
++
++ if (src_is_csi)
++ ic_conf &= ~IC_CONF_RWS_EN;
++ else
++ ic_conf |= IC_CONF_RWS_EN;
++
++ ipu_ic_write(ipu, ic_conf, IC_CONF);
++
++ return ret;
++}
++
++void _ipu_ic_uninit_prpenc(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~(IC_CONF_PRPENC_EN | IC_CONF_PRPENC_CSC1);
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++void _ipu_ic_init_rotate_enc(struct ipu_soc *ipu, ipu_channel_params_t *params)
++{
++}
++
++void _ipu_ic_uninit_rotate_enc(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~(IC_CONF_PRPENC_ROT_EN);
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++int _ipu_ic_init_pp(struct ipu_soc *ipu, ipu_channel_params_t *params)
++{
++ uint32_t reg, ic_conf;
++ uint32_t downsizeCoeff, resizeCoeff;
++ ipu_color_space_t in_fmt, out_fmt;
++ int ret = 0;
++
++ /* Setup vertical resizing */
++ if (!params->mem_pp_mem.outv_resize_ratio) {
++ ret = _calc_resize_coeffs(ipu, params->mem_pp_mem.in_height,
++ params->mem_pp_mem.out_height,
++ &resizeCoeff, &downsizeCoeff);
++ if (ret < 0) {
++ dev_err(ipu->dev, "failed to calculate pp height "
++ "scaling coefficients\n");
++ return ret;
++ }
++
++ reg = (downsizeCoeff << 30) | (resizeCoeff << 16);
++ } else {
++ reg = (params->mem_pp_mem.outv_resize_ratio) << 16;
++ }
++
++ /* Setup horizontal resizing */
++ if (!params->mem_pp_mem.outh_resize_ratio) {
++ ret = _calc_resize_coeffs(ipu, params->mem_pp_mem.in_width,
++ params->mem_pp_mem.out_width,
++ &resizeCoeff, &downsizeCoeff);
++ if (ret < 0) {
++ dev_err(ipu->dev, "failed to calculate pp width "
++ "scaling coefficients\n");
++ return ret;
++ }
++
++ reg |= (downsizeCoeff << 14) | resizeCoeff;
++ } else {
++ reg |= params->mem_pp_mem.outh_resize_ratio;
++ }
++
++ ipu_ic_write(ipu, reg, IC_PP_RSC);
++
++ ic_conf = ipu_ic_read(ipu, IC_CONF);
++
++ /* Setup color space conversion */
++ in_fmt = format_to_colorspace(params->mem_pp_mem.in_pixel_fmt);
++ out_fmt = format_to_colorspace(params->mem_pp_mem.out_pixel_fmt);
++ if (in_fmt == RGB) {
++ if ((out_fmt == YCbCr) || (out_fmt == YUV)) {
++ /* Enable RGB->YCBCR CSC1 */
++ _init_csc(ipu, IC_TASK_POST_PROCESSOR, RGB, out_fmt, 1);
++ ic_conf |= IC_CONF_PP_CSC1;
++ }
++ }
++ if ((in_fmt == YCbCr) || (in_fmt == YUV)) {
++ if (out_fmt == RGB) {
++ /* Enable YCBCR->RGB CSC1 */
++ _init_csc(ipu, IC_TASK_POST_PROCESSOR, YCbCr, RGB, 1);
++ ic_conf |= IC_CONF_PP_CSC1;
++ } else {
++ /* TODO: Support YUV<->YCbCr conversion? */
++ }
++ }
++
++ if (params->mem_pp_mem.graphics_combine_en) {
++ ic_conf |= IC_CONF_PP_CMB;
++
++ if (!(ic_conf & IC_CONF_PP_CSC1)) {
++ /* need transparent CSC1 conversion */
++ _init_csc(ipu, IC_TASK_POST_PROCESSOR, RGB, RGB, 1);
++ ic_conf |= IC_CONF_PP_CSC1; /* Enable RGB->RGB CSC */
++ }
++
++ in_fmt = format_to_colorspace(params->mem_pp_mem.in_g_pixel_fmt);
++ out_fmt = format_to_colorspace(params->mem_pp_mem.out_pixel_fmt);
++ if (in_fmt == RGB) {
++ if ((out_fmt == YCbCr) || (out_fmt == YUV)) {
++ /* Enable RGB->YCBCR CSC2 */
++ _init_csc(ipu, IC_TASK_POST_PROCESSOR, RGB, out_fmt, 2);
++ ic_conf |= IC_CONF_PP_CSC2;
++ }
++ }
++ if ((in_fmt == YCbCr) || (in_fmt == YUV)) {
++ if (out_fmt == RGB) {
++ /* Enable YCBCR->RGB CSC2 */
++ _init_csc(ipu, IC_TASK_POST_PROCESSOR, YCbCr, RGB, 2);
++ ic_conf |= IC_CONF_PP_CSC2;
++ } else {
++ /* TODO: Support YUV<->YCbCr conversion? */
++ }
++ }
++
++ if (params->mem_pp_mem.global_alpha_en) {
++ ic_conf |= IC_CONF_IC_GLB_LOC_A;
++ reg = ipu_ic_read(ipu, IC_CMBP_1);
++ reg &= ~(0xff00);
++ reg |= (params->mem_pp_mem.alpha << 8);
++ ipu_ic_write(ipu, reg, IC_CMBP_1);
++ } else
++ ic_conf &= ~IC_CONF_IC_GLB_LOC_A;
++
++ if (params->mem_pp_mem.key_color_en) {
++ ic_conf |= IC_CONF_KEY_COLOR_EN;
++ ipu_ic_write(ipu, params->mem_pp_mem.key_color,
++ IC_CMBP_2);
++ } else
++ ic_conf &= ~IC_CONF_KEY_COLOR_EN;
++ } else {
++ ic_conf &= ~IC_CONF_PP_CMB;
++ }
++
++ ipu_ic_write(ipu, ic_conf, IC_CONF);
++
++ return ret;
++}
++
++void _ipu_ic_uninit_pp(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~(IC_CONF_PP_EN | IC_CONF_PP_CSC1 | IC_CONF_PP_CSC2 |
++ IC_CONF_PP_CMB);
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++void _ipu_ic_init_rotate_pp(struct ipu_soc *ipu, ipu_channel_params_t *params)
++{
++}
++
++void _ipu_ic_uninit_rotate_pp(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++ reg = ipu_ic_read(ipu, IC_CONF);
++ reg &= ~IC_CONF_PP_ROT_EN;
++ ipu_ic_write(ipu, reg, IC_CONF);
++}
++
++int _ipu_ic_idma_init(struct ipu_soc *ipu, int dma_chan,
++ uint16_t width, uint16_t height,
++ int burst_size, ipu_rotate_mode_t rot)
++{
++ u32 ic_idmac_1, ic_idmac_2, ic_idmac_3;
++ u32 temp_rot = bitrev8(rot) >> 5;
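++	/* Illustrative: bitrev8() >> 5 mirrors the three low rotate-mode
++	 * bits, e.g. rot = 0b001 gives temp_rot = 0b100. */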
++ bool need_hor_flip = false;
++
++ if ((burst_size != 8) && (burst_size != 16)) {
++ dev_dbg(ipu->dev, "Illegal burst length for IC\n");
++ return -EINVAL;
++ }
++
++ width--;
++ height--;
++
++ if (temp_rot & 0x2) /* Need horizontal flip */
++ need_hor_flip = true;
++
++ ic_idmac_1 = ipu_ic_read(ipu, IC_IDMAC_1);
++ ic_idmac_2 = ipu_ic_read(ipu, IC_IDMAC_2);
++ ic_idmac_3 = ipu_ic_read(ipu, IC_IDMAC_3);
++ if (dma_chan == 22) { /* PP output - CB2 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB2_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB2_BURST_16;
++
++ if (need_hor_flip)
++ ic_idmac_1 |= IC_IDMAC_1_PP_FLIP_RS;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_PP_FLIP_RS;
++
++ ic_idmac_2 &= ~IC_IDMAC_2_PP_HEIGHT_MASK;
++ ic_idmac_2 |= height << IC_IDMAC_2_PP_HEIGHT_OFFSET;
++
++ ic_idmac_3 &= ~IC_IDMAC_3_PP_WIDTH_MASK;
++ ic_idmac_3 |= width << IC_IDMAC_3_PP_WIDTH_OFFSET;
++ } else if (dma_chan == 11) { /* PP Input - CB5 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB5_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB5_BURST_16;
++ } else if (dma_chan == 47) { /* PP Rot input */
++ ic_idmac_1 &= ~IC_IDMAC_1_PP_ROT_MASK;
++ ic_idmac_1 |= temp_rot << IC_IDMAC_1_PP_ROT_OFFSET;
++ }
++
++ if (dma_chan == 12) { /* PRP Input - CB6 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB6_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB6_BURST_16;
++ }
++
++ if (dma_chan == 20) { /* PRP ENC output - CB0 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB0_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB0_BURST_16;
++
++ if (need_hor_flip)
++ ic_idmac_1 |= IC_IDMAC_1_PRPENC_FLIP_RS;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_FLIP_RS;
++
++ ic_idmac_2 &= ~IC_IDMAC_2_PRPENC_HEIGHT_MASK;
++ ic_idmac_2 |= height << IC_IDMAC_2_PRPENC_HEIGHT_OFFSET;
++
++ ic_idmac_3 &= ~IC_IDMAC_3_PRPENC_WIDTH_MASK;
++ ic_idmac_3 |= width << IC_IDMAC_3_PRPENC_WIDTH_OFFSET;
++
++ } else if (dma_chan == 45) { /* PRP ENC Rot input */
++ ic_idmac_1 &= ~IC_IDMAC_1_PRPENC_ROT_MASK;
++ ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPENC_ROT_OFFSET;
++ }
++
++ if (dma_chan == 21) { /* PRP VF output - CB1 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB1_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB1_BURST_16;
++
++ if (need_hor_flip)
++ ic_idmac_1 |= IC_IDMAC_1_PRPVF_FLIP_RS;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_FLIP_RS;
++
++ ic_idmac_2 &= ~IC_IDMAC_2_PRPVF_HEIGHT_MASK;
++ ic_idmac_2 |= height << IC_IDMAC_2_PRPVF_HEIGHT_OFFSET;
++
++ ic_idmac_3 &= ~IC_IDMAC_3_PRPVF_WIDTH_MASK;
++ ic_idmac_3 |= width << IC_IDMAC_3_PRPVF_WIDTH_OFFSET;
++
++ } else if (dma_chan == 46) { /* PRP VF Rot input */
++ ic_idmac_1 &= ~IC_IDMAC_1_PRPVF_ROT_MASK;
++ ic_idmac_1 |= temp_rot << IC_IDMAC_1_PRPVF_ROT_OFFSET;
++ }
++
++ if (dma_chan == 14) { /* PRP VF graphics combining input - CB3 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB3_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB3_BURST_16;
++ } else if (dma_chan == 15) { /* PP graphics combining input - CB4 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB4_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB4_BURST_16;
++ } else if (dma_chan == 5) { /* VDIC OUTPUT - CB7 */
++ if (burst_size == 16)
++ ic_idmac_1 |= IC_IDMAC_1_CB7_BURST_16;
++ else
++ ic_idmac_1 &= ~IC_IDMAC_1_CB7_BURST_16;
++ }
++
++ ipu_ic_write(ipu, ic_idmac_1, IC_IDMAC_1);
++ ipu_ic_write(ipu, ic_idmac_2, IC_IDMAC_2);
++ ipu_ic_write(ipu, ic_idmac_3, IC_IDMAC_3);
++ return 0;
++}
++
++static void _init_csc(struct ipu_soc *ipu, uint8_t ic_task, ipu_color_space_t in_format,
++ ipu_color_space_t out_format, int csc_index)
++{
++ /*
++ * Y = 0.257 * R + 0.504 * G + 0.098 * B + 16;
++ * U = -0.148 * R - 0.291 * G + 0.439 * B + 128;
++ * V = 0.439 * R - 0.368 * G - 0.071 * B + 128;
++ */
++ static const uint32_t rgb2ycbcr_coeff[4][3] = {
++ {0x0042, 0x0081, 0x0019},
++ {0x01DA, 0x01B6, 0x0070},
++ {0x0070, 0x01A2, 0x01EE},
++ {0x0040, 0x0200, 0x0200}, /* A0, A1, A2 */
++ };
++
++	/* transparent RGB->RGB matrix for combining */
++ static const uint32_t rgb2rgb_coeff[4][3] = {
++ {0x0080, 0x0000, 0x0000},
++ {0x0000, 0x0080, 0x0000},
++ {0x0000, 0x0000, 0x0080},
++ {0x0000, 0x0000, 0x0000}, /* A0, A1, A2 */
++ };
++
++	/*
++	 * R = (1.164 * (Y - 16)) + (1.596 * (Cr - 128));
++	 * G = (1.164 * (Y - 16)) - (0.392 * (Cb - 128)) - (0.813 * (Cr - 128));
++	 * B = (1.164 * (Y - 16)) + (2.017 * (Cb - 128));
++	 */
++ static const uint32_t ycbcr2rgb_coeff[4][3] = {
++ {149, 0, 204},
++ {149, 462, 408},
++ {149, 255, 0},
++ {8192 - 446, 266, 8192 - 554}, /* A0, A1, A2 */
++ };
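++	/* The tables above appear to hold 9-bit two's-complement fixed-point
++	 * values: at scale = 1 the unit is 1/256 (0x0042 = 66/256 ~= 0.257,
++	 * 0x01DA = (474 - 512)/256 ~= -0.148), and scale = 2 doubles the
++	 * result (e.g. 149/256 * 2 ~= 1.164). */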
++
++ uint32_t param;
++ uint32_t *base = NULL;
++
++ if (ic_task == IC_TASK_ENCODER) {
++ base = (uint32_t *)ipu->tpmem_base + 0x2008 / 4;
++ } else if (ic_task == IC_TASK_VIEWFINDER) {
++ if (csc_index == 1)
++ base = (uint32_t *)ipu->tpmem_base + 0x4028 / 4;
++ else
++ base = (uint32_t *)ipu->tpmem_base + 0x4040 / 4;
++ } else if (ic_task == IC_TASK_POST_PROCESSOR) {
++ if (csc_index == 1)
++ base = (uint32_t *)ipu->tpmem_base + 0x6060 / 4;
++ else
++ base = (uint32_t *)ipu->tpmem_base + 0x6078 / 4;
++ } else {
++ BUG();
++ }
++
++ if ((in_format == YCbCr) && (out_format == RGB)) {
++ /* Init CSC (YCbCr->RGB) */
++ param = (ycbcr2rgb_coeff[3][0] << 27) |
++ (ycbcr2rgb_coeff[0][0] << 18) |
++ (ycbcr2rgb_coeff[1][1] << 9) | ycbcr2rgb_coeff[2][2];
++ writel(param, base++);
++ /* scale = 2, sat = 0 */
++ param = (ycbcr2rgb_coeff[3][0] >> 5) | (2L << (40 - 32));
++ writel(param, base++);
++
++ param = (ycbcr2rgb_coeff[3][1] << 27) |
++ (ycbcr2rgb_coeff[0][1] << 18) |
++ (ycbcr2rgb_coeff[1][0] << 9) | ycbcr2rgb_coeff[2][0];
++ writel(param, base++);
++ param = (ycbcr2rgb_coeff[3][1] >> 5);
++ writel(param, base++);
++
++ param = (ycbcr2rgb_coeff[3][2] << 27) |
++ (ycbcr2rgb_coeff[0][2] << 18) |
++ (ycbcr2rgb_coeff[1][2] << 9) | ycbcr2rgb_coeff[2][1];
++ writel(param, base++);
++ param = (ycbcr2rgb_coeff[3][2] >> 5);
++ writel(param, base++);
++ } else if ((in_format == RGB) && (out_format == YCbCr)) {
++ /* Init CSC (RGB->YCbCr) */
++ param = (rgb2ycbcr_coeff[3][0] << 27) |
++ (rgb2ycbcr_coeff[0][0] << 18) |
++ (rgb2ycbcr_coeff[1][1] << 9) | rgb2ycbcr_coeff[2][2];
++ writel(param, base++);
++ /* scale = 1, sat = 0 */
++ param = (rgb2ycbcr_coeff[3][0] >> 5) | (1UL << 8);
++ writel(param, base++);
++
++ param = (rgb2ycbcr_coeff[3][1] << 27) |
++ (rgb2ycbcr_coeff[0][1] << 18) |
++ (rgb2ycbcr_coeff[1][0] << 9) | rgb2ycbcr_coeff[2][0];
++ writel(param, base++);
++ param = (rgb2ycbcr_coeff[3][1] >> 5);
++ writel(param, base++);
++
++ param = (rgb2ycbcr_coeff[3][2] << 27) |
++ (rgb2ycbcr_coeff[0][2] << 18) |
++ (rgb2ycbcr_coeff[1][2] << 9) | rgb2ycbcr_coeff[2][1];
++ writel(param, base++);
++ param = (rgb2ycbcr_coeff[3][2] >> 5);
++ writel(param, base++);
++ } else if ((in_format == RGB) && (out_format == RGB)) {
++ /* Init CSC */
++ param =
++ (rgb2rgb_coeff[3][0] << 27) | (rgb2rgb_coeff[0][0] << 18) |
++ (rgb2rgb_coeff[1][1] << 9) | rgb2rgb_coeff[2][2];
++ writel(param, base++);
++ /* scale = 2, sat = 0 */
++ param = (rgb2rgb_coeff[3][0] >> 5) | (2UL << 8);
++ writel(param, base++);
++
++ param =
++ (rgb2rgb_coeff[3][1] << 27) | (rgb2rgb_coeff[0][1] << 18) |
++ (rgb2rgb_coeff[1][0] << 9) | rgb2rgb_coeff[2][0];
++ writel(param, base++);
++ param = (rgb2rgb_coeff[3][1] >> 5);
++ writel(param, base++);
++
++ param =
++ (rgb2rgb_coeff[3][2] << 27) | (rgb2rgb_coeff[0][2] << 18) |
++ (rgb2rgb_coeff[1][2] << 9) | rgb2rgb_coeff[2][1];
++ writel(param, base++);
++ param = (rgb2rgb_coeff[3][2] >> 5);
++ writel(param, base++);
++ } else {
++ dev_err(ipu->dev, "Unsupported color space conversion\n");
++ }
++}
++
++static int _calc_resize_coeffs(struct ipu_soc *ipu,
++ uint32_t inSize, uint32_t outSize,
++ uint32_t *resizeCoeff,
++ uint32_t *downsizeCoeff)
++{
++ uint32_t tempSize;
++ uint32_t tempDownsize;
++
++ if (inSize > 4096) {
++ dev_err(ipu->dev, "IC input size(%d) cannot exceed 4096\n",
++ inSize);
++ return -EINVAL;
++ }
++
++ if (outSize > 1024) {
++ dev_err(ipu->dev, "IC output size(%d) cannot exceed 1024\n",
++ outSize);
++ return -EINVAL;
++ }
++
++ if ((outSize << 3) < inSize) {
++ dev_err(ipu->dev, "IC cannot downsize more than 8:1\n");
++ return -EINVAL;
++ }
++
++ /* Compute downsizing coefficient */
++ /* Output of downsizing unit cannot be more than 1024 */
++ tempDownsize = 0;
++ tempSize = inSize;
++ while (((tempSize > 1024) || (tempSize >= outSize * 2)) &&
++ (tempDownsize < 2)) {
++ tempSize >>= 1;
++ tempDownsize++;
++ }
++ *downsizeCoeff = tempDownsize;
++
++	/* compute resizing coefficient using the following equation:
++	   resizeCoeff = M * (SI - 1) / (SO - 1)
++	   where M = 2^13, SI = input size, SO = output size */
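++	/* Worked example (illustrative): inSize = 1920, outSize = 960 gives
++	   tempDownsize = 1 and tempSize = 960, so resizeCoeff =
++	   8192 * 959 / 959 = 8192, i.e. 1:1 after the /2 downsize stage. */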
++ *resizeCoeff = (8192L * (tempSize - 1)) / (outSize - 1);
++ if (*resizeCoeff >= 16384L) {
++ dev_err(ipu->dev, "Overflow on IC resize coefficient.\n");
++ return -EINVAL;
++ }
++
++ dev_dbg(ipu->dev, "resizing from %u -> %u pixels, "
++ "downsize=%u, resize=%u.%lu (reg=%u)\n", inSize, outSize,
++ *downsizeCoeff, (*resizeCoeff >= 8192L) ? 1 : 0,
++ ((*resizeCoeff & 0x1FFF) * 10000L) / 8192L, *resizeCoeff);
++
++ return 0;
++}
++
++void _ipu_vdi_toggle_top_field_man(struct ipu_soc *ipu)
++{
++ uint32_t reg;
++ uint32_t mask_reg;
++
++ reg = ipu_vdi_read(ipu, VDI_C);
++ mask_reg = reg & VDI_C_TOP_FIELD_MAN_1;
++ if (mask_reg == VDI_C_TOP_FIELD_MAN_1)
++ reg &= ~VDI_C_TOP_FIELD_MAN_1;
++ else
++ reg |= VDI_C_TOP_FIELD_MAN_1;
++
++ ipu_vdi_write(ipu, reg, VDI_C);
++}
+diff -Nur linux-4.1.3/drivers/mxc/ipu3/ipu_param_mem.h linux-xbian-imx6/drivers/mxc/ipu3/ipu_param_mem.h
+--- linux-4.1.3/drivers/mxc/ipu3/ipu_param_mem.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/ipu3/ipu_param_mem.h 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,921 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++#ifndef __INCLUDE_IPU_PARAM_MEM_H__
++#define __INCLUDE_IPU_PARAM_MEM_H__
++
++#include <linux/bitrev.h>
++#include <linux/types.h>
++
++#include "ipu_prv.h"
++
++extern u32 *ipu_cpmem_base;
++
++struct ipu_ch_param_word {
++ uint32_t data[5];
++ uint32_t res[3];
++};
++
++struct ipu_ch_param {
++ struct ipu_ch_param_word word[2];
++};
++
++#define ipu_ch_param_addr(ipu, ch) (((struct ipu_ch_param *)ipu->cpmem_base) + (ch))
++
++#define _param_word(base, w) \
++ (((struct ipu_ch_param *)(base))->word[(w)].data)
++
++#define ipu_ch_param_set_field(base, w, bit, size, v) { \
++ int i = (bit) / 32; \
++ int off = (bit) % 32; \
++ _param_word(base, w)[i] |= (v) << off; \
++ if (((bit)+(size)-1)/32 > i) { \
++ _param_word(base, w)[i + 1] |= (v) >> (off ? (32 - off) : 0); \
++ } \
++}
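++
++/* Illustrative: a field at bit 125 with size 13 (the frame width) starts in
++ * data[3] at bit offset 29 and spills its upper bits into data[4], which is
++ * why this macro conditionally writes word i + 1 as well. */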
++
++#define ipu_ch_param_set_field_io(base, w, bit, size, v) { \
++ int i = (bit) / 32; \
++ int off = (bit) % 32; \
++ unsigned reg_offset; \
++ u32 temp; \
++ reg_offset = sizeof(struct ipu_ch_param_word) * w / 4; \
++ reg_offset += i; \
++ temp = readl((u32 *)base + reg_offset); \
++ temp |= (v) << off; \
++ writel(temp, (u32 *)base + reg_offset); \
++ if (((bit)+(size)-1)/32 > i) { \
++ reg_offset++; \
++ temp = readl((u32 *)base + reg_offset); \
++ temp |= (v) >> (off ? (32 - off) : 0); \
++ writel(temp, (u32 *)base + reg_offset); \
++ } \
++}
++
++#define ipu_ch_param_mod_field(base, w, bit, size, v) { \
++ int i = (bit) / 32; \
++ int off = (bit) % 32; \
++ u32 mask = (1UL << size) - 1; \
++ u32 temp = _param_word(base, w)[i]; \
++ temp &= ~(mask << off); \
++ _param_word(base, w)[i] = temp | (v) << off; \
++ if (((bit)+(size)-1)/32 > i) { \
++ temp = _param_word(base, w)[i + 1]; \
++ temp &= ~(mask >> (32 - off)); \
++ _param_word(base, w)[i + 1] = \
++ temp | ((v) >> (off ? (32 - off) : 0)); \
++ } \
++}
++
++#define ipu_ch_param_mod_field_io(base, w, bit, size, v) { \
++ int i = (bit) / 32; \
++ int off = (bit) % 32; \
++ u32 mask = (1UL << size) - 1; \
++ unsigned reg_offset; \
++ u32 temp; \
++ reg_offset = sizeof(struct ipu_ch_param_word) * w / 4; \
++ reg_offset += i; \
++ temp = readl((u32 *)base + reg_offset); \
++ temp &= ~(mask << off); \
++ temp |= (v) << off; \
++ writel(temp, (u32 *)base + reg_offset); \
++ if (((bit)+(size)-1)/32 > i) { \
++ reg_offset++; \
++ temp = readl((u32 *)base + reg_offset); \
++ temp &= ~(mask >> (32 - off)); \
++ temp |= ((v) >> (off ? (32 - off) : 0)); \
++ writel(temp, (u32 *)base + reg_offset); \
++ } \
++}
++
++#define ipu_ch_param_read_field(base, w, bit, size) ({ \
++ u32 temp2; \
++ int i = (bit) / 32; \
++ int off = (bit) % 32; \
++ u32 mask = (1UL << size) - 1; \
++ u32 temp1 = _param_word(base, w)[i]; \
++ temp1 = mask & (temp1 >> off); \
++ if (((bit)+(size)-1)/32 > i) { \
++ temp2 = _param_word(base, w)[i + 1]; \
++ temp2 &= mask >> (off ? (32 - off) : 0); \
++ temp1 |= temp2 << (off ? (32 - off) : 0); \
++ } \
++ temp1; \
++})
++
++#define ipu_ch_param_read_field_io(base, w, bit, size) ({ \
++ u32 temp1, temp2; \
++ int i = (bit) / 32; \
++ int off = (bit) % 32; \
++ u32 mask = (1UL << size) - 1; \
++ unsigned reg_offset; \
++ reg_offset = sizeof(struct ipu_ch_param_word) * w / 4; \
++ reg_offset += i; \
++ temp1 = readl((u32 *)base + reg_offset); \
++ temp1 = mask & (temp1 >> off); \
++ if (((bit)+(size)-1)/32 > i) { \
++ reg_offset++; \
++ temp2 = readl((u32 *)base + reg_offset); \
++ temp2 &= mask >> (off ? (32 - off) : 0); \
++ temp1 |= temp2 << (off ? (32 - off) : 0); \
++ } \
++ temp1; \
++})
++
++static inline int __ipu_ch_get_third_buf_cpmem_num(int ch)
++{
++ switch (ch) {
++ case 8:
++ return 64;
++ case 9:
++ return 65;
++ case 10:
++ return 66;
++ case 13:
++ return 67;
++ case 21:
++ return 68;
++ case 23:
++ return 69;
++ case 27:
++ return 70;
++ case 28:
++ return 71;
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
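++
++/* Channels capable of triple buffering get a shadow CPMEM entry in the
++ * 64..71 range for their third buffer, per the mapping above. */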
++
++static inline void _ipu_ch_params_set_packing(struct ipu_ch_param *p,
++ int red_width, int red_offset,
++ int green_width, int green_offset,
++ int blue_width, int blue_offset,
++ int alpha_width, int alpha_offset)
++{
++ /* Setup red width and offset */
++ ipu_ch_param_set_field(p, 1, 116, 3, red_width - 1);
++ ipu_ch_param_set_field(p, 1, 128, 5, red_offset);
++ /* Setup green width and offset */
++ ipu_ch_param_set_field(p, 1, 119, 3, green_width - 1);
++ ipu_ch_param_set_field(p, 1, 133, 5, green_offset);
++ /* Setup blue width and offset */
++ ipu_ch_param_set_field(p, 1, 122, 3, blue_width - 1);
++ ipu_ch_param_set_field(p, 1, 138, 5, blue_offset);
++ /* Setup alpha width and offset */
++ ipu_ch_param_set_field(p, 1, 125, 3, alpha_width - 1);
++ ipu_ch_param_set_field(p, 1, 143, 5, alpha_offset);
++}
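++
++/* e.g. the RGB565 case in _ipu_ch_param_init below calls this as
++ * (5, 0, 6, 5, 5, 11, 8, 16): 5-bit red at bit 0, 6-bit green at bit 5,
++ * 5-bit blue at bit 11, plus a nominal 8-bit alpha slot at bit 16. */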
++
++static inline void _ipu_ch_param_dump(struct ipu_soc *ipu, int ch)
++{
++ struct ipu_ch_param *p = ipu_ch_param_addr(ipu, ch);
++ dev_dbg(ipu->dev, "ch %d word 0 - %08X %08X %08X %08X %08X\n", ch,
++ p->word[0].data[0], p->word[0].data[1], p->word[0].data[2],
++ p->word[0].data[3], p->word[0].data[4]);
++ dev_dbg(ipu->dev, "ch %d word 1 - %08X %08X %08X %08X %08X\n", ch,
++ p->word[1].data[0], p->word[1].data[1], p->word[1].data[2],
++ p->word[1].data[3], p->word[1].data[4]);
++ dev_dbg(ipu->dev, "PFS 0x%x, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 85, 4));
++ dev_dbg(ipu->dev, "BPP 0x%x, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 107, 3));
++ dev_dbg(ipu->dev, "NPB 0x%x\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 78, 7));
++
++ dev_dbg(ipu->dev, "FW %d, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 125, 13));
++ dev_dbg(ipu->dev, "FH %d, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 138, 12));
++ dev_dbg(ipu->dev, "EBA0 0x%x\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 0, 29) << 3);
++ dev_dbg(ipu->dev, "EBA1 0x%x\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 29, 29) << 3);
++ dev_dbg(ipu->dev, "Stride %d\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 102, 14));
++ dev_dbg(ipu->dev, "scan_order %d\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 113, 1));
++ dev_dbg(ipu->dev, "uv_stride %d\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 128, 14));
++ dev_dbg(ipu->dev, "u_offset 0x%x\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 46, 22) << 3);
++ dev_dbg(ipu->dev, "v_offset 0x%x\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 68, 22) << 3);
++
++ dev_dbg(ipu->dev, "Width0 %d+1, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 116, 3));
++ dev_dbg(ipu->dev, "Width1 %d+1, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 119, 3));
++ dev_dbg(ipu->dev, "Width2 %d+1, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 122, 3));
++ dev_dbg(ipu->dev, "Width3 %d+1, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 125, 3));
++ dev_dbg(ipu->dev, "Offset0 %d, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 128, 5));
++ dev_dbg(ipu->dev, "Offset1 %d, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 133, 5));
++ dev_dbg(ipu->dev, "Offset2 %d, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 138, 5));
++ dev_dbg(ipu->dev, "Offset3 %d\n",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 143, 5));
++}
++
++static inline void fill_cpmem(struct ipu_soc *ipu, int ch, struct ipu_ch_param *params)
++{
++ int i, w;
++ void *addr = ipu_ch_param_addr(ipu, ch);
++
++ /* 2 words, 5 valid data */
++ for (w = 0; w < 2; w++) {
++ for (i = 0; i < 5; i++) {
++ writel(params->word[w].data[i], addr);
++ addr += 4;
++ }
++ addr += 12;
++ }
++}
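++
++/* Each CPMEM word is eight 32-bit registers (5 data + 3 reserved); the
++ * addr += 12 above skips the reserved ones. */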
++
++static inline void _ipu_ch_param_init(struct ipu_soc *ipu, int ch,
++ uint32_t pixel_fmt, uint32_t width,
++ uint32_t height, uint32_t stride,
++ uint32_t u, uint32_t v,
++ uint32_t uv_stride, dma_addr_t addr0,
++ dma_addr_t addr1, dma_addr_t addr2)
++{
++ uint32_t u_offset = 0;
++ uint32_t v_offset = 0;
++ int32_t sub_ch = 0;
++ struct ipu_ch_param params;
++
++ memset(&params, 0, sizeof(params));
++
++ ipu_ch_param_set_field(&params, 0, 125, 13, width - 1);
++
++ if (((ch == 8) || (ch == 9) || (ch == 10)) && !ipu->vdoa_en) {
++ ipu_ch_param_set_field(&params, 0, 138, 12, (height / 2) - 1);
++ ipu_ch_param_set_field(&params, 1, 102, 14, (stride * 2) - 1);
++ } else {
++ /* note: for vdoa+vdi- ch8/9/10, always use band mode */
++ ipu_ch_param_set_field(&params, 0, 138, 12, height - 1);
++ ipu_ch_param_set_field(&params, 1, 102, 14, stride - 1);
++ }
++
++ /* EBA is 8-byte aligned */
++ ipu_ch_param_set_field(&params, 1, 0, 29, addr0 >> 3);
++ ipu_ch_param_set_field(&params, 1, 29, 29, addr1 >> 3);
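++	/* The 29-bit EBA fields store addr >> 3, covering the full 32-bit
++	   address space at 8-byte granularity. */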
++ if (addr0%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's EBA0 is not 8-byte aligned\n", ch);
++ if (addr1%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's EBA1 is not 8-byte aligned\n", ch);
++
++ switch (pixel_fmt) {
++ case IPU_PIX_FMT_GENERIC:
++		/* Represents 8-bit generic data */
++ ipu_ch_param_set_field(&params, 0, 107, 3, 5); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 6); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 63); /* burst size */
++
++ break;
++ case IPU_PIX_FMT_GENERIC_16:
++ /* Represents 16-bit generic data */
++ ipu_ch_param_set_field(&params, 0, 107, 3, 3); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 6); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++
++ break;
++ case IPU_PIX_FMT_GENERIC_32:
++		/* Represents 32-bit generic data */
++ break;
++ case IPU_PIX_FMT_RGB565:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 3); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 5, 0, 6, 5, 5, 11, 8, 16);
++ break;
++ case IPU_PIX_FMT_BGR24:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 1); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 19); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 8, 0, 8, 8, 8, 16, 8, 24);
++ break;
++ case IPU_PIX_FMT_RGB24:
++ case IPU_PIX_FMT_YUV444:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 1); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 19); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 8, 16, 8, 8, 8, 0, 8, 24);
++ break;
++ case IPU_PIX_FMT_VYU444:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 1); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 19); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 8, 8, 8, 0, 8, 16, 8, 24);
++ break;
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_BGR32:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 0); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 8, 8, 8, 16, 8, 24, 8, 0);
++ break;
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_RGB32:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 0); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 8, 24, 8, 16, 8, 8, 8, 0);
++ break;
++ case IPU_PIX_FMT_ABGR32:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 0); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 7); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
++
++ _ipu_ch_params_set_packing(&params, 8, 0, 8, 8, 8, 16, 8, 24);
++ break;
++ case IPU_PIX_FMT_UYVY:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 3); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 0xA); /* pix format */
++ if ((ch == 8) || (ch == 9) || (ch == 10)) {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++ }
++ break;
++ case IPU_PIX_FMT_YUYV:
++ ipu_ch_param_set_field(&params, 0, 107, 3, 3); /* bits/pixel */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 0x8); /* pix format */
++ if ((ch == 8) || (ch == 9) || (ch == 10)) {
++ if (ipu->vdoa_en) {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31);
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15);
++ }
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++ }
++ break;
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YUV420P:
++ ipu_ch_param_set_field(&params, 1, 85, 4, 2); /* pix format */
++
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ u_offset = stride * height;
++ v_offset = u_offset + (uv_stride * height / 2);
++ if ((ch == 8) || (ch == 9) || (ch == 10)) {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
++ uv_stride = uv_stride*2;
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 63); /* burst size */
++ }
++ break;
++ case IPU_PIX_FMT_YVU420P:
++ ipu_ch_param_set_field(&params, 1, 85, 4, 2); /* pix format */
++
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ v_offset = stride * height;
++ u_offset = v_offset + (uv_stride * height / 2);
++ if ((ch == 8) || (ch == 9) || (ch == 10)) {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15); /* burst size */
++ uv_stride = uv_stride*2;
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++ }
++ break;
++ case IPU_PIX_FMT_YVU422P:
++ /* BPP & pixel format */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 1); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ v_offset = (v == 0) ? stride * height : v;
++ u_offset = (u == 0) ? v_offset + v_offset / 2 : u;
++ break;
++ case IPU_PIX_FMT_YUV422P:
++ /* BPP & pixel format */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 1); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ u_offset = (u == 0) ? stride * height : u;
++ v_offset = (v == 0) ? u_offset + u_offset / 2 : v;
++ break;
++ case IPU_PIX_FMT_YUV444P:
++ /* BPP & pixel format */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 0); /* pix format */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++ uv_stride = stride;
++ u_offset = (u == 0) ? stride * height : u;
++ v_offset = (v == 0) ? u_offset * 2 : v;
++ break;
++ case IPU_PIX_FMT_NV12:
++ /* BPP & pixel format */
++ ipu_ch_param_set_field(&params, 1, 85, 4, 4); /* pix format */
++ uv_stride = stride;
++ u_offset = (u == 0) ? stride * height : u;
++ if ((ch == 8) || (ch == 9) || (ch == 10)) {
++ if (ipu->vdoa_en) {
++ /* one field buffer, memory width 64bits */
++ ipu_ch_param_set_field(&params, 1, 78, 7, 63);
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 15);
++				/* top/bottom field in one buffer */
++ uv_stride = uv_stride*2;
++ }
++ } else {
++ ipu_ch_param_set_field(&params, 1, 78, 7, 31); /* burst size */
++ }
++ break;
++ default:
++ dev_err(ipu->dev, "mxc ipu: unimplemented pixel format\n");
++ break;
++ }
++	/* set burst size to 16 */
++
++ if (uv_stride)
++ ipu_ch_param_set_field(&params, 1, 128, 14, uv_stride - 1);
++
++ /* Get the uv offset from user when need cropping */
++ if (u || v) {
++ u_offset = u;
++ v_offset = v;
++ }
++
++ /* UBO and VBO are 22-bit and 8-byte aligned */
++ if (u_offset/8 > 0x3fffff)
++ dev_warn(ipu->dev,
++ "IDMAC%d's U offset exceeds IPU limitation\n", ch);
++ if (v_offset/8 > 0x3fffff)
++ dev_warn(ipu->dev,
++ "IDMAC%d's V offset exceeds IPU limitation\n", ch);
++ if (u_offset%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's U offset is not 8-byte aligned\n", ch);
++ if (v_offset%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's V offset is not 8-byte aligned\n", ch);
++
++ ipu_ch_param_set_field(&params, 0, 46, 22, u_offset / 8);
++ ipu_ch_param_set_field(&params, 0, 68, 22, v_offset / 8);
++
++ dev_dbg(ipu->dev, "initializing idma ch %d @ %p\n", ch, ipu_ch_param_addr(ipu, ch));
++ fill_cpmem(ipu, ch, &params);
++ if (addr2) {
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++
++ ipu_ch_param_set_field(&params, 1, 0, 29, addr2 >> 3);
++ ipu_ch_param_set_field(&params, 1, 29, 29, 0);
++ if (addr2%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's sub-CPMEM entry%d EBA0 is not "
++ "8-byte aligned\n", ch, sub_ch);
++
++ dev_dbg(ipu->dev, "initializing idma ch %d @ %p sub cpmem\n", ch,
++ ipu_ch_param_addr(ipu, sub_ch));
++ fill_cpmem(ipu, sub_ch, &params);
++ }
++};
++
++static inline void _ipu_ch_param_set_burst_size(struct ipu_soc *ipu,
++ uint32_t ch,
++ uint16_t burst_pixels)
++{
++ int32_t sub_ch = 0;
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 78, 7,
++ burst_pixels - 1);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 78, 7,
++ burst_pixels - 1);
++};
++
++static inline int _ipu_ch_param_get_burst_size(struct ipu_soc *ipu, uint32_t ch)
++{
++ return ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 78, 7) + 1;
++};
++
++static inline int _ipu_ch_param_get_bpp(struct ipu_soc *ipu, uint32_t ch)
++{
++ return ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 107, 3);
++};
++
++static inline void _ipu_ch_param_set_buffer(struct ipu_soc *ipu, uint32_t ch,
++ int bufNum, dma_addr_t phyaddr)
++{
++ if (bufNum == 2) {
++ ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (ch <= 0)
++ return;
++ bufNum = 0;
++ }
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 29 * bufNum, 29,
++ phyaddr / 8);
++};
++
++static inline void _ipu_ch_param_set_rotation(struct ipu_soc *ipu, uint32_t ch,
++ ipu_rotate_mode_t rot)
++{
++ u32 temp_rot = bitrev8(rot) >> 5;
++ int32_t sub_ch = 0;
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 0, 119, 3, temp_rot);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 119, 3, temp_rot);
++};
++
++static inline void _ipu_ch_param_set_block_mode(struct ipu_soc *ipu, uint32_t ch)
++{
++ int32_t sub_ch = 0;
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 0, 117, 2, 1);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 117, 2, 1);
++};
++
++static inline void _ipu_ch_param_set_alpha_use_separate_channel(struct ipu_soc *ipu,
++ uint32_t ch,
++ bool option)
++{
++ int32_t sub_ch = 0;
++
++ if (option) {
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 89, 1, 1);
++ } else {
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 89, 1, 0);
++ }
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++
++ if (option) {
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 89, 1, 1);
++ } else {
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 89, 1, 0);
++ }
++};
++
++static inline void _ipu_ch_param_set_alpha_condition_read(struct ipu_soc *ipu, uint32_t ch)
++{
++ int32_t sub_ch = 0;
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 149, 1, 1);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 149, 1, 1);
++};
++
++static inline void _ipu_ch_param_set_alpha_buffer_memory(struct ipu_soc *ipu, uint32_t ch)
++{
++ int alp_mem_idx;
++ int32_t sub_ch = 0;
++
++ switch (ch) {
++ case 14: /* PRP graphic */
++ alp_mem_idx = 0;
++ break;
++ case 15: /* PP graphic */
++ alp_mem_idx = 1;
++ break;
++ case 23: /* DP BG SYNC graphic */
++ alp_mem_idx = 4;
++ break;
++ case 27: /* DP FG SYNC graphic */
++ alp_mem_idx = 2;
++ break;
++ default:
++ dev_err(ipu->dev, "unsupported correlative channel of local "
++ "alpha channel\n");
++ return;
++ }
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 90, 3, alp_mem_idx);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 90, 3, alp_mem_idx);
++};
++
++static inline void _ipu_ch_param_set_interlaced_scan(struct ipu_soc *ipu, uint32_t ch)
++{
++ u32 stride;
++ int32_t sub_ch = 0;
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++
++ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, ch), 0, 113, 1, 1);
++ if (sub_ch > 0)
++ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 113, 1, 1);
++ stride = ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 1, 102, 14) + 1;
++ /* ILO is 20-bit and 8-byte aligned */
++ if (stride/8 > 0xfffff)
++ dev_warn(ipu->dev,
++ "IDMAC%d's ILO exceeds IPU limitation\n", ch);
++ if (stride%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's ILO is not 8-byte aligned\n", ch);
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 58, 20, stride / 8);
++ if (sub_ch > 0)
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 58, 20,
++ stride / 8);
++ stride *= 2;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 102, 14, stride - 1);
++ if (sub_ch > 0)
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 102, 14,
++ stride - 1);
++};
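++
++/* Illustrative reading: the line stride is doubled and ILO is set to the
++ * original stride (stored in 8-byte units), so each pass skips every other
++ * line and the opposite field starts one line into the buffer. */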
++
++static inline void _ipu_ch_param_set_axi_id(struct ipu_soc *ipu, uint32_t ch, uint32_t id)
++{
++ int32_t sub_ch = 0;
++
++ id %= 4;
++
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 1, 93, 2, id);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 93, 2, id);
++};
++
++/*
++ * IDMAC U/V offset update support. The U and V inputs are left untouched;
++ * the offsets are recalculated from vertical_offset and horizontal_offset.
++ */
++static inline void _ipu_ch_offset_update(struct ipu_soc *ipu,
++ int ch,
++ uint32_t pixel_fmt,
++ uint32_t width,
++ uint32_t height,
++ uint32_t stride,
++ uint32_t u,
++ uint32_t v,
++ uint32_t uv_stride,
++ uint32_t vertical_offset,
++ uint32_t horizontal_offset)
++{
++ uint32_t u_offset = 0;
++ uint32_t v_offset = 0;
++ uint32_t old_offset = 0;
++ uint32_t u_fix = 0;
++ uint32_t v_fix = 0;
++ int32_t sub_ch = 0;
++
++ switch (pixel_fmt) {
++ case IPU_PIX_FMT_GENERIC:
++ case IPU_PIX_FMT_GENERIC_16:
++ case IPU_PIX_FMT_GENERIC_32:
++ case IPU_PIX_FMT_RGB565:
++ case IPU_PIX_FMT_BGR24:
++ case IPU_PIX_FMT_RGB24:
++ case IPU_PIX_FMT_YUV444:
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_BGR32:
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_RGB32:
++ case IPU_PIX_FMT_ABGR32:
++ case IPU_PIX_FMT_UYVY:
++ case IPU_PIX_FMT_YUYV:
++ break;
++
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YUV420P:
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ u_offset = stride * (height - vertical_offset - 1) +
++ (stride - horizontal_offset) +
++ (uv_stride * vertical_offset / 2) +
++ horizontal_offset / 2;
++ v_offset = u_offset + (uv_stride * height / 2);
++ u_fix = u ? (u + (uv_stride * vertical_offset / 2) +
++ (horizontal_offset / 2) -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ u_offset;
++ v_fix = v ? (v + (uv_stride * vertical_offset / 2) +
++ (horizontal_offset / 2) -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ v_offset;
++
++ break;
++ case IPU_PIX_FMT_YVU420P:
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ v_offset = stride * (height - vertical_offset - 1) +
++ (stride - horizontal_offset) +
++ (uv_stride * vertical_offset / 2) +
++ horizontal_offset / 2;
++ u_offset = v_offset + (uv_stride * height / 2);
++ u_fix = u ? (u + (uv_stride * vertical_offset / 2) +
++ (horizontal_offset / 2) -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ u_offset;
++ v_fix = v ? (v + (uv_stride * vertical_offset / 2) +
++ (horizontal_offset / 2) -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ v_offset;
++
++ break;
++ case IPU_PIX_FMT_YVU422P:
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ v_offset = stride * (height - vertical_offset - 1) +
++ (stride - horizontal_offset) +
++ (uv_stride * vertical_offset) +
++ horizontal_offset / 2;
++ u_offset = v_offset + uv_stride * height;
++ u_fix = u ? (u + (uv_stride * vertical_offset) +
++ horizontal_offset / 2 -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ u_offset;
++ v_fix = v ? (v + (uv_stride * vertical_offset) +
++ horizontal_offset / 2 -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ v_offset;
++ break;
++ case IPU_PIX_FMT_YUV422P:
++ if (uv_stride < stride / 2)
++ uv_stride = stride / 2;
++
++ u_offset = stride * (height - vertical_offset - 1) +
++ (stride - horizontal_offset) +
++ (uv_stride * vertical_offset) +
++ horizontal_offset / 2;
++ v_offset = u_offset + uv_stride * height;
++ u_fix = u ? (u + (uv_stride * vertical_offset) +
++ horizontal_offset / 2 -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ u_offset;
++ v_fix = v ? (v + (uv_stride * vertical_offset) +
++ horizontal_offset / 2 -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ v_offset;
++ break;
++
++ case IPU_PIX_FMT_YUV444P:
++ uv_stride = stride;
++ u_offset = stride * (height - vertical_offset - 1) +
++ (stride - horizontal_offset) +
++ (uv_stride * vertical_offset) +
++ horizontal_offset;
++ v_offset = u_offset + uv_stride * height;
++ u_fix = u ? (u + (uv_stride * vertical_offset) +
++ horizontal_offset -
++ (stride * vertical_offset) -
++ (horizontal_offset)) :
++ u_offset;
++ v_fix = v ? (v + (uv_stride * vertical_offset) +
++ horizontal_offset -
++ (stride * vertical_offset) -
++ (horizontal_offset)) :
++ v_offset;
++ break;
++ case IPU_PIX_FMT_NV12:
++ uv_stride = stride;
++ u_offset = stride * (height - vertical_offset - 1) +
++ (stride - horizontal_offset) +
++ (uv_stride * vertical_offset / 2) +
++ horizontal_offset;
++ u_fix = u ? (u + (uv_stride * vertical_offset / 2) +
++ horizontal_offset -
++ (stride * vertical_offset) - (horizontal_offset)) :
++ u_offset;
++
++ break;
++ default:
++ dev_err(ipu->dev, "mxc ipu: unimplemented pixel format\n");
++ break;
++ }
++
++
++ if (u_fix > u_offset)
++ u_offset = u_fix;
++
++ if (v_fix > v_offset)
++ v_offset = v_fix;
++
++ /* UBO and VBO are 22-bit and 8-byte aligned */
++ if (u_offset/8 > 0x3fffff)
++ dev_warn(ipu->dev,
++ "IDMAC%d's U offset exceeds IPU limitation\n", ch);
++ if (v_offset/8 > 0x3fffff)
++ dev_warn(ipu->dev,
++ "IDMAC%d's V offset exceeds IPU limitation\n", ch);
++ if (u_offset%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's U offset is not 8-byte aligned\n", ch);
++ if (v_offset%8)
++ dev_warn(ipu->dev,
++ "IDMAC%d's V offset is not 8-byte aligned\n", ch);
++
++ old_offset = ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 46, 22);
++ if (old_offset != u_offset / 8)
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 0, 46, 22, u_offset / 8);
++ old_offset = ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 68, 22);
++ if (old_offset != v_offset / 8)
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, ch), 0, 68, 22, v_offset / 8);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ old_offset = ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 46, 22);
++ if (old_offset != u_offset / 8)
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 46, 22, u_offset / 8);
++ old_offset = ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 68, 22);
++ if (old_offset != v_offset / 8)
++ ipu_ch_param_mod_field_io(ipu_ch_param_addr(ipu, sub_ch), 0, 68, 22, v_offset / 8);
++};
++
++static inline void _ipu_ch_params_set_alpha_width(struct ipu_soc *ipu, uint32_t ch, int alpha_width)
++{
++ int32_t sub_ch = 0;
++
++ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, ch), 1, 125, 3, alpha_width - 1);
++
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, sub_ch), 1, 125, 3, alpha_width - 1);
++};
++
++static inline void _ipu_ch_param_set_bandmode(struct ipu_soc *ipu,
++ uint32_t ch, uint32_t band_height)
++{
++ int32_t sub_ch = 0;
++
++ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, ch),
++ 0, 114, 3, band_height - 1);
++ sub_ch = __ipu_ch_get_third_buf_cpmem_num(ch);
++ if (sub_ch <= 0)
++ return;
++ ipu_ch_param_set_field_io(ipu_ch_param_addr(ipu, sub_ch),
++ 0, 114, 3, band_height - 1);
++
++ dev_dbg(ipu->dev, "BNDM 0x%x, ",
++ ipu_ch_param_read_field_io(ipu_ch_param_addr(ipu, ch), 0, 114, 3));
++}
++
++/*
++ * The IPUv3 IDMAC has a bug when reading 32bpp pixels from a graphics
++ * plane whose alpha component sits in the most significant 8 bits. The
++ * bug only affects cases in which the relevant separate alpha channel
++ * is enabled.
++ *
++ * Return true on a bad alpha component position, otherwise return false.
++ */
++static inline bool _ipu_ch_param_bad_alpha_pos(uint32_t pixel_fmt)
++{
++ switch (pixel_fmt) {
++ case IPU_PIX_FMT_BGRA32:
++ case IPU_PIX_FMT_BGR32:
++ case IPU_PIX_FMT_RGBA32:
++ case IPU_PIX_FMT_RGB32:
++ return true;
++ }
++
++ return false;
++}
++#endif
+diff -Nur linux-4.1.3/drivers/mxc/ipu3/ipu_pixel_clk.c linux-xbian-imx6/drivers/mxc/ipu3/ipu_pixel_clk.c
+--- linux-4.1.3/drivers/mxc/ipu3/ipu_pixel_clk.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/ipu3/ipu_pixel_clk.c 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,317 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file ipu_pixel_clk.c
++ *
++ * @brief IPU pixel clock implementation
++ *
++ * @ingroup IPU
++ */
++
++#include <linux/clk-provider.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/ipu-v3.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++
++#include "ipu_prv.h"
++#include "ipu_regs.h"
++
++/*
++ * Muxed clock implementation
++ */
++struct clk_di_mux {
++ struct clk_hw hw;
++ u8 ipu_id;
++ u8 di_id;
++ u8 flags;
++ u8 index;
++};
++#define to_clk_di_mux(_hw) container_of(_hw, struct clk_di_mux, hw)
++
++static int _ipu_pixel_clk_set_parent(struct clk_hw *hw, u8 index)
++{
++ struct clk_di_mux *mux = to_clk_di_mux(hw);
++ struct ipu_soc *ipu = ipu_get_soc(mux->ipu_id);
++ u32 di_gen;
++
++ di_gen = ipu_di_read(ipu, mux->di_id, DI_GENERAL);
++ if (index == 0)
++ /* ipu1_clk or ipu2_clk internal clk */
++ di_gen &= ~DI_GEN_DI_CLK_EXT;
++ else
++ di_gen |= DI_GEN_DI_CLK_EXT;
++
++ ipu_di_write(ipu, mux->di_id, di_gen, DI_GENERAL);
++ mux->index = index;
++ pr_debug("ipu_pixel_clk: di_clk_ext:0x%x, di_gen reg:0x%x.\n",
++ !!(di_gen & DI_GEN_DI_CLK_EXT), di_gen);
++ return 0;
++}
++
++static u8 _ipu_pixel_clk_get_parent(struct clk_hw *hw)
++{
++ struct clk_di_mux *mux = to_clk_di_mux(hw);
++
++ return mux->index;
++}
++
++const struct clk_ops clk_mux_di_ops = {
++ .get_parent = _ipu_pixel_clk_get_parent,
++ .set_parent = _ipu_pixel_clk_set_parent,
++};
++
++struct clk *clk_register_mux_pix_clk(struct device *dev, const char *name,
++ const char **parent_names, u8 num_parents, unsigned long flags,
++ u8 ipu_id, u8 di_id, u8 clk_mux_flags)
++{
++ struct clk_di_mux *mux;
++ struct clk *clk;
++ struct clk_init_data init;
++
++ mux = kzalloc(sizeof(struct clk_di_mux), GFP_KERNEL);
++ if (!mux)
++ return ERR_PTR(-ENOMEM);
++
++ init.name = name;
++ init.ops = &clk_mux_di_ops;
++ init.flags = flags;
++ init.parent_names = parent_names;
++ init.num_parents = num_parents;
++
++ mux->ipu_id = ipu_id;
++ mux->di_id = di_id;
++ mux->flags = clk_mux_flags | CLK_SET_RATE_PARENT;
++ mux->hw.init = &init;
++
++ clk = clk_register(dev, &mux->hw);
++ if (IS_ERR(clk))
++ kfree(mux);
++
++ return clk;
++}
++
++/*
++ * Divider clock implementation
++ */
++struct clk_di_div {
++ struct clk_hw hw;
++ u8 ipu_id;
++ u8 di_id;
++ u8 flags;
++};
++#define to_clk_di_div(_hw) container_of(_hw, struct clk_di_div, hw)
++
++static unsigned long _ipu_pixel_clk_div_recalc_rate(struct clk_hw *hw,
++ unsigned long parent_rate)
++{
++ struct clk_di_div *di_div = to_clk_di_div(hw);
++ struct ipu_soc *ipu = ipu_get_soc(di_div->ipu_id);
++ u32 div;
++ u64 final_rate = (unsigned long long)parent_rate * 16;
++
++ _ipu_get(ipu);
++ div = ipu_di_read(ipu, di_div->di_id, DI_BS_CLKGEN0);
++ _ipu_put(ipu);
++ pr_debug("ipu_di%d read BS_CLKGEN0 div:%d, final_rate:%lld, prate:%ld\n",
++ di_div->di_id, div, final_rate, parent_rate);
++
++ if (div == 0)
++ return 0;
++ do_div(final_rate, div);
++
++ return (unsigned long)final_rate;
++}
++
++static long _ipu_pixel_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
++ unsigned long *parent_clk_rate)
++{
++ u64 div, final_rate;
++ u32 remainder;
++ u64 parent_rate = (unsigned long long)(*parent_clk_rate) * 16;
++
++ /*
++ * Calculate divider
++ * Fractional part is 4 bits,
++ * so simply multiply by 2^4 to get fractional part.
++ */
++ div = parent_rate;
++ remainder = do_div(div, rate);
++ /* Round the divider value */
++ if (remainder > (rate/2))
++ div++;
++ if (div < 0x10) /* Min DI disp clock divider is 1 */
++ div = 0x10;
++ if (div & ~0xFEF)
++ div &= 0xFF8;
++ else {
++ /* Round up divider if it gets us closer to desired pix clk */
++ if ((div & 0xC) == 0xC) {
++ div += 0x10;
++ div &= ~0xF;
++ }
++ }
++ final_rate = parent_rate;
++ do_div(final_rate, div);
++
++ return final_rate;
++}
++
++static int _ipu_pixel_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
++ unsigned long parent_clk_rate)
++{
++ struct clk_di_div *di_div = to_clk_di_div(hw);
++ struct ipu_soc *ipu = ipu_get_soc(di_div->ipu_id);
++ u64 div, parent_rate;
++ u32 remainder;
++
++ parent_rate = (unsigned long long)parent_clk_rate * 16;
++ div = parent_rate;
++ remainder = do_div(div, rate);
++ /* Round the divider value */
++ if (remainder > (rate/2))
++ div++;
++
++ /* Round up divider if it gets us closer to desired pix clk */
++ if ((div & 0xC) == 0xC) {
++ div += 0x10;
++ div &= ~0xF;
++ }
++ if (div > 0x1000)
++ pr_err("Overflow, di:%d, DI_BS_CLKGEN0 div:0x%x\n",
++ di_div->di_id, (u32)div);
++ _ipu_get(ipu);
++ ipu_di_write(ipu, di_div->di_id, (u32)div, DI_BS_CLKGEN0);
++
++ /* Setup pixel clock timing */
++ /* FIXME: needs to be more flexible */
++ /* Down time is half of period */
++ ipu_di_write(ipu, di_div->di_id, ((u32)div / 16) << 16, DI_BS_CLKGEN1);
++ _ipu_put(ipu);
++
++ return 0;
++}
++
++static struct clk_ops clk_div_ops = {
++ .recalc_rate = _ipu_pixel_clk_div_recalc_rate,
++ .round_rate = _ipu_pixel_clk_div_round_rate,
++ .set_rate = _ipu_pixel_clk_div_set_rate,
++};
++
++struct clk *clk_register_div_pix_clk(struct device *dev, const char *name,
++ const char *parent_name, unsigned long flags,
++ u8 ipu_id, u8 di_id, u8 clk_div_flags)
++{
++ struct clk_di_div *di_div;
++ struct clk *clk;
++ struct clk_init_data init;
++
++ di_div = kzalloc(sizeof(struct clk_di_div), GFP_KERNEL);
++ if (!di_div)
++ return ERR_PTR(-ENOMEM);
++
++ /* struct clk_di_div assignments */
++ di_div->ipu_id = ipu_id;
++ di_div->di_id = di_id;
++ di_div->flags = clk_div_flags;
++
++ init.name = name;
++ init.ops = &clk_div_ops;
++ init.flags = flags | CLK_SET_RATE_PARENT;
++ init.parent_names = parent_name ? &parent_name : NULL;
++ init.num_parents = parent_name ? 1 : 0;
++
++ di_div->hw.init = &init;
++
++ clk = clk_register(dev, &di_div->hw);
++ if (IS_ERR(clk))
++ kfree(di_div);
++
++ return clk;
++}
++
++/*
++ * Gated clock implementation
++ */
++struct clk_di_gate {
++ struct clk_hw hw;
++ u8 ipu_id;
++ u8 di_id;
++ u8 flags;
++};
++#define to_clk_di_gate(_hw) container_of(_hw, struct clk_di_gate, hw)
++
++static int _ipu_pixel_clk_enable(struct clk_hw *hw)
++{
++ struct clk_di_gate *gate = to_clk_di_gate(hw);
++ struct ipu_soc *ipu = ipu_get_soc(gate->ipu_id);
++ u32 disp_gen;
++
++ disp_gen = ipu_cm_read(ipu, IPU_DISP_GEN);
++ disp_gen |= gate->di_id ? DI1_COUNTER_RELEASE : DI0_COUNTER_RELEASE;
++ ipu_cm_write(ipu, disp_gen, IPU_DISP_GEN);
++
++ return 0;
++}
++
++static void _ipu_pixel_clk_disable(struct clk_hw *hw)
++{
++ struct clk_di_gate *gate = to_clk_di_gate(hw);
++ struct ipu_soc *ipu = ipu_get_soc(gate->ipu_id);
++ u32 disp_gen;
++
++ disp_gen = ipu_cm_read(ipu, IPU_DISP_GEN);
++ disp_gen &= gate->di_id ? ~DI1_COUNTER_RELEASE : ~DI0_COUNTER_RELEASE;
++ ipu_cm_write(ipu, disp_gen, IPU_DISP_GEN);
++
++}
++
++
++static struct clk_ops clk_gate_di_ops = {
++ .enable = _ipu_pixel_clk_enable,
++ .disable = _ipu_pixel_clk_disable,
++};
++
++struct clk *clk_register_gate_pix_clk(struct device *dev, const char *name,
++ const char *parent_name, unsigned long flags,
++ u8 ipu_id, u8 di_id, u8 clk_gate_flags)
++{
++ struct clk_di_gate *gate;
++ struct clk *clk;
++ struct clk_init_data init;
++
++ gate = kzalloc(sizeof(struct clk_di_gate), GFP_KERNEL);
++ if (!gate)
++ return ERR_PTR(-ENOMEM);
++
++ gate->ipu_id = ipu_id;
++ gate->di_id = di_id;
++ gate->flags = clk_gate_flags;
++
++ init.name = name;
++ init.ops = &clk_gate_di_ops;
++ init.flags = flags | CLK_SET_RATE_PARENT;
++ init.parent_names = parent_name ? &parent_name : NULL;
++ init.num_parents = parent_name ? 1 : 0;
++
++ gate->hw.init = &init;
++
++ clk = clk_register(dev, &gate->hw);
++ if (IS_ERR(clk))
++ kfree(gate);
++
++ return clk;
++}
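
For reference: _ipu_pixel_clk_div_round_rate() and _ipu_pixel_clk_div_set_rate()
above treat DI_BS_CLKGEN0 as a fixed-point divider with a 4-bit fractional
part, hence the multiply-by-16 before dividing (and the div/16 written to
DI_BS_CLKGEN1 as the half-period down time). A self-contained sketch of that
rounding under the same convention; the clock rates below are illustrative,
not taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* Returns the DI divider in 4-bit-fraction fixed point: parent * 16 / pixclk,
 * rounded to nearest, clamped to the minimum divider 1.0 == 0x10. */
static uint32_t di_divider(uint64_t parent_rate, uint64_t pix_clk)
{
	uint64_t div = (parent_rate * 16 + pix_clk / 2) / pix_clk;

	if (div < 0x10)
		div = 0x10;
	return (uint32_t)div;
}

int main(void)
{
	/* e.g. a 264 MHz IPU clock driving a 65 MHz panel */
	uint32_t div = di_divider(264000000ULL, 65000000ULL);

	printf("DI_BS_CLKGEN0 = 0x%x (divider = %u + %u/16)\n",
	       div, div >> 4, div & 0xf);
	return 0;
}
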
+diff -Nur linux-4.1.3/drivers/mxc/ipu3/ipu_prv.h linux-xbian-imx6/drivers/mxc/ipu3/ipu_prv.h
+--- linux-4.1.3/drivers/mxc/ipu3/ipu_prv.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/ipu3/ipu_prv.h 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,363 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++#ifndef __INCLUDE_IPU_PRV_H__
++#define __INCLUDE_IPU_PRV_H__
++
++#include <linux/clkdev.h>
++#include <linux/device.h>
++#include <linux/fsl_devices.h>
++#include <linux/interrupt.h>
++#include <linux/types.h>
++
++#define MXC_IPU_MAX_NUM 2
++#define MXC_DI_NUM_PER_IPU 2
++
++/* Globals */
++extern int dmfc_type_setup;
++
++#define IDMA_CHAN_INVALID 0xFF
++#define HIGH_RESOLUTION_WIDTH 1024
++
++struct ipu_irq_node {
++ irqreturn_t(*handler) (int, void *); /*!< the ISR */
++ const char *name; /*!< device associated with the interrupt */
++ void *dev_id; /*!< some unique information for the ISR */
++ __u32 flags; /*!< not used */
++};
++
++enum csc_type_t {
++ RGB2YUV = 0,
++ YUV2RGB,
++ RGB2RGB,
++ YUV2YUV,
++ CSC_NONE,
++ CSC_NUM
++};
++
++enum imx_ipu_type {
++ IMX6Q_IPU,
++};
++
++struct ipu_pltfm_data {
++ u32 id;
++ u32 devtype;
++ int (*init) (int);
++ void (*pg) (int);
++
++ /*
++ * Bypass reset to avoid the display channel being
++ * stopped by probe, since it may already have been
++ * started by the bootloader.
++ */
++ bool bypass_reset;
++};
++
++struct ipu_soc {
++ bool online;
++ struct ipu_pltfm_data *pdata;
++
++ /*clk*/
++ struct clk *ipu_clk;
++ struct clk *di_clk[2];
++ struct clk *di_clk_sel[2];
++ struct clk *pixel_clk[2];
++ struct clk *pixel_clk_sel[2];
++ struct clk *csi_clk[2];
++
++ /*irq*/
++ int irq_sync;
++ int irq_err;
++ struct ipu_irq_node irq_list[IPU_IRQ_COUNT];
++
++ /*reg*/
++ void __iomem *cm_reg;
++ void __iomem *idmac_reg;
++ void __iomem *dp_reg;
++ void __iomem *ic_reg;
++ void __iomem *dc_reg;
++ void __iomem *dc_tmpl_reg;
++ void __iomem *dmfc_reg;
++ void __iomem *di_reg[2];
++ void __iomem *smfc_reg;
++ void __iomem *csi_reg[2];
++ void __iomem *cpmem_base;
++ void __iomem *tpmem_base;
++ void __iomem *disp_base[2];
++ void __iomem *vdi_reg;
++
++ struct device *dev;
++
++ ipu_channel_t csi_channel[2];
++ ipu_channel_t using_ic_dirct_ch;
++ unsigned char dc_di_assignment[10];
++ bool sec_chan_en[24];
++ bool thrd_chan_en[24];
++ bool chan_is_interlaced[52];
++ uint32_t channel_init_mask;
++ uint32_t channel_enable_mask;
++
++ /*use count*/
++ int dc_use_count;
++ int dp_use_count;
++ int dmfc_use_count;
++ int smfc_use_count;
++ int ic_use_count;
++ int rot_use_count;
++ int vdi_use_count;
++ int di_use_count[2];
++ int csi_use_count[2];
++
++ struct mutex mutex_lock;
++ spinlock_t int_reg_spin_lock;
++ spinlock_t rdy_reg_spin_lock;
++
++ int dmfc_size_28;
++ int dmfc_size_29;
++ int dmfc_size_24;
++ int dmfc_size_27;
++ int dmfc_size_23;
++
++ enum csc_type_t fg_csc_type;
++ enum csc_type_t bg_csc_type;
++ bool color_key_4rgb;
++ bool dc_swap;
++ struct completion dc_comp;
++ struct completion csi_comp;
++
++ struct rot_mem {
++ void *vaddr;
++ dma_addr_t paddr;
++ int size;
++ } rot_dma[2];
++
++ int vdoa_en;
++ struct task_struct *thread[2];
++
++ char pixel_clk_0[12];
++ char pixel_clk_1[12];
++ char pixel_clk_0_sel[15];
++ char pixel_clk_1_sel[15];
++ char pixel_clk_0_div[15];
++ char pixel_clk_1_div[15];
++ char ipu_pixel_clk_sel[3][9];
++};
++
++struct ipu_channel {
++ u8 video_in_dma;
++ u8 alpha_in_dma;
++ u8 graph_in_dma;
++ u8 out_dma;
++};
++
++enum ipu_dmfc_type {
++ DMFC_NORMAL = 0,
++ DMFC_HIGH_RESOLUTION_DC,
++ DMFC_HIGH_RESOLUTION_DP,
++ DMFC_HIGH_RESOLUTION_ONLY_DP,
++};
++
++static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->cm_reg + offset);
++}
++
++static inline void ipu_cm_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->cm_reg + offset);
++}
++
++static inline u32 ipu_idmac_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->idmac_reg + offset);
++}
++
++static inline void ipu_idmac_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->idmac_reg + offset);
++}
++
++static inline u32 ipu_dc_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->dc_reg + offset);
++}
++
++static inline void ipu_dc_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->dc_reg + offset);
++}
++
++static inline u32 ipu_dc_tmpl_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->dc_tmpl_reg + offset);
++}
++
++static inline void ipu_dc_tmpl_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->dc_tmpl_reg + offset);
++}
++
++static inline u32 ipu_dmfc_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->dmfc_reg + offset);
++}
++
++static inline void ipu_dmfc_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->dmfc_reg + offset);
++}
++
++static inline u32 ipu_dp_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->dp_reg + offset);
++}
++
++static inline void ipu_dp_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->dp_reg + offset);
++}
++
++static inline u32 ipu_di_read(struct ipu_soc *ipu, int di, unsigned offset)
++{
++ return readl(ipu->di_reg[di] + offset);
++}
++
++static inline void ipu_di_write(struct ipu_soc *ipu, int di,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->di_reg[di] + offset);
++}
++
++static inline u32 ipu_csi_read(struct ipu_soc *ipu, int csi, unsigned offset)
++{
++ return readl(ipu->csi_reg[csi] + offset);
++}
++
++static inline void ipu_csi_write(struct ipu_soc *ipu, int csi,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->csi_reg[csi] + offset);
++}
++
++static inline u32 ipu_smfc_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->smfc_reg + offset);
++}
++
++static inline void ipu_smfc_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->smfc_reg + offset);
++}
++
++static inline u32 ipu_vdi_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->vdi_reg + offset);
++}
++
++static inline void ipu_vdi_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->vdi_reg + offset);
++}
++
++static inline u32 ipu_ic_read(struct ipu_soc *ipu, unsigned offset)
++{
++ return readl(ipu->ic_reg + offset);
++}
++
++static inline void ipu_ic_write(struct ipu_soc *ipu,
++ u32 value, unsigned offset)
++{
++ writel(value, ipu->ic_reg + offset);
++}
++
++int register_ipu_device(struct ipu_soc *ipu, int id);
++void unregister_ipu_device(struct ipu_soc *ipu, int id);
++ipu_color_space_t format_to_colorspace(uint32_t fmt);
++bool ipu_pixel_format_has_alpha(uint32_t fmt);
++
++void ipu_dump_registers(struct ipu_soc *ipu);
++
++uint32_t _ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel);
++
++void ipu_disp_init(struct ipu_soc *ipu);
++void _ipu_init_dc_mappings(struct ipu_soc *ipu);
++int _ipu_dp_init(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t in_pixel_fmt,
++ uint32_t out_pixel_fmt);
++void _ipu_dp_uninit(struct ipu_soc *ipu, ipu_channel_t channel);
++void _ipu_dc_init(struct ipu_soc *ipu, int dc_chan, int di, bool interlaced, uint32_t pixel_fmt);
++void _ipu_dc_uninit(struct ipu_soc *ipu, int dc_chan);
++void _ipu_dp_dc_enable(struct ipu_soc *ipu, ipu_channel_t channel);
++void _ipu_dp_dc_disable(struct ipu_soc *ipu, ipu_channel_t channel, bool swap);
++void _ipu_dmfc_init(struct ipu_soc *ipu, int dmfc_type, int first);
++void _ipu_dmfc_set_wait4eot(struct ipu_soc *ipu, int dma_chan, int width);
++void _ipu_dmfc_set_burst_size(struct ipu_soc *ipu, int dma_chan, int burst_size);
++int _ipu_disp_chan_is_interlaced(struct ipu_soc *ipu, ipu_channel_t channel);
++
++void _ipu_ic_enable_task(struct ipu_soc *ipu, ipu_channel_t channel);
++void _ipu_ic_disable_task(struct ipu_soc *ipu, ipu_channel_t channel);
++int _ipu_ic_init_prpvf(struct ipu_soc *ipu, ipu_channel_params_t *params,
++ bool src_is_csi);
++void _ipu_vdi_init(struct ipu_soc *ipu, ipu_channel_t channel, ipu_channel_params_t *params);
++void _ipu_vdi_uninit(struct ipu_soc *ipu);
++void _ipu_ic_uninit_prpvf(struct ipu_soc *ipu);
++void _ipu_ic_init_rotate_vf(struct ipu_soc *ipu, ipu_channel_params_t *params);
++void _ipu_ic_uninit_rotate_vf(struct ipu_soc *ipu);
++void _ipu_ic_init_csi(struct ipu_soc *ipu, ipu_channel_params_t *params);
++void _ipu_ic_uninit_csi(struct ipu_soc *ipu);
++int _ipu_ic_init_prpenc(struct ipu_soc *ipu, ipu_channel_params_t *params,
++ bool src_is_csi);
++void _ipu_ic_uninit_prpenc(struct ipu_soc *ipu);
++void _ipu_ic_init_rotate_enc(struct ipu_soc *ipu, ipu_channel_params_t *params);
++void _ipu_ic_uninit_rotate_enc(struct ipu_soc *ipu);
++int _ipu_ic_init_pp(struct ipu_soc *ipu, ipu_channel_params_t *params);
++void _ipu_ic_uninit_pp(struct ipu_soc *ipu);
++void _ipu_ic_init_rotate_pp(struct ipu_soc *ipu, ipu_channel_params_t *params);
++void _ipu_ic_uninit_rotate_pp(struct ipu_soc *ipu);
++int _ipu_ic_idma_init(struct ipu_soc *ipu, int dma_chan, uint16_t width, uint16_t height,
++ int burst_size, ipu_rotate_mode_t rot);
++void _ipu_vdi_toggle_top_field_man(struct ipu_soc *ipu);
++int _ipu_csi_init(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t csi);
++int _ipu_csi_set_mipi_di(struct ipu_soc *ipu, uint32_t num, uint32_t di_val, uint32_t csi);
++void ipu_csi_set_test_generator(struct ipu_soc *ipu, bool active, uint32_t r_value,
++ uint32_t g_value, uint32_t b_value,
++ uint32_t pix_clk, uint32_t csi);
++void _ipu_csi_ccir_err_detection_enable(struct ipu_soc *ipu, uint32_t csi);
++void _ipu_csi_ccir_err_detection_disable(struct ipu_soc *ipu, uint32_t csi);
++void _ipu_csi_wait4eof(struct ipu_soc *ipu, ipu_channel_t channel);
++void _ipu_smfc_init(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t mipi_id, uint32_t csi);
++void _ipu_smfc_set_burst_size(struct ipu_soc *ipu, ipu_channel_t channel, uint32_t bs);
++void _ipu_dp_set_csc_coefficients(struct ipu_soc *ipu, ipu_channel_t channel, int32_t param[][3]);
++int32_t _ipu_disp_set_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
++ int16_t x_pos, int16_t y_pos);
++int32_t _ipu_disp_get_window_pos(struct ipu_soc *ipu, ipu_channel_t channel,
++ int16_t *x_pos, int16_t *y_pos);
++void _ipu_get(struct ipu_soc *ipu);
++void _ipu_put(struct ipu_soc *ipu);
++
++struct clk *clk_register_mux_pix_clk(struct device *dev, const char *name,
++ const char **parent_names, u8 num_parents, unsigned long flags,
++ u8 ipu_id, u8 di_id, u8 clk_mux_flags);
++struct clk *clk_register_div_pix_clk(struct device *dev, const char *name,
++ const char *parent_name, unsigned long flags,
++ u8 ipu_id, u8 di_id, u8 clk_div_flags);
++struct clk *clk_register_gate_pix_clk(struct device *dev, const char *name,
++ const char *parent_name, unsigned long flags,
++ u8 ipu_id, u8 di_id, u8 clk_gate_flags);
++#endif /* __INCLUDE_IPU_PRV_H__ */
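
For reference: the static inline accessors above are thin readl()/writel()
wrappers over each sub-block's ioremapped base. A typical read-modify-write
through them follows the pixel-clock gate in ipu_pixel_clk.c; a minimal
sketch, assuming a valid struct ipu_soc plus the IPU_DISP_GEN and
DI0_COUNTER_RELEASE definitions from ipu_regs.h (the function name is
illustrative, not part of the patch):

static void example_release_di0_counter(struct ipu_soc *ipu)
{
	u32 disp_gen;

	/* read-modify-write of IPU_DISP_GEN in the common-module block */
	disp_gen = ipu_cm_read(ipu, IPU_DISP_GEN);
	disp_gen |= DI0_COUNTER_RELEASE;
	ipu_cm_write(ipu, disp_gen, IPU_DISP_GEN);
}
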
+diff -Nur linux-4.1.3/drivers/mxc/ipu3/ipu_regs.h linux-xbian-imx6/drivers/mxc/ipu3/ipu_regs.h
+--- linux-4.1.3/drivers/mxc/ipu3/ipu_regs.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/ipu3/ipu_regs.h 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,749 @@
++/*
++ * Copyright (C) 2005-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*
++ * @file ipu_regs.h
++ *
++ * @brief IPU Register definitions
++ *
++ * @ingroup IPU
++ */
++#ifndef __IPU_REGS_INCLUDED__
++#define __IPU_REGS_INCLUDED__
++
++enum imx_ipu_rev {
++ IPU_V3DEX = 2,
++ IPU_V3M,
++ IPU_V3H,
++};
++
++/*
++ * hw_rev 2: IPUV3DEX
++ * hw_rev 3: IPUV3M
++ * hw_rev 4: IPUV3H
++ */
++extern int g_ipu_hw_rev;
++
++#define IPU_MAX_VDI_IN_WIDTH ({g_ipu_hw_rev >= 3 ? \
++ (968) : \
++ (720); })
++#define IPU_DISP0_BASE 0x00000000
++#define IPU_MCU_T_DEFAULT 8
++#define IPU_DISP1_BASE ({g_ipu_hw_rev < 4 ? \
++ (IPU_MCU_T_DEFAULT << 25) : \
++ (0x00000000); })
++#define IPUV3DEX_REG_BASE 0x1E000000
++#define IPUV3M_REG_BASE 0x06000000
++#define IPUV3H_REG_BASE 0x00200000
++
++#define IPU_CM_REG_BASE 0x00000000
++#define IPU_IDMAC_REG_BASE 0x00008000
++#define IPU_ISP_REG_BASE 0x00010000
++#define IPU_DP_REG_BASE 0x00018000
++#define IPU_IC_REG_BASE 0x00020000
++#define IPU_IRT_REG_BASE 0x00028000
++#define IPU_CSI0_REG_BASE 0x00030000
++#define IPU_CSI1_REG_BASE 0x00038000
++#define IPU_DI0_REG_BASE 0x00040000
++#define IPU_DI1_REG_BASE 0x00048000
++#define IPU_SMFC_REG_BASE 0x00050000
++#define IPU_DC_REG_BASE 0x00058000
++#define IPU_DMFC_REG_BASE 0x00060000
++#define IPU_VDI_REG_BASE 0x00068000
++#define IPU_CPMEM_REG_BASE ({g_ipu_hw_rev >= 4 ? \
++ (0x00100000) : \
++ (0x01000000); })
++#define IPU_LUT_REG_BASE 0x01020000
++#define IPU_SRM_REG_BASE ({g_ipu_hw_rev >= 4 ? \
++ (0x00140000) : \
++ (0x01040000); })
++#define IPU_TPM_REG_BASE ({g_ipu_hw_rev >= 4 ? \
++ (0x00160000) : \
++ (0x01060000); })
++#define IPU_DC_TMPL_REG_BASE ({g_ipu_hw_rev >= 4 ? \
++ (0x00180000) : \
++ (0x01080000); })
++#define IPU_ISP_TBPR_REG_BASE 0x010C0000
++
++/* Register addresses */
++/* IPU Common registers */
++#define IPU_CM_REG(offset) (offset)
++
++#define IPU_CONF IPU_CM_REG(0)
++#define IPU_SRM_PRI1 IPU_CM_REG(0x00A0)
++#define IPU_SRM_PRI2 IPU_CM_REG(0x00A4)
++#define IPU_FS_PROC_FLOW1 IPU_CM_REG(0x00A8)
++#define IPU_FS_PROC_FLOW2 IPU_CM_REG(0x00AC)
++#define IPU_FS_PROC_FLOW3 IPU_CM_REG(0x00B0)
++#define IPU_FS_DISP_FLOW1 IPU_CM_REG(0x00B4)
++#define IPU_FS_DISP_FLOW2 IPU_CM_REG(0x00B8)
++#define IPU_SKIP IPU_CM_REG(0x00BC)
++#define IPU_DISP_ALT_CONF IPU_CM_REG(0x00C0)
++#define IPU_DISP_GEN IPU_CM_REG(0x00C4)
++#define IPU_DISP_ALT1 IPU_CM_REG(0x00C8)
++#define IPU_DISP_ALT2 IPU_CM_REG(0x00CC)
++#define IPU_DISP_ALT3 IPU_CM_REG(0x00D0)
++#define IPU_DISP_ALT4 IPU_CM_REG(0x00D4)
++#define IPU_SNOOP IPU_CM_REG(0x00D8)
++#define IPU_MEM_RST IPU_CM_REG(0x00DC)
++#define IPU_PM IPU_CM_REG(0x00E0)
++#define IPU_GPR IPU_CM_REG(0x00E4)
++#define IPU_CHA_DB_MODE_SEL(ch) IPU_CM_REG(0x0150 + 4 * ((ch) / 32))
++#define IPU_ALT_CHA_DB_MODE_SEL(ch) IPU_CM_REG(0x0168 + 4 * ((ch) / 32))
++/*
++ * IPUv3D doesn't support triple buffer, so point
++ * IPU_CHA_TRB_MODE_SEL, IPU_CHA_TRIPLE_CUR_BUF and
++ * IPU_CHA_BUF2_RDY to readonly
++ * IPU_ALT_CUR_BUF0 for IPUv3D.
++ */
++#define IPU_CHA_TRB_MODE_SEL(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0178 + 4 * ((ch) / 32)) : \
++ (0x012C); })
++#define IPU_CHA_TRIPLE_CUR_BUF(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0258 + \
++ 4 * (((ch) * 2) / 32)) : \
++ (0x012C); })
++#define IPU_CHA_BUF2_RDY(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0288 + 4 * ((ch) / 32)) : \
++ (0x012C); })
++#define IPU_CHA_CUR_BUF(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x023C + 4 * ((ch) / 32)) : \
++ (0x0124 + 4 * ((ch) / 32)); })
++#define IPU_ALT_CUR_BUF0 IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0244) : \
++ (0x012C); })
++#define IPU_ALT_CUR_BUF1 IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0248) : \
++ (0x0130); })
++#define IPU_SRM_STAT IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x024C) : \
++ (0x0134); })
++#define IPU_PROC_TASK_STAT IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0250) : \
++ (0x0138); })
++#define IPU_DISP_TASK_STAT IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0254) : \
++ (0x013C); })
++#define IPU_CHA_BUF0_RDY(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0268 + 4 * ((ch) / 32)) : \
++ (0x0140 + 4 * ((ch) / 32)); })
++#define IPU_CHA_BUF1_RDY(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0270 + 4 * ((ch) / 32)) : \
++ (0x0148 + 4 * ((ch) / 32)); })
++#define IPU_ALT_CHA_BUF0_RDY(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0278 + 4 * ((ch) / 32)) : \
++ (0x0158 + 4 * ((ch) / 32)); })
++#define IPU_ALT_CHA_BUF1_RDY(ch) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0280 + 4 * ((ch) / 32)) : \
++ (0x0160 + 4 * ((ch) / 32)); })
++
++#define IPU_INT_CTRL(n) IPU_CM_REG(0x003C + 4 * ((n) - 1))
++#define IPU_INT_STAT(n) IPU_CM_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0200 + 4 * ((n) - 1)) : \
++ (0x00E8 + 4 * ((n) - 1)); })
++
++#define IPUIRQ_2_STATREG(irq) IPU_CM_REG(IPU_INT_STAT(1) + 4 * ((irq) / 32))
++#define IPUIRQ_2_CTRLREG(irq) IPU_CM_REG(IPU_INT_CTRL(1) + 4 * ((irq) / 32))
++#define IPUIRQ_2_MASK(irq) (1UL << ((irq) & 0x1F))
++
++/* IPU VDI registers */
++#define IPU_VDI_REG(offset) (offset)
++
++#define VDI_FSIZE IPU_VDI_REG(0)
++#define VDI_C IPU_VDI_REG(0x0004)
++
++/* IPU CSI Registers */
++#define IPU_CSI_REG(offset) (offset)
++
++#define CSI_SENS_CONF IPU_CSI_REG(0)
++#define CSI_SENS_FRM_SIZE IPU_CSI_REG(0x0004)
++#define CSI_ACT_FRM_SIZE IPU_CSI_REG(0x0008)
++#define CSI_OUT_FRM_CTRL IPU_CSI_REG(0x000C)
++#define CSI_TST_CTRL IPU_CSI_REG(0x0010)
++#define CSI_CCIR_CODE_1 IPU_CSI_REG(0x0014)
++#define CSI_CCIR_CODE_2 IPU_CSI_REG(0x0018)
++#define CSI_CCIR_CODE_3 IPU_CSI_REG(0x001C)
++#define CSI_MIPI_DI IPU_CSI_REG(0x0020)
++#define CSI_SKIP IPU_CSI_REG(0x0024)
++#define CSI_CPD_CTRL IPU_CSI_REG(0x0028)
++#define CSI_CPD_RC(n) IPU_CSI_REG(0x002C + 4 * (n))
++#define CSI_CPD_RS(n) IPU_CSI_REG(0x004C + 4 * (n))
++#define CSI_CPD_GRC(n) IPU_CSI_REG(0x005C + 4 * (n))
++#define CSI_CPD_GRS(n) IPU_CSI_REG(0x007C + 4 * (n))
++#define CSI_CPD_GBC(n) IPU_CSI_REG(0x008C + 4 * (n))
++#define CSI_CPD_GBS(n) IPU_CSI_REG(0x00AC + 4 * (n))
++#define CSI_CPD_BC(n) IPU_CSI_REG(0x00BC + 4 * (n))
++#define CSI_CPD_BS(n) IPU_CSI_REG(0x00DC + 4 * (n))
++#define CSI_CPD_OFFSET1 IPU_CSI_REG(0x00EC)
++#define CSI_CPD_OFFSET2 IPU_CSI_REG(0x00F0)
++
++/* IPU SMFC Registers */
++#define IPU_SMFC_REG(offset) (offset)
++
++#define SMFC_MAP IPU_SMFC_REG(0)
++#define SMFC_WMC IPU_SMFC_REG(0x0004)
++#define SMFC_BS IPU_SMFC_REG(0x0008)
++
++/* IPU IC Registers */
++#define IPU_IC_REG(offset) (offset)
++
++#define IC_CONF IPU_IC_REG(0)
++#define IC_PRP_ENC_RSC IPU_IC_REG(0x0004)
++#define IC_PRP_VF_RSC IPU_IC_REG(0x0008)
++#define IC_PP_RSC IPU_IC_REG(0x000C)
++#define IC_CMBP_1 IPU_IC_REG(0x0010)
++#define IC_CMBP_2 IPU_IC_REG(0x0014)
++#define IC_IDMAC_1 IPU_IC_REG(0x0018)
++#define IC_IDMAC_2 IPU_IC_REG(0x001C)
++#define IC_IDMAC_3 IPU_IC_REG(0x0020)
++#define IC_IDMAC_4 IPU_IC_REG(0x0024)
++
++/* IPU IDMAC Registers */
++#define IPU_IDMAC_REG(offset) (offset)
++
++#define IDMAC_CONF IPU_IDMAC_REG(0x0000)
++#define IDMAC_CHA_EN(ch) IPU_IDMAC_REG(0x0004 + 4 * ((ch) / 32))
++#define IDMAC_SEP_ALPHA IPU_IDMAC_REG(0x000C)
++#define IDMAC_ALT_SEP_ALPHA IPU_IDMAC_REG(0x0010)
++#define IDMAC_CHA_PRI(ch) IPU_IDMAC_REG(0x0014 + 4 * ((ch) / 32))
++#define IDMAC_WM_EN(ch) IPU_IDMAC_REG(0x001C + 4 * ((ch) / 32))
++#define IDMAC_CH_LOCK_EN_1 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0024) : 0; })
++#define IDMAC_CH_LOCK_EN_2 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0028) : \
++ (0x0024); })
++#define IDMAC_SUB_ADDR_0 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x002C) : \
++ (0x0028); })
++#define IDMAC_SUB_ADDR_1 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0030) : \
++ (0x002C); })
++#define IDMAC_SUB_ADDR_2 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0034) : \
++ (0x0030); })
++/*
++ * IPUv3D doesn't support IDMAC_SUB_ADDR_3 and IDMAC_SUB_ADDR_4,
++ * so point them to readonly IDMAC_CHA_BUSY1 for IPUv3D.
++ */
++#define IDMAC_SUB_ADDR_3 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0038) : \
++ (0x0040); })
++#define IDMAC_SUB_ADDR_4 IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x003C) : \
++ (0x0040); })
++#define IDMAC_BAND_EN(ch) IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0040 + 4 * ((ch) / 32)) : \
++ (0x0034 + 4 * ((ch) / 32)); })
++#define IDMAC_CHA_BUSY(ch) IPU_IDMAC_REG({g_ipu_hw_rev >= 2 ? \
++ (0x0100 + 4 * ((ch) / 32)) : \
++ (0x0040 + 4 * ((ch) / 32)); })
++
++/* IPU DI Registers */
++#define IPU_DI_REG(offset) (offset)
++
++#define DI_GENERAL IPU_DI_REG(0)
++#define DI_BS_CLKGEN0 IPU_DI_REG(0x0004)
++#define DI_BS_CLKGEN1 IPU_DI_REG(0x0008)
++#define DI_SW_GEN0(gen) IPU_DI_REG(0x000C + 4 * ((gen) - 1))
++#define DI_SW_GEN1(gen) IPU_DI_REG(0x0030 + 4 * ((gen) - 1))
++#define DI_STP_REP(gen) IPU_DI_REG(0x0148 + 4 * (((gen) - 1) / 2))
++#define DI_SYNC_AS_GEN IPU_DI_REG(0x0054)
++#define DI_DW_GEN(gen) IPU_DI_REG(0x0058 + 4 * (gen))
++#define DI_DW_SET(gen, set) IPU_DI_REG(0x0088 + 4 * ((gen) + 0xC * (set)))
++#define DI_SER_CONF IPU_DI_REG(0x015C)
++#define DI_SSC IPU_DI_REG(0x0160)
++#define DI_POL IPU_DI_REG(0x0164)
++#define DI_AW0 IPU_DI_REG(0x0168)
++#define DI_AW1 IPU_DI_REG(0x016C)
++#define DI_SCR_CONF IPU_DI_REG(0x0170)
++#define DI_STAT IPU_DI_REG(0x0174)
++
++/* IPU DMFC Registers */
++#define IPU_DMFC_REG(offset) (offset)
++
++#define DMFC_RD_CHAN IPU_DMFC_REG(0)
++#define DMFC_WR_CHAN IPU_DMFC_REG(0x0004)
++#define DMFC_WR_CHAN_DEF IPU_DMFC_REG(0x0008)
++#define DMFC_DP_CHAN IPU_DMFC_REG(0x000C)
++#define DMFC_DP_CHAN_DEF IPU_DMFC_REG(0x0010)
++#define DMFC_GENERAL1 IPU_DMFC_REG(0x0014)
++#define DMFC_GENERAL2 IPU_DMFC_REG(0x0018)
++#define DMFC_IC_CTRL IPU_DMFC_REG(0x001C)
++#define DMFC_STAT IPU_DMFC_REG(0x0020)
++
++/* IPU DC Registers */
++#define IPU_DC_REG(offset) (offset)
++
++#define DC_MAP_CONF_PTR(n) IPU_DC_REG(0x0108 + ((n) & ~0x1) * 2)
++#define DC_MAP_CONF_VAL(n) IPU_DC_REG(0x0144 + ((n) & ~0x1) * 2)
++
++#define _RL_CH_2_OFFSET(ch) (((ch) == 0) ? 8 : ( \
++ ((ch) == 1) ? 0x24 : ( \
++ ((ch) == 2) ? 0x40 : ( \
++ ((ch) == 5) ? 0x64 : ( \
++ ((ch) == 6) ? 0x80 : ( \
++ ((ch) == 8) ? 0x9C : ( \
++ ((ch) == 9) ? 0xBC : (-1))))))))
++#define DC_RL_CH(ch, evt) IPU_DC_REG(_RL_CH_2_OFFSET(ch) + \
++ ((evt) & ~0x1) * 2)
++
++#define DC_EVT_NF 0
++#define DC_EVT_NL 1
++#define DC_EVT_EOF 2
++#define DC_EVT_NFIELD 3
++#define DC_EVT_EOL 4
++#define DC_EVT_EOFIELD 5
++#define DC_EVT_NEW_ADDR 6
++#define DC_EVT_NEW_CHAN 7
++#define DC_EVT_NEW_DATA 8
++
++#define DC_EVT_NEW_ADDR_W_0 0
++#define DC_EVT_NEW_ADDR_W_1 1
++#define DC_EVT_NEW_CHAN_W_0 2
++#define DC_EVT_NEW_CHAN_W_1 3
++#define DC_EVT_NEW_DATA_W_0 4
++#define DC_EVT_NEW_DATA_W_1 5
++#define DC_EVT_NEW_ADDR_R_0 6
++#define DC_EVT_NEW_ADDR_R_1 7
++#define DC_EVT_NEW_CHAN_R_0 8
++#define DC_EVT_NEW_CHAN_R_1 9
++#define DC_EVT_NEW_DATA_R_0 10
++#define DC_EVT_NEW_DATA_R_1 11
++#define DC_EVEN_UGDE0 12
++#define DC_ODD_UGDE0 13
++#define DC_EVEN_UGDE1 14
++#define DC_ODD_UGDE1 15
++#define DC_EVEN_UGDE2 16
++#define DC_ODD_UGDE2 17
++#define DC_EVEN_UGDE3 18
++#define DC_ODD_UGDE3 19
++
++#define dc_ch_offset(ch) \
++({ \
++ const u8 _offset[] = { \
++ 0, 0x1C, 0x38, 0x54, 0x58, 0x5C, 0x78, 0, 0x94, 0xB4}; \
++ _offset[ch]; \
++})
++#define DC_WR_CH_CONF(ch) IPU_DC_REG(dc_ch_offset(ch))
++#define DC_WR_CH_ADDR(ch) IPU_DC_REG(dc_ch_offset(ch) + 4)
++
++#define DC_WR_CH_CONF_1 IPU_DC_REG(0x001C)
++#define DC_WR_CH_ADDR_1 IPU_DC_REG(0x0020)
++#define DC_WR_CH_CONF_5 IPU_DC_REG(0x005C)
++#define DC_WR_CH_ADDR_5 IPU_DC_REG(0x0060)
++#define DC_GEN IPU_DC_REG(0x00D4)
++#define DC_DISP_CONF1(disp) IPU_DC_REG(0x00D8 + 4 * (disp))
++#define DC_DISP_CONF2(disp) IPU_DC_REG(0x00E8 + 4 * (disp))
++#define DC_STAT IPU_DC_REG(0x01C8)
++#define DC_UGDE_0(evt) IPU_DC_REG(0x0174 + 16 * (evt))
++#define DC_UGDE_1(evt) IPU_DC_REG(0x0178 + 16 * (evt))
++#define DC_UGDE_2(evt) IPU_DC_REG(0x017C + 16 * (evt))
++#define DC_UGDE_3(evt) IPU_DC_REG(0x0180 + 16 * (evt))
++
++/* IPU DP Registers */
++#define IPU_DP_REG(offset) (offset)
++
++#define DP_SYNC 0
++#define DP_ASYNC0 0x60
++#define DP_ASYNC1 0xBC
++#define DP_COM_CONF(flow) IPU_DP_REG(flow)
++#define DP_GRAPH_WIND_CTRL(flow) IPU_DP_REG(0x0004 + (flow))
++#define DP_FG_POS(flow) IPU_DP_REG(0x0008 + (flow))
++#define DP_GAMMA_C(flow, i) IPU_DP_REG(0x0014 + (flow) + 4 * (i))
++#define DP_GAMMA_S(flow, i) IPU_DP_REG(0x0034 + (flow) + 4 * (i))
++#define DP_CSC_A_0(flow) IPU_DP_REG(0x0044 + (flow))
++#define DP_CSC_A_1(flow) IPU_DP_REG(0x0048 + (flow))
++#define DP_CSC_A_2(flow) IPU_DP_REG(0x004C + (flow))
++#define DP_CSC_A_3(flow) IPU_DP_REG(0x0050 + (flow))
++#define DP_CSC_0(flow) IPU_DP_REG(0x0054 + (flow))
++#define DP_CSC_1(flow) IPU_DP_REG(0x0058 + (flow))
++
++enum {
++ IPU_CONF_CSI0_EN = 0x00000001,
++ IPU_CONF_CSI1_EN = 0x00000002,
++ IPU_CONF_IC_EN = 0x00000004,
++ IPU_CONF_ROT_EN = 0x00000008,
++ IPU_CONF_ISP_EN = 0x00000010,
++ IPU_CONF_DP_EN = 0x00000020,
++ IPU_CONF_DI0_EN = 0x00000040,
++ IPU_CONF_DI1_EN = 0x00000080,
++ IPU_CONF_DMFC_EN = 0x00000400,
++ IPU_CONF_SMFC_EN = 0x00000100,
++ IPU_CONF_DC_EN = 0x00000200,
++ IPU_CONF_VDI_EN = 0x00001000,
++ IPU_CONF_IDMAC_DIS = 0x00400000,
++ IPU_CONF_IC_DMFC_SEL = 0x02000000,
++ IPU_CONF_IC_DMFC_SYNC = 0x04000000,
++ IPU_CONF_VDI_DMFC_SYNC = 0x08000000,
++ IPU_CONF_CSI0_DATA_SOURCE = 0x10000000,
++ IPU_CONF_CSI0_DATA_SOURCE_OFFSET = 28,
++ IPU_CONF_CSI1_DATA_SOURCE = 0x20000000,
++ IPU_CONF_IC_INPUT = 0x40000000,
++ IPU_CONF_CSI_SEL = 0x80000000,
++
++ DI0_COUNTER_RELEASE = 0x01000000,
++ DI1_COUNTER_RELEASE = 0x02000000,
++
++ FS_PRPVF_ROT_SRC_SEL_MASK = 0x00000F00,
++ FS_PRPVF_ROT_SRC_SEL_OFFSET = 8,
++ FS_PRPENC_ROT_SRC_SEL_MASK = 0x0000000F,
++ FS_PRPENC_ROT_SRC_SEL_OFFSET = 0,
++ FS_PP_ROT_SRC_SEL_MASK = 0x000F0000,
++ FS_PP_ROT_SRC_SEL_OFFSET = 16,
++ FS_PP_SRC_SEL_MASK = 0x0000F000,
++ FS_PP_SRC_SEL_VDOA = 0x00008000,
++ FS_PP_SRC_SEL_OFFSET = 12,
++ FS_PRP_SRC_SEL_MASK = 0x0F000000,
++ FS_PRP_SRC_SEL_OFFSET = 24,
++ FS_VF_IN_VALID = 0x80000000,
++ FS_ENC_IN_VALID = 0x40000000,
++ FS_VDI_SRC_SEL_MASK = 0x30000000,
++ FS_VDI_SRC_SEL_VDOA = 0x20000000,
++ FS_VDOA_DEST_SEL_MASK = 0x00030000,
++ FS_VDOA_DEST_SEL_VDI = 0x00020000,
++ FS_VDOA_DEST_SEL_IC = 0x00010000,
++ FS_VDI_SRC_SEL_OFFSET = 28,
++
++
++ FS_PRPENC_DEST_SEL_MASK = 0x0000000F,
++ FS_PRPENC_DEST_SEL_OFFSET = 0,
++ FS_PRPVF_DEST_SEL_MASK = 0x000000F0,
++ FS_PRPVF_DEST_SEL_OFFSET = 4,
++ FS_PRPVF_ROT_DEST_SEL_MASK = 0x00000F00,
++ FS_PRPVF_ROT_DEST_SEL_OFFSET = 8,
++ FS_PP_DEST_SEL_MASK = 0x0000F000,
++ FS_PP_DEST_SEL_OFFSET = 12,
++ FS_PP_ROT_DEST_SEL_MASK = 0x000F0000,
++ FS_PP_ROT_DEST_SEL_OFFSET = 16,
++ FS_PRPENC_ROT_DEST_SEL_MASK = 0x00F00000,
++ FS_PRPENC_ROT_DEST_SEL_OFFSET = 20,
++
++ FS_SMFC0_DEST_SEL_MASK = 0x0000000F,
++ FS_SMFC0_DEST_SEL_OFFSET = 0,
++ FS_SMFC1_DEST_SEL_MASK = 0x00000070,
++ FS_SMFC1_DEST_SEL_OFFSET = 4,
++ FS_SMFC2_DEST_SEL_MASK = 0x00000780,
++ FS_SMFC2_DEST_SEL_OFFSET = 7,
++ FS_SMFC3_DEST_SEL_MASK = 0x00003800,
++ FS_SMFC3_DEST_SEL_OFFSET = 11,
++
++ FS_DC1_SRC_SEL_MASK = 0x00F00000,
++ FS_DC1_SRC_SEL_OFFSET = 20,
++ FS_DC2_SRC_SEL_MASK = 0x000F0000,
++ FS_DC2_SRC_SEL_OFFSET = 16,
++ FS_DP_SYNC0_SRC_SEL_MASK = 0x0000000F,
++ FS_DP_SYNC0_SRC_SEL_OFFSET = 0,
++ FS_DP_SYNC1_SRC_SEL_MASK = 0x000000F0,
++ FS_DP_SYNC1_SRC_SEL_OFFSET = 4,
++ FS_DP_ASYNC0_SRC_SEL_MASK = 0x00000F00,
++ FS_DP_ASYNC0_SRC_SEL_OFFSET = 8,
++ FS_DP_ASYNC1_SRC_SEL_MASK = 0x0000F000,
++ FS_DP_ASYNC1_SRC_SEL_OFFSET = 12,
++
++ FS_AUTO_REF_PER_MASK = 0,
++ FS_AUTO_REF_PER_OFFSET = 16,
++
++ TSTAT_VF_MASK = 0x0000000C,
++ TSTAT_VF_OFFSET = 2,
++ TSTAT_VF_ROT_MASK = 0x00000300,
++ TSTAT_VF_ROT_OFFSET = 8,
++ TSTAT_ENC_MASK = 0x00000003,
++ TSTAT_ENC_OFFSET = 0,
++ TSTAT_ENC_ROT_MASK = 0x000000C0,
++ TSTAT_ENC_ROT_OFFSET = 6,
++ TSTAT_PP_MASK = 0x00000030,
++ TSTAT_PP_OFFSET = 4,
++ TSTAT_PP_ROT_MASK = 0x00000C00,
++ TSTAT_PP_ROT_OFFSET = 10,
++
++ TASK_STAT_IDLE = 0,
++ TASK_STAT_ACTIVE = 1,
++ TASK_STAT_WAIT4READY = 2,
++
++ /* Image Converter Register bits */
++ IC_CONF_PRPENC_EN = 0x00000001,
++ IC_CONF_PRPENC_CSC1 = 0x00000002,
++ IC_CONF_PRPENC_ROT_EN = 0x00000004,
++ IC_CONF_PRPVF_EN = 0x00000100,
++ IC_CONF_PRPVF_CSC1 = 0x00000200,
++ IC_CONF_PRPVF_CSC2 = 0x00000400,
++ IC_CONF_PRPVF_CMB = 0x00000800,
++ IC_CONF_PRPVF_ROT_EN = 0x00001000,
++ IC_CONF_PP_EN = 0x00010000,
++ IC_CONF_PP_CSC1 = 0x00020000,
++ IC_CONF_PP_CSC2 = 0x00040000,
++ IC_CONF_PP_CMB = 0x00080000,
++ IC_CONF_PP_ROT_EN = 0x00100000,
++ IC_CONF_IC_GLB_LOC_A = 0x10000000,
++ IC_CONF_KEY_COLOR_EN = 0x20000000,
++ IC_CONF_RWS_EN = 0x40000000,
++ IC_CONF_CSI_MEM_WR_EN = 0x80000000,
++
++ IC_RSZ_MAX_RESIZE_RATIO = 0x00004000,
++
++ IC_IDMAC_1_CB0_BURST_16 = 0x00000001,
++ IC_IDMAC_1_CB1_BURST_16 = 0x00000002,
++ IC_IDMAC_1_CB2_BURST_16 = 0x00000004,
++ IC_IDMAC_1_CB3_BURST_16 = 0x00000008,
++ IC_IDMAC_1_CB4_BURST_16 = 0x00000010,
++ IC_IDMAC_1_CB5_BURST_16 = 0x00000020,
++ IC_IDMAC_1_CB6_BURST_16 = 0x00000040,
++ IC_IDMAC_1_CB7_BURST_16 = 0x00000080,
++ IC_IDMAC_1_PRPENC_ROT_MASK = 0x00003800,
++ IC_IDMAC_1_PRPENC_ROT_OFFSET = 11,
++ IC_IDMAC_1_PRPVF_ROT_MASK = 0x0001C000,
++ IC_IDMAC_1_PRPVF_ROT_OFFSET = 14,
++ IC_IDMAC_1_PP_ROT_MASK = 0x000E0000,
++ IC_IDMAC_1_PP_ROT_OFFSET = 17,
++ IC_IDMAC_1_PP_FLIP_RS = 0x00400000,
++ IC_IDMAC_1_PRPVF_FLIP_RS = 0x00200000,
++ IC_IDMAC_1_PRPENC_FLIP_RS = 0x00100000,
++
++ IC_IDMAC_2_PRPENC_HEIGHT_MASK = 0x000003FF,
++ IC_IDMAC_2_PRPENC_HEIGHT_OFFSET = 0,
++ IC_IDMAC_2_PRPVF_HEIGHT_MASK = 0x000FFC00,
++ IC_IDMAC_2_PRPVF_HEIGHT_OFFSET = 10,
++ IC_IDMAC_2_PP_HEIGHT_MASK = 0x3FF00000,
++ IC_IDMAC_2_PP_HEIGHT_OFFSET = 20,
++
++ IC_IDMAC_3_PRPENC_WIDTH_MASK = 0x000003FF,
++ IC_IDMAC_3_PRPENC_WIDTH_OFFSET = 0,
++ IC_IDMAC_3_PRPVF_WIDTH_MASK = 0x000FFC00,
++ IC_IDMAC_3_PRPVF_WIDTH_OFFSET = 10,
++ IC_IDMAC_3_PP_WIDTH_MASK = 0x3FF00000,
++ IC_IDMAC_3_PP_WIDTH_OFFSET = 20,
++
++ CSI_SENS_CONF_DATA_FMT_SHIFT = 8,
++ CSI_SENS_CONF_DATA_FMT_MASK = 0x00000700,
++ CSI_SENS_CONF_DATA_FMT_RGB_YUV444 = 0L,
++ CSI_SENS_CONF_DATA_FMT_YUV422_YUYV = 1L,
++ CSI_SENS_CONF_DATA_FMT_YUV422_UYVY = 2L,
++ CSI_SENS_CONF_DATA_FMT_BAYER = 3L,
++ CSI_SENS_CONF_DATA_FMT_RGB565 = 4L,
++ CSI_SENS_CONF_DATA_FMT_RGB555 = 5L,
++ CSI_SENS_CONF_DATA_FMT_RGB444 = 6L,
++ CSI_SENS_CONF_DATA_FMT_JPEG = 7L,
++
++ CSI_SENS_CONF_VSYNC_POL_SHIFT = 0,
++ CSI_SENS_CONF_HSYNC_POL_SHIFT = 1,
++ CSI_SENS_CONF_DATA_POL_SHIFT = 2,
++ CSI_SENS_CONF_PIX_CLK_POL_SHIFT = 3,
++ CSI_SENS_CONF_SENS_PRTCL_MASK = 0x00000070L,
++ CSI_SENS_CONF_SENS_PRTCL_SHIFT = 4,
++ CSI_SENS_CONF_PACK_TIGHT_SHIFT = 7,
++ CSI_SENS_CONF_DATA_WIDTH_SHIFT = 11,
++ CSI_SENS_CONF_EXT_VSYNC_SHIFT = 15,
++ CSI_SENS_CONF_DIVRATIO_SHIFT = 16,
++
++ CSI_SENS_CONF_DIVRATIO_MASK = 0x00FF0000L,
++ CSI_SENS_CONF_DATA_DEST_SHIFT = 24,
++ CSI_SENS_CONF_DATA_DEST_MASK = 0x07000000L,
++ CSI_SENS_CONF_JPEG8_EN_SHIFT = 27,
++ CSI_SENS_CONF_JPEG_EN_SHIFT = 28,
++ CSI_SENS_CONF_FORCE_EOF_SHIFT = 29,
++ CSI_SENS_CONF_DATA_EN_POL_SHIFT = 31,
++
++ CSI_DATA_DEST_ISP = 1L,
++ CSI_DATA_DEST_IC = 2L,
++ CSI_DATA_DEST_IDMAC = 4L,
++
++ CSI_CCIR_ERR_DET_EN = 0x01000000L,
++ CSI_HORI_DOWNSIZE_EN = 0x80000000L,
++ CSI_VERT_DOWNSIZE_EN = 0x40000000L,
++ CSI_TEST_GEN_MODE_EN = 0x01000000L,
++
++ CSI_HSC_MASK = 0x1FFF0000,
++ CSI_HSC_SHIFT = 16,
++ CSI_VSC_MASK = 0x00000FFF,
++ CSI_VSC_SHIFT = 0,
++
++ CSI_TEST_GEN_R_MASK = 0x000000FFL,
++ CSI_TEST_GEN_R_SHIFT = 0,
++ CSI_TEST_GEN_G_MASK = 0x0000FF00L,
++ CSI_TEST_GEN_G_SHIFT = 8,
++ CSI_TEST_GEN_B_MASK = 0x00FF0000L,
++ CSI_TEST_GEN_B_SHIFT = 16,
++
++ CSI_MIPI_DI0_MASK = 0x000000FFL,
++ CSI_MIPI_DI0_SHIFT = 0,
++ CSI_MIPI_DI1_MASK = 0x0000FF00L,
++ CSI_MIPI_DI1_SHIFT = 8,
++ CSI_MIPI_DI2_MASK = 0x00FF0000L,
++ CSI_MIPI_DI2_SHIFT = 16,
++ CSI_MIPI_DI3_MASK = 0xFF000000L,
++ CSI_MIPI_DI3_SHIFT = 24,
++
++ CSI_MAX_RATIO_SKIP_ISP_MASK = 0x00070000L,
++ CSI_MAX_RATIO_SKIP_ISP_SHIFT = 16,
++ CSI_SKIP_ISP_MASK = 0x00F80000L,
++ CSI_SKIP_ISP_SHIFT = 19,
++ CSI_MAX_RATIO_SKIP_SMFC_MASK = 0x00000007L,
++ CSI_MAX_RATIO_SKIP_SMFC_SHIFT = 0,
++ CSI_SKIP_SMFC_MASK = 0x000000F8L,
++ CSI_SKIP_SMFC_SHIFT = 3,
++ CSI_ID_2_SKIP_MASK = 0x00000300L,
++ CSI_ID_2_SKIP_SHIFT = 8,
++
++ CSI_COLOR_FIRST_ROW_MASK = 0x00000002L,
++ CSI_COLOR_FIRST_COMP_MASK = 0x00000001L,
++
++ SMFC_MAP_CH0_MASK = 0x00000007L,
++ SMFC_MAP_CH0_SHIFT = 0,
++ SMFC_MAP_CH1_MASK = 0x00000038L,
++ SMFC_MAP_CH1_SHIFT = 3,
++ SMFC_MAP_CH2_MASK = 0x000001C0L,
++ SMFC_MAP_CH2_SHIFT = 6,
++ SMFC_MAP_CH3_MASK = 0x00000E00L,
++ SMFC_MAP_CH3_SHIFT = 9,
++
++ SMFC_WM0_SET_MASK = 0x00000007L,
++ SMFC_WM0_SET_SHIFT = 0,
++ SMFC_WM1_SET_MASK = 0x000001C0L,
++ SMFC_WM1_SET_SHIFT = 6,
++ SMFC_WM2_SET_MASK = 0x00070000L,
++ SMFC_WM2_SET_SHIFT = 16,
++ SMFC_WM3_SET_MASK = 0x01C00000L,
++ SMFC_WM3_SET_SHIFT = 22,
++
++ SMFC_WM0_CLR_MASK = 0x00000038L,
++ SMFC_WM0_CLR_SHIFT = 3,
++ SMFC_WM1_CLR_MASK = 0x00000E00L,
++ SMFC_WM1_CLR_SHIFT = 9,
++ SMFC_WM2_CLR_MASK = 0x00380000L,
++ SMFC_WM2_CLR_SHIFT = 19,
++ SMFC_WM3_CLR_MASK = 0x0E000000L,
++ SMFC_WM3_CLR_SHIFT = 25,
++
++ SMFC_BS0_MASK = 0x0000000FL,
++ SMFC_BS0_SHIFT = 0,
++ SMFC_BS1_MASK = 0x000000F0L,
++ SMFC_BS1_SHIFT = 4,
++ SMFC_BS2_MASK = 0x00000F00L,
++ SMFC_BS2_SHIFT = 8,
++ SMFC_BS3_MASK = 0x0000F000L,
++ SMFC_BS3_SHIFT = 12,
++
++ PF_CONF_TYPE_MASK = 0x00000007,
++ PF_CONF_TYPE_SHIFT = 0,
++ PF_CONF_PAUSE_EN = 0x00000010,
++ PF_CONF_RESET = 0x00008000,
++ PF_CONF_PAUSE_ROW_MASK = 0x00FF0000,
++ PF_CONF_PAUSE_ROW_SHIFT = 16,
++
++ DI_DW_GEN_ACCESS_SIZE_OFFSET = 24,
++ DI_DW_GEN_COMPONENT_SIZE_OFFSET = 16,
++
++ DI_GEN_DI_CLK_EXT = 0x100000,
++ DI_GEN_POLARITY_DISP_CLK = 0x00020000,
++ DI_GEN_POLARITY_1 = 0x00000001,
++ DI_GEN_POLARITY_2 = 0x00000002,
++ DI_GEN_POLARITY_3 = 0x00000004,
++ DI_GEN_POLARITY_4 = 0x00000008,
++ DI_GEN_POLARITY_5 = 0x00000010,
++ DI_GEN_POLARITY_6 = 0x00000020,
++ DI_GEN_POLARITY_7 = 0x00000040,
++ DI_GEN_POLARITY_8 = 0x00000080,
++
++ DI_POL_DRDY_DATA_POLARITY = 0x00000080,
++ DI_POL_DRDY_POLARITY_15 = 0x00000010,
++
++ DI_VSYNC_SEL_OFFSET = 13,
++
++ DC_WR_CH_CONF_FIELD_MODE = 0x00000200,
++ DC_WR_CH_CONF_PROG_TYPE_OFFSET = 5,
++ DC_WR_CH_CONF_PROG_TYPE_MASK = 0x000000E0,
++ DC_WR_CH_CONF_PROG_DI_ID = 0x00000004,
++ DC_WR_CH_CONF_PROG_DISP_ID_OFFSET = 3,
++ DC_WR_CH_CONF_PROG_DISP_ID_MASK = 0x00000018,
++
++ DC_UGDE_0_ODD_EN = 0x02000000,
++ DC_UGDE_0_ID_CODED_MASK = 0x00000007,
++ DC_UGDE_0_ID_CODED_OFFSET = 0,
++ DC_UGDE_0_EV_PRIORITY_MASK = 0x00000078,
++ DC_UGDE_0_EV_PRIORITY_OFFSET = 3,
++
++ DP_COM_CONF_FG_EN = 0x00000001,
++ DP_COM_CONF_GWSEL = 0x00000002,
++ DP_COM_CONF_GWAM = 0x00000004,
++ DP_COM_CONF_GWCKE = 0x00000008,
++ DP_COM_CONF_CSC_DEF_MASK = 0x00000300,
++ DP_COM_CONF_CSC_DEF_OFFSET = 8,
++ DP_COM_CONF_CSC_DEF_FG = 0x00000300,
++ DP_COM_CONF_CSC_DEF_BG = 0x00000200,
++ DP_COM_CONF_CSC_DEF_BOTH = 0x00000100,
++ DP_COM_CONF_GAMMA_EN = 0x00001000,
++ DP_COM_CONF_GAMMA_YUV_EN = 0x00002000,
++
++ DI_SER_CONF_LLA_SER_ACCESS = 0x00000020,
++ DI_SER_CONF_SERIAL_CLK_POL = 0x00000010,
++ DI_SER_CONF_SERIAL_DATA_POL = 0x00000008,
++ DI_SER_CONF_SERIAL_RS_POL = 0x00000004,
++ DI_SER_CONF_SERIAL_CS_POL = 0x00000002,
++ DI_SER_CONF_WAIT4SERIAL = 0x00000001,
++
++ VDI_C_CH_420 = 0x00000000,
++ VDI_C_CH_422 = 0x00000002,
++ VDI_C_MOT_SEL_FULL = 0x00000008,
++ VDI_C_MOT_SEL_LOW = 0x00000004,
++ VDI_C_MOT_SEL_MED = 0x00000000,
++ VDI_C_BURST_SIZE1_4 = 0x00000030,
++ VDI_C_BURST_SIZE2_4 = 0x00000300,
++ VDI_C_BURST_SIZE3_4 = 0x00003000,
++ VDI_C_BURST_SIZE_MASK = 0xF,
++ VDI_C_BURST_SIZE1_OFFSET = 4,
++ VDI_C_BURST_SIZE2_OFFSET = 8,
++ VDI_C_BURST_SIZE3_OFFSET = 12,
++ VDI_C_VWM1_SET_1 = 0x00000000,
++ VDI_C_VWM1_SET_2 = 0x00010000,
++ VDI_C_VWM1_CLR_2 = 0x00080000,
++ VDI_C_VWM3_SET_1 = 0x00000000,
++ VDI_C_VWM3_SET_2 = 0x00400000,
++ VDI_C_VWM3_CLR_2 = 0x02000000,
++ VDI_C_TOP_FIELD_MAN_1 = 0x40000000,
++ VDI_C_TOP_FIELD_AUTO_1 = 0x80000000,
++};
++
++enum di_pins {
++ DI_PIN11 = 0,
++ DI_PIN12 = 1,
++ DI_PIN13 = 2,
++ DI_PIN14 = 3,
++ DI_PIN15 = 4,
++ DI_PIN16 = 5,
++ DI_PIN17 = 6,
++ DI_PIN_CS = 7,
++
++ DI_PIN_SER_CLK = 0,
++ DI_PIN_SER_RS = 1,
++};
++
++enum di_sync_wave {
++ DI_SYNC_NONE = -1,
++ DI_SYNC_CLK = 0,
++ DI_SYNC_INT_HSYNC = 1,
++ DI_SYNC_COUNT_1 = 1,
++ DI_SYNC_HSYNC = 2,
++ DI_SYNC_VSYNC = 3,
++ DI_SYNC_AFIELD = 4,
++ DI_SYNC_ALINE = 5,
++ DI_SYNC_APIXEL = 6,
++ DI_SYNC_COUNT_7 = 7,
++ DI_SYNC_COUNT_8 = 8,
++ DI_SYNC_COUNT_9 = 9,
++};
++
++/* DC template opcodes */
++#define WROD(lf) (0x18 | (lf << 1))
++#define WRG (0x01)
++
++#endif
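
For reference: IPU interrupts are numbered linearly across consecutive 32-bit
IPU_INT_STAT registers, and IPUIRQ_2_STATREG()/IPUIRQ_2_MASK() above split an
irq number into a register offset and a bit mask. The same arithmetic as a
standalone sketch (plain user-space C; the rev >= 2 register layout and the
irq number are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned irq = 451;              /* an arbitrary IPU irq line */
	uint32_t stat_base = 0x0200;     /* IPU_INT_STAT(1) for hw rev >= 2 */
	uint32_t reg  = stat_base + 4 * (irq / 32);
	uint32_t mask = 1u << (irq & 0x1f);

	printf("irq %u -> status reg 0x%04x, mask 0x%08x\n", irq, reg, mask);
	return 0;
}
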
+diff -Nur linux-4.1.3/drivers/mxc/ipu3/Kconfig linux-xbian-imx6/drivers/mxc/ipu3/Kconfig
+--- linux-4.1.3/drivers/mxc/ipu3/Kconfig 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/ipu3/Kconfig 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,2 @@
++config MXC_IPU_V3_FSL
++ bool
+diff -Nur linux-4.1.3/drivers/mxc/ipu3/Makefile linux-xbian-imx6/drivers/mxc/ipu3/Makefile
+--- linux-4.1.3/drivers/mxc/ipu3/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/ipu3/Makefile 2015-07-27 23:13:06.226765901 +0200
+@@ -0,0 +1,4 @@
++obj-$(CONFIG_MXC_IPU_V3_FSL) = mxc_ipu.o
++
++mxc_ipu-objs := ipu_common.o ipu_ic.o ipu_disp.o ipu_capture.o ipu_device.o \
++ ipu_calc_stripes_sizes.o vdoa.o ipu_pixel_clk.o
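
For reference: the vdoa.c diff that follows stages frames through on-chip
IRAM in bands of band_lines rows, double-buffered, and for interlaced input
keeps three fields resident at once. A standalone sketch of that sizing,
reproducing the 96K/72K figures from the comment at the top of vdoa.c
(the width and band_lines values are illustrative):

#include <stdio.h>

static int band_bytes(int width, int band_lines, int yuyv)
{
	/* YUYV packs 2 bytes/pixel; 4:2:0 planar averages 1.5 bytes/pixel */
	return yuyv ? width * 2 * band_lines
		    : width * 3 / 2 * band_lines;
}

int main(void)
{
	int w = 1024, lines = 8;

	/* 3 fields x double buffer = 6 bands resident in IRAM */
	printf("YUYV : %d bytes\n", 6 * band_bytes(w, lines, 1)); /* 98304 = 96K */
	printf("4:2:0: %d bytes\n", 6 * band_bytes(w, lines, 0)); /* 73728 = 72K */
	return 0;
}
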
+diff -Nur linux-4.1.3/drivers/mxc/ipu3/vdoa.c linux-xbian-imx6/drivers/mxc/ipu3/vdoa.c
+--- linux-4.1.3/drivers/mxc/ipu3/vdoa.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/ipu3/vdoa.c 2015-07-27 23:13:06.230751680 +0200
+@@ -0,0 +1,547 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/ipu.h>
++#include <linux/genalloc.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++#include <linux/of.h>
++#include <linux/of_irq.h>
++#include <linux/of_pci.h>
++
++#include "vdoa.h"
++/* 6 bands (3 fields * double buffer) * (width * 2 bytes) * band lines (8)
++ = 6 x 1024 x 2 x 8 = 96K for YUYV, or 72K at 1.5 bytes/pixel */
++#define MAX_VDOA_IRAM_SIZE (1024*96)
++#define VDOA_IRAM_SIZE (1024*72)
++
++#define VDOAC_BAND_HEIGHT_32LINES (32)
++#define VDOAC_BAND_HEIGHT_16LINES (16)
++#define VDOAC_BAND_HEIGHT_8LINES (8)
++#define VDOAC_THREE_FRAMES (0x1 << 2)
++#define VDOAC_SYNC_BAND_MODE (0x1 << 3)
++#define VDOAC_SCAN_ORDER_INTERLACED (0x1 << 4)
++#define VDOAC_PFS_YUYV (0x1 << 5)
++#define VDOAC_IPU_SEL_1 (0x1 << 6)
++#define VDOAFP_FH_MASK (0x1FFF)
++#define VDOAFP_FH_SHIFT (16)
++#define VDOAFP_FW_MASK (0x3FFF)
++#define VDOAFP_FW_SHIFT (0)
++#define VDOASL_VSLY_MASK (0x3FFF)
++#define VDOASL_VSLY_SHIFT (16)
++#define VDOASL_ISLY_MASK (0x7FFF)
++#define VDOASL_ISLY_SHIFT (0)
++#define VDOASRR_START_XFER (0x2)
++#define VDOASRR_SWRST (0x1)
++#define VDOAIEIST_TRANSFER_ERR (0x2)
++#define VDOAIEIST_TRANSFER_END (0x1)
++
++#define VDOAC (0x0) /* Control Register */
++#define VDOASRR (0x4) /* Start and Reset Register */
++#define VDOAIE (0x8) /* Interrupt Enable Register */
++#define VDOAIST (0xc) /* Interrupt Status Register */
++#define VDOAFP (0x10) /* Frame Parameters Register */
++#define VDOAIEBA00 (0x14) /* External Buffer n Frame m Address Register */
++#define VDOAIEBA01 (0x18) /* External Buffer n Frame m Address Register */
++#define VDOAIEBA02 (0x1c) /* External Buffer n Frame m Address Register */
++#define VDOAIEBA10 (0x20) /* External Buffer n Frame m Address Register */
++#define VDOAIEBA11 (0x24) /* External Buffer n Frame m Address Register */
++#define VDOAIEBA12 (0x28) /* External Buffer n Frame m Address Register */
++#define VDOASL (0x2c) /* IPU Stride Line Register */
++#define VDOAIUBO (0x30) /* IPU Chroma Buffer Offset Register */
++#define VDOAVEBA0 (0x34) /* External Buffer m Address Register */
++#define VDOAVEBA1 (0x38) /* External Buffer m Address Register */
++#define VDOAVEBA2 (0x3c) /* External Buffer m Address Register */
++#define VDOAVUBO (0x40) /* VPU Chroma Buffer Offset */
++#define VDOASR (0x44) /* Status Register */
++#define VDOATD (0x48) /* Test Debug Register */
++
++
++enum {
++ VDOA_INIT = 0x1,
++ VDOA_GET = 0x2,
++ VDOA_SETUP = 0x4,
++ VDOA_GET_OBUF = 0x8,
++ VDOA_START = 0x10,
++ VDOA_INIRQ = 0x20,
++ VDOA_STOP = 0x40,
++ VDOA_PUT = VDOA_INIT,
++};
++
++enum {
++ VDOA_NULL = 0,
++ VDOA_FRAME = 1,
++ VDOA_PREV_FIELD = 2,
++ VDOA_CURR_FIELD = 3,
++ VDOA_NEXT_FIELD = 4,
++};
++
++#define CHECK_STATE(expect, retcode) \
++do { \
++ if (!((expect) & vdoa->state)) { \
++ dev_err(vdoa->dev, "ERR: %s state:0x%x, expect:0x%x.\n",\
++ __func__, vdoa->state, (expect)); \
++ retcode; \
++ } \
++} while (0)
++
++#define CHECK_NULL_PTR(ptr) \
++do { \
++ pr_debug("vdoa_ptr:0x%p in %s state:0x%x.\n", \
++ vdoa, __func__, vdoa->state); \
++ if (NULL == (ptr)) { \
++ pr_err("ERR vdoa: %s state:0x%x null ptr.\n", \
++ __func__, vdoa->state); \
++ } \
++} while (0)
++
++struct vdoa_info {
++ int state;
++ struct device *dev;
++ struct clk *vdoa_clk;
++ void __iomem *reg_base;
++ struct gen_pool *iram_pool;
++ unsigned long iram_base;
++ unsigned long iram_paddr;
++ int irq;
++ int field;
++ struct completion comp;
++};
++
++static struct vdoa_info *g_vdoa;
++static unsigned long iram_size;
++static DEFINE_MUTEX(vdoa_lock);
++
++static inline void vdoa_read_register(struct vdoa_info *vdoa,
++ u32 reg, u32 *val)
++{
++ *val = ioread32(vdoa->reg_base + reg);
++ dev_dbg(vdoa->dev, "read_reg:0x%02x, val:0x%08x.\n", reg, *val);
++}
++
++static inline void vdoa_write_register(struct vdoa_info *vdoa,
++ u32 reg, u32 val)
++{
++ iowrite32(val, vdoa->reg_base + reg);
++ dev_dbg(vdoa->dev, "\t\twrite_reg:0x%02x, val:0x%08x.\n", reg, val);
++}
++
++static void dump_registers(struct vdoa_info *vdoa)
++{
++ int i;
++ u32 data;
++
++ for (i = VDOAC; i < VDOATD; i += 4)
++ vdoa_read_register(vdoa, i, &data);
++}
++
++int vdoa_setup(vdoa_handle_t handle, struct vdoa_params *params)
++{
++ int band_size;
++ int total_band_size = 0;
++ int ipu_stride;
++ u32 data;
++ struct vdoa_info *vdoa = (struct vdoa_info *)handle;
++
++ CHECK_NULL_PTR(vdoa);
++ CHECK_STATE(VDOA_GET | VDOA_GET_OBUF | VDOA_STOP, return -EINVAL);
++ if (VDOA_GET == vdoa->state) {
++ dev_dbg(vdoa->dev, "w:%d, h:%d.\n",
++ params->width, params->height);
++ data = (params->band_lines == VDOAC_BAND_HEIGHT_32LINES) ? 2 :
++ ((params->band_lines == VDOAC_BAND_HEIGHT_16LINES) ?
++ 1 : 0);
++ data |= params->scan_order ? VDOAC_SCAN_ORDER_INTERLACED : 0;
++ data |= params->band_mode ? VDOAC_SYNC_BAND_MODE : 0;
++ data |= params->pfs ? VDOAC_PFS_YUYV : 0;
++ data |= params->ipu_num ? VDOAC_IPU_SEL_1 : 0;
++ vdoa_write_register(vdoa, VDOAC, data);
++
++ data = ((params->width & VDOAFP_FW_MASK) << VDOAFP_FW_SHIFT) |
++ ((params->height & VDOAFP_FH_MASK) << VDOAFP_FH_SHIFT);
++ vdoa_write_register(vdoa, VDOAFP, data);
++
++ ipu_stride = params->pfs ? params->width << 1 : params->width;
++ data = ((params->vpu_stride & VDOASL_VSLY_MASK) <<
++ VDOASL_VSLY_SHIFT) |
++ ((ipu_stride & VDOASL_ISLY_MASK) << VDOASL_ISLY_SHIFT);
++ vdoa_write_register(vdoa, VDOASL, data);
++
++ dev_dbg(vdoa->dev, "band_mode:%d, band_line:%d, base:0x%lx.\n",
++ params->band_mode, params->band_lines, vdoa->iram_paddr);
++ }
++ /*
++ * band size = (luma_per_line + chroma_per_line) * bandLines
++ * = width * (3/2 or 2) * bandLines
++ * double buffer mode is used.
++ */
++ if (params->pfs)
++ band_size = (params->width << 1) * params->band_lines;
++ else
++ band_size = ((params->width * 3) >> 1) *
++ params->band_lines;
++ if (params->interlaced) {
++ total_band_size = 6 * band_size; /* 3 frames*double buffer */
++ if (iram_size < total_band_size) {
++ dev_err(vdoa->dev, "iram_size:0x%lx is smaller than "
++ "request:0x%x!\n", iram_size, total_band_size);
++ return -EINVAL;
++ }
++ if (params->vfield_buf.prev_veba) {
++ if (params->band_mode) {
++ vdoa_write_register(vdoa, VDOAIEBA00,
++ vdoa->iram_paddr);
++ vdoa_write_register(vdoa, VDOAIEBA10,
++ vdoa->iram_paddr + band_size);
++ } else
++ vdoa_write_register(vdoa, VDOAIEBA00,
++ params->ieba0);
++ vdoa_write_register(vdoa, VDOAVEBA0,
++ params->vfield_buf.prev_veba);
++ vdoa->field = VDOA_PREV_FIELD;
++ }
++ if (params->vfield_buf.cur_veba) {
++ if (params->band_mode) {
++ vdoa_write_register(vdoa, VDOAIEBA01,
++ vdoa->iram_paddr + band_size * 2);
++ vdoa_write_register(vdoa, VDOAIEBA11,
++ vdoa->iram_paddr + band_size * 3);
++ } else
++ vdoa_write_register(vdoa, VDOAIEBA01,
++ params->ieba1);
++ vdoa_write_register(vdoa, VDOAVEBA1,
++ params->vfield_buf.cur_veba);
++ vdoa->field = VDOA_CURR_FIELD;
++ }
++ if (params->vfield_buf.next_veba) {
++ if (params->band_mode) {
++ vdoa_write_register(vdoa, VDOAIEBA02,
++ vdoa->iram_paddr + band_size * 4);
++ vdoa_write_register(vdoa, VDOAIEBA12,
++ vdoa->iram_paddr + band_size * 5);
++ } else
++ vdoa_write_register(vdoa, VDOAIEBA02,
++ params->ieba2);
++ vdoa_write_register(vdoa, VDOAVEBA2,
++ params->vfield_buf.next_veba);
++ vdoa->field = VDOA_NEXT_FIELD;
++ vdoa_read_register(vdoa, VDOAC, &data);
++ data |= VDOAC_THREE_FRAMES;
++ vdoa_write_register(vdoa, VDOAC, data);
++ }
++
++ if (!params->pfs)
++ vdoa_write_register(vdoa, VDOAIUBO,
++ params->width * params->band_lines);
++ vdoa_write_register(vdoa, VDOAVUBO,
++ params->vfield_buf.vubo);
++ dev_dbg(vdoa->dev, "total band_size:0x%x.\n", band_size*6);
++ } else if (params->band_mode) {
++ /* used for progressive frame resize on PrP channel */
++ BUG(); /* currently not supported */
++ /* progressive frame: band mode */
++ vdoa_write_register(vdoa, VDOAIEBA00, vdoa->iram_paddr);
++ vdoa_write_register(vdoa, VDOAIEBA10,
++ vdoa->iram_paddr + band_size);
++ if (!params->pfs)
++ vdoa_write_register(vdoa, VDOAIUBO,
++ params->width * params->band_lines);
++ dev_dbg(vdoa->dev, "total band_size:0x%x\n", band_size*2);
++ } else {
++ /* progressive frame: mem->mem, non-band mode */
++ vdoa->field = VDOA_FRAME;
++ vdoa_write_register(vdoa, VDOAVEBA0, params->vframe_buf.veba);
++ vdoa_write_register(vdoa, VDOAVUBO, params->vframe_buf.vubo);
++ vdoa_write_register(vdoa, VDOAIEBA00, params->ieba0);
++ if (!params->pfs)
++ /* note: iubo is relative value, based on ieba0 */
++ vdoa_write_register(vdoa, VDOAIUBO,
++ params->width * params->height);
++ }
++ vdoa->state = VDOA_SETUP;
++ return 0;
++}
++
++void vdoa_get_output_buf(vdoa_handle_t handle, struct vdoa_ipu_buf *buf)
++{
++ u32 data;
++ struct vdoa_info *vdoa = (struct vdoa_info *)handle;
++
++ CHECK_NULL_PTR(vdoa);
++ CHECK_STATE(VDOA_SETUP, return);
++ vdoa->state = VDOA_GET_OBUF;
++ memset(buf, 0, sizeof(*buf));
++
++ vdoa_read_register(vdoa, VDOAC, &data);
++ switch (vdoa->field) {
++ case VDOA_FRAME:
++ case VDOA_PREV_FIELD:
++ vdoa_read_register(vdoa, VDOAIEBA00, &buf->ieba0);
++ if (data & VDOAC_SYNC_BAND_MODE)
++ vdoa_read_register(vdoa, VDOAIEBA10, &buf->ieba1);
++ break;
++ case VDOA_CURR_FIELD:
++ vdoa_read_register(vdoa, VDOAIEBA01, &buf->ieba0);
++ vdoa_read_register(vdoa, VDOAIEBA11, &buf->ieba1);
++ break;
++ case VDOA_NEXT_FIELD:
++ vdoa_read_register(vdoa, VDOAIEBA02, &buf->ieba0);
++ vdoa_read_register(vdoa, VDOAIEBA12, &buf->ieba1);
++ break;
++ default:
++ BUG();
++ break;
++ }
++ if (!(data & VDOAC_PFS_YUYV))
++ vdoa_read_register(vdoa, VDOAIUBO, &buf->iubo);
++}
++
++int vdoa_start(vdoa_handle_t handle, int timeout_ms)
++{
++ int ret;
++ struct vdoa_info *vdoa = (struct vdoa_info *)handle;
++
++ CHECK_NULL_PTR(vdoa);
++ CHECK_STATE(VDOA_GET_OBUF, return -EINVAL);
++ vdoa->state = VDOA_START;
++ init_completion(&vdoa->comp);
++ vdoa_write_register(vdoa, VDOAIST,
++ VDOAIEIST_TRANSFER_ERR | VDOAIEIST_TRANSFER_END);
++ vdoa_write_register(vdoa, VDOAIE,
++ VDOAIEIST_TRANSFER_ERR | VDOAIEIST_TRANSFER_END);
++
++ enable_irq(vdoa->irq);
++ vdoa_write_register(vdoa, VDOASRR, VDOASRR_START_XFER);
++ dump_registers(vdoa);
++
++ ret = wait_for_completion_timeout(&vdoa->comp,
++ msecs_to_jiffies(timeout_ms));
++
++ return ret > 0 ? 0 : -ETIMEDOUT;
++}
++
++void vdoa_stop(vdoa_handle_t handle)
++{
++ struct vdoa_info *vdoa = (struct vdoa_info *)handle;
++
++ CHECK_NULL_PTR(vdoa);
++ CHECK_STATE(VDOA_GET | VDOA_START | VDOA_INIRQ, return);
++ vdoa->state = VDOA_STOP;
++
++ disable_irq(vdoa->irq);
++
++ vdoa_write_register(vdoa, VDOASRR, VDOASRR_SWRST);
++}
++
++void vdoa_get_handle(vdoa_handle_t *handle)
++{
++ struct vdoa_info *vdoa = g_vdoa;
++
++ CHECK_NULL_PTR(handle);
++ *handle = (vdoa_handle_t *)NULL;
++ CHECK_STATE(VDOA_INIT, return);
++ mutex_lock(&vdoa_lock);
++ clk_prepare_enable(vdoa->vdoa_clk);
++ vdoa->state = VDOA_GET;
++ vdoa->field = VDOA_NULL;
++ vdoa_write_register(vdoa, VDOASRR, VDOASRR_SWRST);
++
++ *handle = (vdoa_handle_t *)vdoa;
++}
++
++void vdoa_put_handle(vdoa_handle_t *handle)
++{
++ struct vdoa_info *vdoa = (struct vdoa_info *)(*handle);
++
++ CHECK_NULL_PTR(vdoa);
++ CHECK_STATE(VDOA_STOP, return);
++ if (vdoa != g_vdoa)
++ BUG();
++
++ clk_disable_unprepare(vdoa->vdoa_clk);
++ vdoa->state = VDOA_PUT;
++ *handle = (vdoa_handle_t *)NULL;
++ mutex_unlock(&vdoa_lock);
++}
++
++static irqreturn_t vdoa_irq_handler(int irq, void *data)
++{
++ u32 status, mask, val;
++ struct vdoa_info *vdoa = data;
++
++ CHECK_NULL_PTR(vdoa);
++ CHECK_STATE(VDOA_START, return IRQ_HANDLED);
++ vdoa->state = VDOA_INIRQ;
++ vdoa_read_register(vdoa, VDOAIST, &status);
++ vdoa_read_register(vdoa, VDOAIE, &mask);
++ val = status & mask;
++ vdoa_write_register(vdoa, VDOAIST, val);
++ if (VDOAIEIST_TRANSFER_ERR & val)
++ dev_err(vdoa->dev, "vdoa Transfer err irq!\n");
++ if (VDOAIEIST_TRANSFER_END & val)
++ dev_dbg(vdoa->dev, "vdoa Transfer end irq!\n");
++ if (0 == val) {
++ dev_err(vdoa->dev, "vdoa unknown irq!\n");
++ BUG();
++ }
++
++ complete(&vdoa->comp);
++ return IRQ_HANDLED;
++}
++
++/* IRAM size in kilobytes, e.g. booting with vdoa_iram_size=64 gives 64 KiB */
++static int __init vdoa_iram_size_setup(char *options)
++{
++ int ret;
++
++ ret = kstrtol(options, 0, &iram_size);
++ if (ret)
++ iram_size = 0;
++ else
++ iram_size *= SZ_1K;
++
++ return 1;
++}
++__setup("vdoa_iram_size=", vdoa_iram_size_setup);
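++
++/*
++ * Usage sketch (128 is only an example value): booting with
++ * "vdoa_iram_size=128" on the kernel command line makes vdoa_probe()
++ * request 128 KiB from the IRAM pool; zero or out-of-range values fall
++ * back to the built-in VDOA_IRAM_SIZE default.
++ */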
++
++static const struct of_device_id imx_vdoa_dt_ids[] = {
++ { .compatible = "fsl,imx6q-vdoa", },
++ { /* sentinel */ }
++};
++
++static int vdoa_probe(struct platform_device *pdev)
++{
++ int ret;
++ struct vdoa_info *vdoa;
++ struct resource *res;
++ struct resource *res_irq;
++ struct device *dev = &pdev->dev;
++ struct device_node *np = pdev->dev.of_node;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(dev, "can't get device resources\n");
++ return -ENOENT;
++ }
++
++ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++ if (!res_irq) {
++ dev_err(dev, "failed to get irq resource\n");
++ return -ENOENT;
++ }
++
++ vdoa = devm_kzalloc(dev, sizeof(struct vdoa_info), GFP_KERNEL);
++ if (!vdoa)
++ return -ENOMEM;
++ vdoa->dev = dev;
++
++ vdoa->reg_base = devm_ioremap_resource(&pdev->dev, res);
++	if (IS_ERR(vdoa->reg_base))
++		return PTR_ERR(vdoa->reg_base);
++
++ vdoa->irq = res_irq->start;
++ ret = devm_request_irq(dev, vdoa->irq, vdoa_irq_handler, 0,
++ "vdoa", vdoa);
++ if (ret) {
++ dev_err(dev, "can't claim irq %d\n", vdoa->irq);
++ return ret;
++ }
++ disable_irq(vdoa->irq);
++
++ vdoa->vdoa_clk = devm_clk_get(dev, NULL);
++ if (IS_ERR(vdoa->vdoa_clk)) {
++ dev_err(dev, "failed to get vdoa_clk\n");
++ return PTR_ERR(vdoa->vdoa_clk);
++ }
++
++ vdoa->iram_pool = of_get_named_gen_pool(np, "iram", 0);
++ if (!vdoa->iram_pool) {
++ dev_err(&pdev->dev, "iram pool not available\n");
++ return -ENOMEM;
++ }
++
++ if ((iram_size == 0) || (iram_size > MAX_VDOA_IRAM_SIZE))
++ iram_size = VDOA_IRAM_SIZE;
++
++ vdoa->iram_base = gen_pool_alloc(vdoa->iram_pool, iram_size);
++ if (!vdoa->iram_base) {
++ dev_err(&pdev->dev, "unable to alloc iram\n");
++ return -ENOMEM;
++ }
++
++ vdoa->iram_paddr = gen_pool_virt_to_phys(vdoa->iram_pool,
++ vdoa->iram_base);
++
++ dev_dbg(dev, "iram_base:0x%lx,iram_paddr:0x%lx,size:0x%lx\n",
++ vdoa->iram_base, vdoa->iram_paddr, iram_size);
++
++ vdoa->state = VDOA_INIT;
++ dev_set_drvdata(dev, vdoa);
++ g_vdoa = vdoa;
++	dev_info(dev, "i.MX Video Data Order Adapter (VDOA) driver probed\n");
++ return 0;
++}
++
++static int vdoa_remove(struct platform_device *pdev)
++{
++ struct vdoa_info *vdoa = dev_get_drvdata(&pdev->dev);
++
++ gen_pool_free(vdoa->iram_pool, vdoa->iram_base, iram_size);
++	/* vdoa itself is devm-allocated; kfree() here would double free */
++ dev_set_drvdata(&pdev->dev, NULL);
++
++ return 0;
++}
++
++static struct platform_driver vdoa_driver = {
++ .driver = {
++ .name = "mxc_vdoa",
++ .of_match_table = imx_vdoa_dt_ids,
++ },
++ .probe = vdoa_probe,
++ .remove = vdoa_remove,
++};
++
++static int __init vdoa_init(void)
++{
++ int err;
++
++ err = platform_driver_register(&vdoa_driver);
++ if (err) {
++ pr_err("vdoa_driver register failed\n");
++ return -ENODEV;
++ }
++ return 0;
++}
++
++static void __exit vdoa_cleanup(void)
++{
++ platform_driver_unregister(&vdoa_driver);
++}
++
++module_init(vdoa_init);
++module_exit(vdoa_cleanup);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("i.MX Video Data Order Adapter (VDOA) driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-4.1.3/drivers/mxc/ipu3/vdoa.h linux-xbian-imx6/drivers/mxc/ipu3/vdoa.h
+--- linux-4.1.3/drivers/mxc/ipu3/vdoa.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/ipu3/vdoa.h 2015-07-27 23:13:06.230751680 +0200
+@@ -0,0 +1,69 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __VDOA_H__
++#define __VDOA_H__
++
++#define VDOA_PFS_YUYV (1)
++#define VDOA_PFS_NV12 (0)
++
++
++struct vfield_buf {
++ u32 prev_veba;
++ u32 cur_veba;
++ u32 next_veba;
++ u32 vubo;
++};
++
++struct vframe_buf {
++ u32 veba;
++ u32 vubo;
++};
++
++struct vdoa_params {
++ u32 width;
++ u32 height;
++ int vpu_stride;
++ int interlaced;
++ int scan_order;
++ int ipu_num;
++ int band_lines;
++ int band_mode;
++ int pfs;
++ u32 ieba0;
++ u32 ieba1;
++ u32 ieba2;
++ struct vframe_buf vframe_buf;
++ struct vfield_buf vfield_buf;
++};
++struct vdoa_ipu_buf {
++ u32 ieba0;
++ u32 ieba1;
++ u32 iubo;
++};
++
++struct vdoa_info;
++typedef void *vdoa_handle_t;
++
++int vdoa_setup(vdoa_handle_t handle, struct vdoa_params *params);
++void vdoa_get_output_buf(vdoa_handle_t handle, struct vdoa_ipu_buf *buf);
++int vdoa_start(vdoa_handle_t handle, int timeout_ms);
++void vdoa_stop(vdoa_handle_t handle);
++void vdoa_get_handle(vdoa_handle_t *handle);
++void vdoa_put_handle(vdoa_handle_t *handle);
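++
++/*
++ * Typical call sequence (a sketch only; error handling elided and all
++ * parameter values are the caller's):
++ *
++ *	vdoa_handle_t h;
++ *	struct vdoa_params p = { ... };
++ *	struct vdoa_ipu_buf buf;
++ *
++ *	vdoa_get_handle(&h);		locks the single VDOA instance
++ *	vdoa_setup(h, &p);
++ *	vdoa_get_output_buf(h, &buf);	IPU-side addresses for this run
++ *	vdoa_start(h, 50);		waits up to 50 ms for completion
++ *	vdoa_stop(h);
++ *	vdoa_put_handle(&h);		disables the clock and unlocks
++ */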
++#endif
+diff -Nur linux-4.1.3/drivers/mxc/Kconfig linux-xbian-imx6/drivers/mxc/Kconfig
+--- linux-4.1.3/drivers/mxc/Kconfig 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/Kconfig 2015-07-27 23:13:06.166979215 +0200
+@@ -0,0 +1,25 @@
++# drivers/mxc/Kconfig
++
++if ARCH_MXC
++
++menu "MXC support drivers"
++
++config MXC_IPU
++ tristate "Image Processing Unit Driver"
++ select MXC_IPU_V3_FSL
++ depends on !IMX_IPUV3_CORE
++ help
++ If you plan to use the Image Processing unit, say
++ Y here. IPU is needed by Framebuffer and V4L2 drivers.
++
++source "drivers/mxc/gpu-viv/Kconfig"
++source "drivers/mxc/ipu3/Kconfig"
++source "drivers/mxc/asrc/Kconfig"
++source "drivers/mxc/vpu/Kconfig"
++source "drivers/mxc/hdmi-cec/Kconfig"
++source "drivers/mxc/mipi/Kconfig"
++source "drivers/mxc/mlb/Kconfig"
++
++endmenu
++
++endif
+diff -Nur linux-4.1.3/drivers/mxc/Makefile linux-xbian-imx6/drivers/mxc/Makefile
+--- linux-4.1.3/drivers/mxc/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/Makefile 2015-07-27 23:13:06.166979215 +0200
+@@ -0,0 +1,7 @@
++obj-$(CONFIG_MXC_GPU_VIV) += gpu-viv/
++obj-$(CONFIG_MXC_IPU_V3_FSL) += ipu3/
++obj-$(CONFIG_MXC_ASRC) += asrc/
++obj-$(CONFIG_MXC_VPU) += vpu/
++obj-$(CONFIG_MXC_HDMI_CEC) += hdmi-cec/
++obj-$(CONFIG_MXC_MIPI_CSI2) += mipi/
++obj-$(CONFIG_MXC_MLB) += mlb/
+diff -Nur linux-4.1.3/drivers/mxc/mipi/Kconfig linux-xbian-imx6/drivers/mxc/mipi/Kconfig
+--- linux-4.1.3/drivers/mxc/mipi/Kconfig 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/mipi/Kconfig 2015-07-27 23:13:06.230751680 +0200
+@@ -0,0 +1,14 @@
++#
++# MIPI configuration
++#
++
++menu "MXC MIPI Support"
++
++config MXC_MIPI_CSI2
++ tristate "MIPI CSI2 support"
++ depends on SOC_IMX6Q
++ default n
++ ---help---
++ Say Y to get the MIPI CSI2 support.
++
++endmenu
+diff -Nur linux-4.1.3/drivers/mxc/mipi/Makefile linux-xbian-imx6/drivers/mxc/mipi/Makefile
+--- linux-4.1.3/drivers/mxc/mipi/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/mipi/Makefile 2015-07-27 23:13:06.230751680 +0200
+@@ -0,0 +1,4 @@
++#
++# Makefile for the mipi interface driver
++#
++obj-$(CONFIG_MXC_MIPI_CSI2) += mxc_mipi_csi2.o
+diff -Nur linux-4.1.3/drivers/mxc/mipi/mxc_mipi_csi2.c linux-xbian-imx6/drivers/mxc/mipi/mxc_mipi_csi2.c
+--- linux-4.1.3/drivers/mxc/mipi/mxc_mipi_csi2.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/mipi/mxc_mipi_csi2.c 2015-07-27 23:13:06.230751680 +0200
+@@ -0,0 +1,540 @@
++/*
++ * Copyright (C) 2011-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/irqdesc.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/err.h>
++#include <linux/clk.h>
++#include <linux/console.h>
++#include <linux/io.h>
++#include <linux/bitops.h>
++#include <linux/delay.h>
++#include <linux/fsl_devices.h>
++#include <linux/slab.h>
++#include <linux/of.h>
++
++#include <linux/mipi_csi2.h>
++
++#include "mxc_mipi_csi2.h"
++
++static struct mipi_csi2_info *gmipi_csi2;
++
++void _mipi_csi2_lock(struct mipi_csi2_info *info)
++{
++ if (!in_irq() && !in_softirq())
++ mutex_lock(&info->mutex_lock);
++}
++
++void _mipi_csi2_unlock(struct mipi_csi2_info *info)
++{
++ if (!in_irq() && !in_softirq())
++ mutex_unlock(&info->mutex_lock);
++}
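++
++/*
++ * Note: the two helpers above deliberately skip the mutex when called
++ * from hard or soft IRQ context, where sleeping is not allowed; callers
++ * running in that context must be serialized by other means.
++ */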
++
++static inline void mipi_csi2_write(struct mipi_csi2_info *info,
++ unsigned value, unsigned offset)
++{
++ writel(value, info->mipi_csi2_base + offset);
++}
++
++static inline unsigned int mipi_csi2_read(struct mipi_csi2_info *info,
++ unsigned offset)
++{
++ return readl(info->mipi_csi2_base + offset);
++}
++
++/*!
++ * This function is called to enable the mipi csi2 interface.
++ *
++ * @param	info	mipi csi2 handle
++ * @return	Returns the updated enable status
++ */
++bool mipi_csi2_enable(struct mipi_csi2_info *info)
++{
++ bool status;
++
++ _mipi_csi2_lock(info);
++
++ if (!info->mipi_en) {
++ info->mipi_en = true;
++ clk_prepare_enable(info->cfg_clk);
++ clk_prepare_enable(info->dphy_clk);
++ } else
++ mipi_dbg("mipi csi2 already enabled!\n");
++
++ status = info->mipi_en;
++
++ _mipi_csi2_unlock(info);
++
++ return status;
++}
++EXPORT_SYMBOL(mipi_csi2_enable);
++
++/*!
++ * This function is called to disable the mipi csi2 interface.
++ *
++ * @param	info	mipi csi2 handle
++ * @return	Returns the updated enable status
++ */
++bool mipi_csi2_disable(struct mipi_csi2_info *info)
++{
++ bool status;
++
++ _mipi_csi2_lock(info);
++
++ if (info->mipi_en) {
++ info->mipi_en = false;
++ clk_disable_unprepare(info->dphy_clk);
++ clk_disable_unprepare(info->cfg_clk);
++ } else
++ mipi_dbg("mipi csi2 already disabled!\n");
++
++ status = info->mipi_en;
++
++ _mipi_csi2_unlock(info);
++
++ return status;
++}
++EXPORT_SYMBOL(mipi_csi2_disable);
++
++/*!
++ * This function is called to get mipi csi2 disable/enable status.
++ *
++ * @param	info	mipi csi2 handle
++ * @return Returns mipi csi2 status
++ */
++bool mipi_csi2_get_status(struct mipi_csi2_info *info)
++{
++ bool status;
++
++ _mipi_csi2_lock(info);
++ status = info->mipi_en;
++ _mipi_csi2_unlock(info);
++
++ return status;
++}
++EXPORT_SYMBOL(mipi_csi2_get_status);
++
++/*!
++ * This function is called to set mipi lanes.
++ *
++ * @param	info	mipi csi2 handle
++ * @return	Returns the N_LANES value read back from the hardware
++ */
++unsigned int mipi_csi2_set_lanes(struct mipi_csi2_info *info)
++{
++ unsigned int lanes;
++
++ _mipi_csi2_lock(info);
++ mipi_csi2_write(info, info->lanes - 1, MIPI_CSI2_N_LANES);
++ lanes = mipi_csi2_read(info, MIPI_CSI2_N_LANES);
++ _mipi_csi2_unlock(info);
++
++ return lanes;
++}
++EXPORT_SYMBOL(mipi_csi2_set_lanes);
++
++/*!
++ * This function is called to set mipi data type.
++ *
++ * @param	info	mipi csi2 handle
++ * @return	Returns the data type that was set
++ */
++unsigned int mipi_csi2_set_datatype(struct mipi_csi2_info *info,
++ unsigned int datatype)
++{
++ unsigned int dtype;
++
++ _mipi_csi2_lock(info);
++ info->datatype = datatype;
++ dtype = info->datatype;
++ _mipi_csi2_unlock(info);
++
++ return dtype;
++}
++EXPORT_SYMBOL(mipi_csi2_set_datatype);
++
++/*!
++ * This function is called to get mipi data type.
++ *
++ * @param	info	mipi csi2 handle
++ * @return Returns mipi data type
++ */
++unsigned int mipi_csi2_get_datatype(struct mipi_csi2_info *info)
++{
++ unsigned int dtype;
++
++ _mipi_csi2_lock(info);
++ dtype = info->datatype;
++ _mipi_csi2_unlock(info);
++
++ return dtype;
++}
++EXPORT_SYMBOL(mipi_csi2_get_datatype);
++
++/*!
++ * This function is called to get mipi csi2 dphy status.
++ *
++ * @param	info	mipi csi2 handle
++ * @return Returns dphy status
++ */
++unsigned int mipi_csi2_dphy_status(struct mipi_csi2_info *info)
++{
++ unsigned int status;
++
++ _mipi_csi2_lock(info);
++ status = mipi_csi2_read(info, MIPI_CSI2_PHY_STATE);
++ _mipi_csi2_unlock(info);
++
++ return status;
++}
++EXPORT_SYMBOL(mipi_csi2_dphy_status);
++
++/*!
++ * This function is called to get mipi csi2 error1 status.
++ *
++ * @param	info	mipi csi2 handle
++ * @return Returns error1 value
++ */
++unsigned int mipi_csi2_get_error1(struct mipi_csi2_info *info)
++{
++ unsigned int err1;
++
++ _mipi_csi2_lock(info);
++ err1 = mipi_csi2_read(info, MIPI_CSI2_ERR1);
++ _mipi_csi2_unlock(info);
++
++ return err1;
++}
++EXPORT_SYMBOL(mipi_csi2_get_error1);
++
++/*!
++ * This function is called to get mipi csi2 error2 status.
++ *
++ * @param	info	mipi csi2 handle
++ * @return	Returns error2 value
++ */
++unsigned int mipi_csi2_get_error2(struct mipi_csi2_info *info)
++{
++ unsigned int err2;
++
++ _mipi_csi2_lock(info);
++ err2 = mipi_csi2_read(info, MIPI_CSI2_ERR2);
++ _mipi_csi2_unlock(info);
++
++ return err2;
++}
++EXPORT_SYMBOL(mipi_csi2_get_error2);
++
++/*!
++ * This function is called to enable mipi to ipu pixel clock.
++ *
++ * @param	info	mipi csi2 handle
++ * @return Returns 0 on success or negative error code on fail
++ */
++int mipi_csi2_pixelclk_enable(struct mipi_csi2_info *info)
++{
++ return clk_prepare_enable(info->pixel_clk);
++}
++EXPORT_SYMBOL(mipi_csi2_pixelclk_enable);
++
++/*!
++ * This function is called to disable mipi to ipu pixel clock.
++ *
++ * @param	info	mipi csi2 handle
++ */
++void mipi_csi2_pixelclk_disable(struct mipi_csi2_info *info)
++{
++ clk_disable_unprepare(info->pixel_clk);
++}
++EXPORT_SYMBOL(mipi_csi2_pixelclk_disable);
++
++/*!
++ * This function is called to power on mipi csi2.
++ *
++ * @param	info	mipi csi2 handle
++ * @return Returns 0 on success or negative error code on fail
++ */
++int mipi_csi2_reset(struct mipi_csi2_info *info)
++{
++ _mipi_csi2_lock(info);
++
++ mipi_csi2_write(info, 0x0, MIPI_CSI2_PHY_SHUTDOWNZ);
++ mipi_csi2_write(info, 0x0, MIPI_CSI2_DPHY_RSTZ);
++ mipi_csi2_write(info, 0x0, MIPI_CSI2_CSI2_RESETN);
++
++ mipi_csi2_write(info, 0x00000001, MIPI_CSI2_PHY_TST_CTRL0);
++ mipi_csi2_write(info, 0x00000000, MIPI_CSI2_PHY_TST_CTRL1);
++ mipi_csi2_write(info, 0x00000000, MIPI_CSI2_PHY_TST_CTRL0);
++ mipi_csi2_write(info, 0x00000002, MIPI_CSI2_PHY_TST_CTRL0);
++ mipi_csi2_write(info, 0x00010044, MIPI_CSI2_PHY_TST_CTRL1);
++ mipi_csi2_write(info, 0x00000000, MIPI_CSI2_PHY_TST_CTRL0);
++ mipi_csi2_write(info, 0x00000014, MIPI_CSI2_PHY_TST_CTRL1);
++ mipi_csi2_write(info, 0x00000002, MIPI_CSI2_PHY_TST_CTRL0);
++ mipi_csi2_write(info, 0x00000000, MIPI_CSI2_PHY_TST_CTRL0);
++
++ mipi_csi2_write(info, 0xffffffff, MIPI_CSI2_PHY_SHUTDOWNZ);
++ mipi_csi2_write(info, 0xffffffff, MIPI_CSI2_DPHY_RSTZ);
++ mipi_csi2_write(info, 0xffffffff, MIPI_CSI2_CSI2_RESETN);
++
++ _mipi_csi2_unlock(info);
++
++ return 0;
++}
++EXPORT_SYMBOL(mipi_csi2_reset);
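++
++/*
++ * The PHY_TST_CTRL0/PHY_TST_CTRL1 sequence in mipi_csi2_reset() drives
++ * the D-PHY vendor test interface to load its PLL configuration before
++ * SHUTDOWNZ/RSTZ/RESETN are released; the magic values come from the
++ * Freescale reference code and are not otherwise documented here.
++ */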
++
++/*!
++ * This function is called to get mipi csi2 info.
++ *
++ * @return	Returns mipi csi2 info struct pointer
++ */
++struct mipi_csi2_info *mipi_csi2_get_info(void)
++{
++ return gmipi_csi2;
++}
++EXPORT_SYMBOL(mipi_csi2_get_info);
++
++/*!
++ * This function is called to get the IPU number bound to the mipi csi2.
++ *
++ * @return	Returns the bound IPU number
++ */
++int mipi_csi2_get_bind_ipu(struct mipi_csi2_info *info)
++{
++ int ipu_id;
++
++ _mipi_csi2_lock(info);
++ ipu_id = info->ipu_id;
++ _mipi_csi2_unlock(info);
++
++ return ipu_id;
++}
++EXPORT_SYMBOL(mipi_csi2_get_bind_ipu);
++
++/*!
++ * This function is called to get the CSI number bound to the mipi csi2.
++ *
++ * @return	Returns the bound CSI number
++ */
++unsigned int mipi_csi2_get_bind_csi(struct mipi_csi2_info *info)
++{
++ unsigned int csi_id;
++
++ _mipi_csi2_lock(info);
++ csi_id = info->csi_id;
++ _mipi_csi2_unlock(info);
++
++ return csi_id;
++}
++EXPORT_SYMBOL(mipi_csi2_get_bind_csi);
++
++/*!
++ * This function is called to get mipi csi2 virtual channel.
++ *
++ * @return Returns mipi csi2 virtual channel num
++ */
++unsigned int mipi_csi2_get_virtual_channel(struct mipi_csi2_info *info)
++{
++ unsigned int v_channel;
++
++ _mipi_csi2_lock(info);
++ v_channel = info->v_channel;
++ _mipi_csi2_unlock(info);
++
++ return v_channel;
++}
++EXPORT_SYMBOL(mipi_csi2_get_virtual_channel);
++
++/**
++ * This function is called by the driver framework to initialize the MIPI CSI2
++ * device.
++ *
++ * @param pdev The device structure for the MIPI CSI2 passed in by the
++ * driver framework.
++ *
++ * @return Returns 0 on success or negative error code on error
++ */
++static int mipi_csi2_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct device_node *np = pdev->dev.of_node;
++ struct resource *res;
++ u32 mipi_csi2_dphy_ver;
++ int ret;
++
++	/* kzalloc so fields such as datatype start out zeroed */
++	gmipi_csi2 = kzalloc(sizeof(struct mipi_csi2_info), GFP_KERNEL);
++ if (!gmipi_csi2) {
++ ret = -ENOMEM;
++ goto alloc_failed;
++ }
++
++ ret = of_property_read_u32(np, "ipu_id", &(gmipi_csi2->ipu_id));
++ if (ret) {
++ dev_err(&pdev->dev, "ipu_id missing or invalid\n");
++ goto err;
++ }
++
++ ret = of_property_read_u32(np, "csi_id", &(gmipi_csi2->csi_id));
++ if (ret) {
++ dev_err(&pdev->dev, "csi_id missing or invalid\n");
++ goto err;
++ }
++
++ ret = of_property_read_u32(np, "v_channel", &(gmipi_csi2->v_channel));
++ if (ret) {
++ dev_err(&pdev->dev, "v_channel missing or invalid\n");
++ goto err;
++ }
++
++ ret = of_property_read_u32(np, "lanes", &(gmipi_csi2->lanes));
++ if (ret) {
++ dev_err(&pdev->dev, "lanes missing or invalid\n");
++ goto err;
++ }
++
++ if ((gmipi_csi2->ipu_id < 0) || (gmipi_csi2->ipu_id > 1) ||
++ (gmipi_csi2->csi_id > 1) || (gmipi_csi2->v_channel > 3) ||
++ (gmipi_csi2->lanes > 4)) {
++ dev_err(&pdev->dev, "invalid param for mipi csi2!\n");
++ ret = -EINVAL;
++ goto err;
++ }
++
++ /* initialize mutex */
++ mutex_init(&gmipi_csi2->mutex_lock);
++
++	/* get mipi csi2 information */
++ gmipi_csi2->pdev = pdev;
++ gmipi_csi2->mipi_en = false;
++
++ gmipi_csi2->cfg_clk = devm_clk_get(dev, "cfg_clk");
++ if (IS_ERR(gmipi_csi2->cfg_clk)) {
++ dev_err(&pdev->dev, "failed to get cfg_clk\n");
++ ret = PTR_ERR(gmipi_csi2->cfg_clk);
++ goto err;
++ }
++
++ /* get mipi dphy clk */
++ gmipi_csi2->dphy_clk = devm_clk_get(dev, "dphy_clk");
++ if (IS_ERR(gmipi_csi2->dphy_clk)) {
++ dev_err(&pdev->dev, "failed to get dphy pll_ref_clk\n");
++ ret = PTR_ERR(gmipi_csi2->dphy_clk);
++ goto err;
++ }
++
++ /* get mipi to ipu pixel clk */
++ gmipi_csi2->pixel_clk = devm_clk_get(dev, "pixel_clk");
++ if (IS_ERR(gmipi_csi2->pixel_clk)) {
++ dev_err(&pdev->dev, "failed to get mipi pixel clk\n");
++ ret = PTR_ERR(gmipi_csi2->pixel_clk);
++ goto err;
++ }
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ ret = -ENODEV;
++ goto err;
++ }
++
++ /* mipi register mapping */
++ gmipi_csi2->mipi_csi2_base = ioremap(res->start, PAGE_SIZE);
++ if (!gmipi_csi2->mipi_csi2_base) {
++ ret = -ENOMEM;
++ goto err;
++ }
++
++ /* mipi dphy clk enable for register access */
++ clk_prepare_enable(gmipi_csi2->dphy_clk);
++ /* get mipi csi2 dphy version */
++ mipi_csi2_dphy_ver = mipi_csi2_read(gmipi_csi2, MIPI_CSI2_VERSION);
++
++ clk_disable_unprepare(gmipi_csi2->dphy_clk);
++
++ platform_set_drvdata(pdev, gmipi_csi2);
++
++ dev_info(&pdev->dev, "i.MX MIPI CSI2 driver probed\n");
++ dev_info(&pdev->dev, "i.MX MIPI CSI2 dphy version is 0x%x\n",
++ mipi_csi2_dphy_ver);
++
++ return 0;
++
++err:
++ kfree(gmipi_csi2);
++alloc_failed:
++	dev_err(&pdev->dev, "i.MX MIPI CSI2 driver probe failed\n");
++ return ret;
++}
++
++static int mipi_csi2_remove(struct platform_device *pdev)
++{
++ /* unmapping mipi register */
++ iounmap(gmipi_csi2->mipi_csi2_base);
++
++ kfree(gmipi_csi2);
++
++ dev_set_drvdata(&pdev->dev, NULL);
++
++ return 0;
++}
++
++static const struct of_device_id imx_mipi_csi2_dt_ids[] = {
++ { .compatible = "fsl,imx6q-mipi-csi2", },
++ { /* sentinel */ }
++};
++
++static struct platform_driver mipi_csi2_driver = {
++ .driver = {
++ .name = "mxc_mipi_csi2",
++ .of_match_table = imx_mipi_csi2_dt_ids,
++ },
++ .probe = mipi_csi2_probe,
++ .remove = mipi_csi2_remove,
++};
++
++static int __init mipi_csi2_init(void)
++{
++ int err;
++
++ err = platform_driver_register(&mipi_csi2_driver);
++ if (err) {
++ pr_err("mipi_csi2_driver register failed\n");
++ return -ENODEV;
++ }
++
++ pr_info("MIPI CSI2 driver module loaded\n");
++
++ return 0;
++}
++
++static void __exit mipi_csi2_cleanup(void)
++{
++ platform_driver_unregister(&mipi_csi2_driver);
++}
++
++subsys_initcall(mipi_csi2_init);
++module_exit(mipi_csi2_cleanup);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("i.MX MIPI CSI2 driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-4.1.3/drivers/mxc/mipi/mxc_mipi_csi2.h linux-xbian-imx6/drivers/mxc/mipi/mxc_mipi_csi2.h
+--- linux-4.1.3/drivers/mxc/mipi/mxc_mipi_csi2.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/mipi/mxc_mipi_csi2.h 2015-07-27 23:13:06.230751680 +0200
+@@ -0,0 +1,46 @@
++/*
++ * Copyright (C) 2011-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __MXC_MIPI_CSI2_H__
++#define __MXC_MIPI_CSI2_H__
++
++#ifdef DEBUG
++#define mipi_dbg(fmt, ...) \
++ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
++#else
++#define mipi_dbg(fmt, ...)
++#endif
++
++/* driver private data */
++struct mipi_csi2_info {
++ bool mipi_en;
++ int ipu_id;
++ unsigned int csi_id;
++ unsigned int v_channel;
++ unsigned int lanes;
++ unsigned int datatype;
++ struct clk *cfg_clk;
++ struct clk *dphy_clk;
++ struct clk *pixel_clk;
++ void __iomem *mipi_csi2_base;
++ struct platform_device *pdev;
++
++ struct mutex mutex_lock;
++};
++
++#endif
+diff -Nur linux-4.1.3/drivers/mxc/mlb/Kconfig linux-xbian-imx6/drivers/mxc/mlb/Kconfig
+--- linux-4.1.3/drivers/mxc/mlb/Kconfig 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/mlb/Kconfig 2015-07-27 23:13:06.230751680 +0200
+@@ -0,0 +1,17 @@
++#
++# MLB150 configuration
++#
++
++menu "MXC Media Local Bus Driver"
++
++config MXC_MLB
++	bool
++
++config MXC_MLB150
++ tristate "MLB150 support"
++ depends on SOC_IMX6Q
++ select MXC_MLB
++ ---help---
++ Say Y to get the MLB150 support.
++
++endmenu
+diff -Nur linux-4.1.3/drivers/mxc/mlb/Makefile linux-xbian-imx6/drivers/mxc/mlb/Makefile
+--- linux-4.1.3/drivers/mxc/mlb/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/mlb/Makefile 2015-07-27 23:13:06.230751680 +0200
+@@ -0,0 +1,5 @@
++#
++# Makefile for the i.MX6Q/DL MLB150 driver
++#
++
++obj-$(CONFIG_MXC_MLB150) += mxc_mlb150.o
+diff -Nur linux-4.1.3/drivers/mxc/mlb/mxc_mlb150.c linux-xbian-imx6/drivers/mxc/mlb/mxc_mlb150.c
+--- linux-4.1.3/drivers/mxc/mlb/mxc_mlb150.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/mlb/mxc_mlb150.c 2015-07-27 23:13:06.230751680 +0200
+@@ -0,0 +1,2778 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/cdev.h>
++#include <linux/circ_buf.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/errno.h>
++#include <linux/fs.h>
++#include <linux/genalloc.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mxc_mlb.h>
++#include <linux/of.h>
++#include <linux/platform_device.h>
++#include <linux/poll.h>
++#include <linux/regulator/consumer.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/uaccess.h>
++
++#define DRIVER_NAME "mxc_mlb150"
++
++/*
++ * MLB module memory map registers define
++ */
++#define REG_MLBC0 0x0
++#define MLBC0_MLBEN (0x1)
++#define MLBC0_MLBCLK_MASK (0x7 << 2)
++#define MLBC0_MLBCLK_SHIFT (2)
++#define MLBC0_MLBPEN (0x1 << 5)
++#define MLBC0_MLBLK (0x1 << 7)
++#define MLBC0_ASYRETRY (0x1 << 12)
++#define MLBC0_CTLRETRY (0x1 << 12)
++#define MLBC0_FCNT_MASK (0x7 << 15)
++#define MLBC0_FCNT_SHIFT (15)
++
++#define REG_MLBPC0 0x8
++#define MLBPC0_MCLKHYS (0x1 << 11)
++
++#define REG_MS0 0xC
++#define REG_MS1 0x14
++
++#define REG_MSS 0x20
++#define MSS_RSTSYSCMD (0x1)
++#define MSS_LKSYSCMD (0x1 << 1)
++#define MSS_ULKSYSCMD (0x1 << 2)
++#define MSS_CSSYSCMD (0x1 << 3)
++#define MSS_SWSYSCMD (0x1 << 4)
++#define MSS_SERVREQ (0x1 << 5)
++
++#define REG_MSD 0x24
++
++#define REG_MIEN 0x2C
++#define MIEN_ISOC_PE (0x1)
++#define MIEN_ISOC_BUFO (0x1 << 1)
++#define MIEN_SYNC_PE (0x1 << 16)
++#define MIEN_ARX_DONE (0x1 << 17)
++#define MIEN_ARX_PE (0x1 << 18)
++#define MIEN_ARX_BREAK (0x1 << 19)
++#define MIEN_ATX_DONE (0x1 << 20)
++#define MIEN_ATX_PE (0x1 << 21)
++#define MIEN_ATX_BREAK (0x1 << 22)
++#define MIEN_CRX_DONE (0x1 << 24)
++#define MIEN_CRX_PE (0x1 << 25)
++#define MIEN_CRX_BREAK (0x1 << 26)
++#define MIEN_CTX_DONE (0x1 << 27)
++#define MIEN_CTX_PE (0x1 << 28)
++#define MIEN_CTX_BREAK (0x1 << 29)
++
++#define REG_MLBPC2 0x34
++#define REG_MLBPC1 0x38
++#define MLBPC1_VAL (0x00000888)
++
++#define REG_MLBC1 0x3C
++#define MLBC1_LOCK (0x1 << 6)
++#define MLBC1_CLKM (0x1 << 7)
++#define MLBC1_NDA_MASK (0xFF << 8)
++#define MLBC1_NDA_SHIFT (8)
++
++#define REG_HCTL 0x80
++#define HCTL_RST0 (0x1)
++#define HCTL_RST1 (0x1 << 1)
++#define HCTL_EN (0x1 << 15)
++
++#define REG_HCMR0 0x88
++#define REG_HCMR1 0x8C
++#define REG_HCER0 0x90
++#define REG_HCER1 0x94
++#define REG_HCBR0 0x98
++#define REG_HCBR1 0x9C
++
++#define REG_MDAT0 0xC0
++#define REG_MDAT1 0xC4
++#define REG_MDAT2 0xC8
++#define REG_MDAT3 0xCC
++
++#define REG_MDWE0 0xD0
++#define REG_MDWE1 0xD4
++#define REG_MDWE2 0xD8
++#define REG_MDWE3 0xDC
++
++#define REG_MCTL 0xE0
++#define MCTL_XCMP (0x1)
++
++#define REG_MADR 0xE4
++#define MADR_WNR (0x1 << 31)
++#define MADR_TB (0x1 << 30)
++#define MADR_ADDR_MASK (0x7f << 8)
++#define MADR_ADDR_SHIFT (0)
++
++#define REG_ACTL 0x3C0
++#define ACTL_MPB (0x1 << 4)
++#define ACTL_DMAMODE (0x1 << 2)
++#define ACTL_SMX (0x1 << 1)
++#define ACTL_SCE (0x1)
++
++#define REG_ACSR0 0x3D0
++#define REG_ACSR1 0x3D4
++#define REG_ACMR0 0x3D8
++#define REG_ACMR1 0x3DC
++
++#define REG_CAT_MDATn(ch) (REG_MDAT0 + ((ch % 8) >> 1) * 4)
++#define REG_CAT_MDWEn(ch) (REG_MDWE0 + ((ch % 8) >> 1) * 4)
++
++#define INT_AHB0_CH_START (0)
++#define INT_AHB1_CH_START (32)
++
++#define LOGIC_CH_NUM (64)
++#define BUF_CDT_OFFSET (0x0)
++#define BUF_ADT_OFFSET (0x40)
++#define BUF_CAT_MLB_OFFSET (0x80)
++#define BUF_CAT_HBI_OFFSET (0x88)
++#define BUF_CTR_END_OFFSET (0x8F)
++
++#define CAT_MODE_RX (0x1 << 0)
++#define CAT_MODE_TX (0x1 << 1)
++#define CAT_MODE_INBOUND_DMA (0x1 << 8)
++#define CAT_MODE_OUTBOUND_DMA (0x1 << 9)
++
++#define CH_SYNC_DEFAULT_QUAD (1)
++#define CH_SYNC_MAX_QUAD (15)
++#define CH_SYNC_CDT_BUF_DEP (CH_SYNC_DEFAULT_QUAD * 4 * 4)
++#define CH_SYNC_ADT_BUF_MULTI (4)
++#define CH_SYNC_ADT_BUF_DEP (CH_SYNC_CDT_BUF_DEP * CH_SYNC_ADT_BUF_MULTI)
++#define CH_SYNC_BUF_SZ (CH_SYNC_MAX_QUAD * 4 * 4 * \
++ CH_SYNC_ADT_BUF_MULTI)
++#define CH_CTRL_CDT_BUF_DEP (64)
++#define CH_CTRL_ADT_BUF_DEP (CH_CTRL_CDT_BUF_DEP)
++#define CH_CTRL_BUF_SZ (CH_CTRL_ADT_BUF_DEP)
++#define CH_ASYNC_MDP_PACKET_LEN (1024)
++#define CH_ASYNC_MEP_PACKET_LEN (1536)
++#define CH_ASYNC_CDT_BUF_DEP (CH_ASYNC_MEP_PACKET_LEN)
++#define CH_ASYNC_ADT_BUF_DEP (CH_ASYNC_CDT_BUF_DEP)
++#define CH_ASYNC_BUF_SZ (CH_ASYNC_ADT_BUF_DEP)
++#define CH_ISOC_BLK_SIZE_188 (188)
++#define CH_ISOC_BLK_SIZE_196 (196)
++#define CH_ISOC_BLK_SIZE (CH_ISOC_BLK_SIZE_188)
++#define CH_ISOC_BLK_NUM (1)
++#define CH_ISOC_CDT_BUF_DEP (CH_ISOC_BLK_SIZE * CH_ISOC_BLK_NUM)
++#define CH_ISOC_ADT_BUF_DEP (CH_ISOC_CDT_BUF_DEP)
++#define CH_ISOC_BUF_SZ (1024)
++
++#define CH_SYNC_DBR_BUF_OFFSET (0x0)
++#define CH_CTRL_DBR_BUF_OFFSET (CH_SYNC_DBR_BUF_OFFSET + \
++ 2 * (CH_SYNC_MAX_QUAD * 4 * 4))
++#define CH_ASYNC_DBR_BUF_OFFSET (CH_CTRL_DBR_BUF_OFFSET + \
++ 2 * CH_CTRL_CDT_BUF_DEP)
++#define CH_ISOC_DBR_BUF_OFFSET (CH_ASYNC_DBR_BUF_OFFSET + \
++ 2 * CH_ASYNC_CDT_BUF_DEP)
++
++#define DBR_BUF_START 0x00000
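++
++/*
++ * The CH_*_DBR_BUF_OFFSET values above statically partition the MLB
++ * DBR memory between the four channel types; within each type the TX
++ * buffer sits at the offset itself and the RX buffer follows it (see
++ * the dbr_buf_head initializers in mlb_devinfo below).
++ */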
++
++#define CDT_LEN (16)
++#define ADT_LEN (16)
++#define CAT_LEN (2)
++
++#define CDT_SZ (CDT_LEN * LOGIC_CH_NUM)
++#define ADT_SZ (ADT_LEN * LOGIC_CH_NUM)
++#define CAT_SZ (CAT_LEN * LOGIC_CH_NUM * 2)
++
++#define CDT_BASE(base) (base + BUF_CDT_OFFSET)
++#define ADT_BASE(base) (base + BUF_ADT_OFFSET)
++#define CAT_MLB_BASE(base) (base + BUF_CAT_MLB_OFFSET)
++#define CAT_HBI_BASE(base) (base + BUF_CAT_HBI_OFFSET)
++
++#define CDTn_ADDR(base, n) (base + BUF_CDT_OFFSET + n * CDT_LEN)
++#define ADTn_ADDR(base, n) (base + BUF_ADT_OFFSET + n * ADT_LEN)
++#define CATn_MLB_ADDR(base, n) (base + BUF_CAT_MLB_OFFSET + n * CAT_LEN)
++#define CATn_HBI_ADDR(base, n) (base + BUF_CAT_HBI_OFFSET + n * CAT_LEN)
++
++#define CAT_CL_SHIFT (0x0)
++#define CAT_CT_SHIFT (8)
++#define CAT_CE (0x1 << 11)
++#define CAT_RNW (0x1 << 12)
++#define CAT_MT (0x1 << 13)
++#define CAT_FCE (0x1 << 14)
++#define CAT_MFE (0x1 << 14)
++
++#define CDT_WSBC_SHIFT (14)
++#define CDT_WPC_SHIFT (11)
++#define CDT_RSBC_SHIFT (30)
++#define CDT_RPC_SHIFT (27)
++#define CDT_WPC_1_SHIFT (12)
++#define CDT_RPC_1_SHIFT (28)
++#define CDT_WPTR_SHIFT (0)
++#define CDT_SYNC_WSTS_MASK (0x0000f000)
++#define CDT_SYNC_WSTS_SHIFT (12)
++#define CDT_CTRL_ASYNC_WSTS_MASK (0x0000f000)
++#define CDT_CTRL_ASYNC_WSTS_SHIFT (12)
++#define CDT_ISOC_WSTS_MASK (0x0000e000)
++#define CDT_ISOC_WSTS_SHIFT (13)
++#define CDT_RPTR_SHIFT (16)
++#define CDT_SYNC_RSTS_MASK (0xf0000000)
++#define CDT_SYNC_RSTS_SHIFT (28)
++#define CDT_CTRL_ASYNC_RSTS_MASK (0xf0000000)
++#define CDT_CTRL_ASYNC_RSTS_SHIFT (28)
++#define CDT_ISOC_RSTS_MASK (0xe0000000)
++#define CDT_ISOC_RSTS_SHIFT (29)
++#define CDT_CTRL_ASYNC_WSTS_1 (0x1 << 14)
++#define CDT_CTRL_ASYNC_RSTS_1 (0x1 << 15)
++#define CDT_BD_SHIFT (0)
++#define CDT_BA_SHIFT (16)
++#define CDT_BS_SHIFT (0)
++#define CDT_BF_SHIFT (31)
++
++#define ADT_PG (0x1 << 13)
++#define ADT_LE (0x1 << 14)
++#define ADT_CE (0x1 << 15)
++#define ADT_BD1_SHIFT (0)
++#define ADT_ERR1 (0x1 << 13)
++#define ADT_DNE1 (0x1 << 14)
++#define ADT_RDY1 (0x1 << 15)
++#define ADT_BD2_SHIFT (16)
++#define ADT_ERR2 (0x1 << 29)
++#define ADT_DNE2 (0x1 << 30)
++#define ADT_RDY2 (0x1 << 31)
++#define ADT_BA1_SHIFT (0x0)
++#define ADT_BA2_SHIFT (0x0)
++#define ADT_PS1 (0x1 << 12)
++#define ADT_PS2 (0x1 << 28)
++#define ADT_MEP1 (0x1 << 11)
++#define ADT_MEP2 (0x1 << 27)
++
++#define MLB_MINOR_DEVICES 4
++#define MLB_CONTROL_DEV_NAME "ctrl"
++#define MLB_ASYNC_DEV_NAME "async"
++#define MLB_SYNC_DEV_NAME "sync"
++#define MLB_ISOC_DEV_NAME "isoc"
++
++#define TX_CHANNEL 0
++#define RX_CHANNEL 1
++
++#define TRANS_RING_NODES (1 << 3)
++
++enum MLB_CTYPE {
++ MLB_CTYPE_SYNC,
++ MLB_CTYPE_CTRL,
++ MLB_CTYPE_ASYNC,
++ MLB_CTYPE_ISOC,
++};
++
++enum CLK_SPEED {
++ CLK_256FS,
++ CLK_512FS,
++ CLK_1024FS,
++ CLK_2048FS,
++ CLK_3072FS,
++ CLK_4096FS,
++ CLK_6144FS,
++ CLK_8192FS,
++};
++
++struct mlb_ringbuf {
++ s8 *virt_bufs[TRANS_RING_NODES];
++ u32 phy_addrs[TRANS_RING_NODES];
++ s32 head;
++ s32 tail;
++ s32 unit_size;
++ s32 total_size;
++ rwlock_t rb_lock ____cacheline_aligned; /* ring index lock */
++};
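++
++/*
++ * TRANS_RING_NODES is a power of two so ring indices can wrap with
++ * cheap masking, in the style of the linux/circ_buf.h helpers included
++ * above; head is advanced by the producer and tail by the consumer,
++ * both under rb_lock.
++ */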
++
++struct mlb_channel_info {
++ /* Input MLB channel address */
++ u32 address;
++ /* Internal AHB channel label */
++ u32 cl;
++ /* DBR buf head */
++ u32 dbr_buf_head;
++};
++
++struct mlb_dev_info {
++ /* device node name */
++ const char dev_name[20];
++ /* channel type */
++ const unsigned int channel_type;
++ /* ch fps */
++ enum CLK_SPEED fps;
++ /* channel info for tx/rx */
++ struct mlb_channel_info channels[2];
++ /* ring buffer */
++ u8 *rbuf_base_virt;
++ u32 rbuf_base_phy;
++ struct mlb_ringbuf rx_rbuf;
++ struct mlb_ringbuf tx_rbuf;
++ /* exception event */
++ unsigned long ex_event;
++ /* tx busy indicator */
++ unsigned long tx_busy;
++ /* channel started up or not */
++ atomic_t on;
++ /* device open count */
++ atomic_t opencnt;
++ /* wait queue head for channel */
++ wait_queue_head_t rx_wq;
++ wait_queue_head_t tx_wq;
++ /* TX OK */
++ s32 tx_ok;
++ /* spinlock for event access */
++ spinlock_t event_lock;
++ /*
++ * Block size for isoc mode
++ * This variable can be configured in ioctl
++ */
++ u32 isoc_blksz;
++ /*
++	 * Number of quads for sync mode
++	 * This variable can be configured in ioctl
++ */
++ u32 sync_quad;
++ /* Buffer depth in cdt */
++ u32 cdt_buf_dep;
++ /* Buffer depth in adt */
++ u32 adt_buf_dep;
++ /* Buffer size to hold data */
++ u32 buf_size;
++};
++
++struct mlb_data {
++ struct mlb_dev_info *devinfo;
++ struct clk *clk_mlb3p;
++ struct clk *clk_mlb6p;
++ struct cdev cdev;
++ struct class *class; /* device class */
++ dev_t firstdev;
++#ifdef CONFIG_REGULATOR
++ struct regulator *nvcc;
++#endif
++ void __iomem *membase; /* mlb module base address */
++ struct gen_pool *iram_pool;
++ u32 iram_size;
++ u32 irq_ahb0;
++ u32 irq_ahb1;
++ u32 irq_mlb;
++};
++
++/*
++ * For optimization, we use fixed channel label for
++ * input channels of each mode
++ * SYNC: CL = 0 for RX, CL = 64 for TX
++ * CTRL: CL = 1 for RX, CL = 65 for TX
++ * ASYNC: CL = 2 for RX, CL = 66 for TX
++ * ISOC: CL = 3 for RX, CL = 67 for TX
++ */
++#define SYNC_RX_CL_AHB0 0
++#define CTRL_RX_CL_AHB0 1
++#define ASYNC_RX_CL_AHB0 2
++#define ISOC_RX_CL_AHB0 3
++#define SYNC_TX_CL_AHB0 4
++#define CTRL_TX_CL_AHB0 5
++#define ASYNC_TX_CL_AHB0 6
++#define ISOC_TX_CL_AHB0 7
++
++#define SYNC_RX_CL_AHB1 32
++#define CTRL_RX_CL_AHB1 33
++#define ASYNC_RX_CL_AHB1 34
++#define ISOC_RX_CL_AHB1 35
++#define SYNC_TX_CL_AHB1 36
++#define CTRL_TX_CL_AHB1 37
++#define ASYNC_TX_CL_AHB1 38
++#define ISOC_TX_CL_AHB1 39
++
++#define SYNC_RX_CL SYNC_RX_CL_AHB0
++#define CTRL_RX_CL CTRL_RX_CL_AHB0
++#define ASYNC_RX_CL ASYNC_RX_CL_AHB0
++#define ISOC_RX_CL ISOC_RX_CL_AHB0
++
++#define SYNC_TX_CL SYNC_TX_CL_AHB0
++#define CTRL_TX_CL CTRL_TX_CL_AHB0
++#define ASYNC_TX_CL ASYNC_TX_CL_AHB0
++#define ISOC_TX_CL ISOC_TX_CL_AHB0
++
++static struct mlb_dev_info mlb_devinfo[MLB_MINOR_DEVICES] = {
++ {
++ .dev_name = MLB_SYNC_DEV_NAME,
++ .channel_type = MLB_CTYPE_SYNC,
++ .channels = {
++ [0] = {
++ .cl = SYNC_TX_CL,
++ .dbr_buf_head = CH_SYNC_DBR_BUF_OFFSET,
++ },
++ [1] = {
++ .cl = SYNC_RX_CL,
++ .dbr_buf_head = CH_SYNC_DBR_BUF_OFFSET
++ + CH_SYNC_BUF_SZ,
++ },
++ },
++ .rx_rbuf = {
++ .unit_size = CH_SYNC_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[0].rx_rbuf.rb_lock),
++ },
++ .tx_rbuf = {
++ .unit_size = CH_SYNC_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[0].tx_rbuf.rb_lock),
++ },
++ .cdt_buf_dep = CH_SYNC_CDT_BUF_DEP,
++ .adt_buf_dep = CH_SYNC_ADT_BUF_DEP,
++ .buf_size = CH_SYNC_BUF_SZ,
++ .on = ATOMIC_INIT(0),
++ .opencnt = ATOMIC_INIT(0),
++ .event_lock = __SPIN_LOCK_UNLOCKED(mlb_devinfo[0].event_lock),
++ },
++ {
++ .dev_name = MLB_CONTROL_DEV_NAME,
++ .channel_type = MLB_CTYPE_CTRL,
++ .channels = {
++ [0] = {
++ .cl = CTRL_TX_CL,
++ .dbr_buf_head = CH_CTRL_DBR_BUF_OFFSET,
++ },
++ [1] = {
++ .cl = CTRL_RX_CL,
++ .dbr_buf_head = CH_CTRL_DBR_BUF_OFFSET
++ + CH_CTRL_BUF_SZ,
++ },
++ },
++ .rx_rbuf = {
++ .unit_size = CH_CTRL_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[1].rx_rbuf.rb_lock),
++ },
++ .tx_rbuf = {
++ .unit_size = CH_CTRL_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[1].tx_rbuf.rb_lock),
++ },
++ .cdt_buf_dep = CH_CTRL_CDT_BUF_DEP,
++ .adt_buf_dep = CH_CTRL_ADT_BUF_DEP,
++ .buf_size = CH_CTRL_BUF_SZ,
++ .on = ATOMIC_INIT(0),
++ .opencnt = ATOMIC_INIT(0),
++ .event_lock = __SPIN_LOCK_UNLOCKED(mlb_devinfo[1].event_lock),
++ },
++ {
++ .dev_name = MLB_ASYNC_DEV_NAME,
++ .channel_type = MLB_CTYPE_ASYNC,
++ .channels = {
++ [0] = {
++ .cl = ASYNC_TX_CL,
++ .dbr_buf_head = CH_ASYNC_DBR_BUF_OFFSET,
++ },
++ [1] = {
++ .cl = ASYNC_RX_CL,
++ .dbr_buf_head = CH_ASYNC_DBR_BUF_OFFSET
++ + CH_ASYNC_BUF_SZ,
++ },
++ },
++ .rx_rbuf = {
++ .unit_size = CH_ASYNC_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[2].rx_rbuf.rb_lock),
++ },
++ .tx_rbuf = {
++ .unit_size = CH_ASYNC_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[2].tx_rbuf.rb_lock),
++ },
++ .cdt_buf_dep = CH_ASYNC_CDT_BUF_DEP,
++ .adt_buf_dep = CH_ASYNC_ADT_BUF_DEP,
++ .buf_size = CH_ASYNC_BUF_SZ,
++ .on = ATOMIC_INIT(0),
++ .opencnt = ATOMIC_INIT(0),
++ .event_lock = __SPIN_LOCK_UNLOCKED(mlb_devinfo[2].event_lock),
++ },
++ {
++ .dev_name = MLB_ISOC_DEV_NAME,
++ .channel_type = MLB_CTYPE_ISOC,
++ .channels = {
++ [0] = {
++ .cl = ISOC_TX_CL,
++ .dbr_buf_head = CH_ISOC_DBR_BUF_OFFSET,
++ },
++ [1] = {
++ .cl = ISOC_RX_CL,
++ .dbr_buf_head = CH_ISOC_DBR_BUF_OFFSET
++ + CH_ISOC_BUF_SZ,
++ },
++ },
++ .rx_rbuf = {
++ .unit_size = CH_ISOC_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[3].rx_rbuf.rb_lock),
++ },
++ .tx_rbuf = {
++ .unit_size = CH_ISOC_BUF_SZ,
++ .rb_lock =
++ __RW_LOCK_UNLOCKED(mlb_devinfo[3].tx_rbuf.rb_lock),
++ },
++ .cdt_buf_dep = CH_ISOC_CDT_BUF_DEP,
++ .adt_buf_dep = CH_ISOC_ADT_BUF_DEP,
++ .buf_size = CH_ISOC_BUF_SZ,
++ .on = ATOMIC_INIT(0),
++ .opencnt = ATOMIC_INIT(0),
++ .event_lock = __SPIN_LOCK_UNLOCKED(mlb_devinfo[3].event_lock),
++ .isoc_blksz = CH_ISOC_BLK_SIZE_188,
++ },
++};
++
++static void __iomem *mlb_base;
++
++DEFINE_SPINLOCK(ctr_lock);
++
++#ifdef DEBUG
++#define DUMP_REG(reg) pr_debug(#reg": 0x%08x\n", __raw_readl(mlb_base + reg))
++
++static void mlb150_dev_dump_reg(void)
++{
++ pr_debug("mxc_mlb150: Dump registers:\n");
++ DUMP_REG(REG_MLBC0);
++ DUMP_REG(REG_MLBPC0);
++ DUMP_REG(REG_MS0);
++ DUMP_REG(REG_MS1);
++ DUMP_REG(REG_MSS);
++ DUMP_REG(REG_MSD);
++ DUMP_REG(REG_MIEN);
++ DUMP_REG(REG_MLBPC2);
++ DUMP_REG(REG_MLBPC1);
++ DUMP_REG(REG_MLBC1);
++ DUMP_REG(REG_HCTL);
++ DUMP_REG(REG_HCMR0);
++ DUMP_REG(REG_HCMR1);
++ DUMP_REG(REG_HCER0);
++ DUMP_REG(REG_HCER1);
++ DUMP_REG(REG_HCBR0);
++ DUMP_REG(REG_HCBR1);
++ DUMP_REG(REG_MDAT0);
++ DUMP_REG(REG_MDAT1);
++ DUMP_REG(REG_MDAT2);
++ DUMP_REG(REG_MDAT3);
++ DUMP_REG(REG_MDWE0);
++ DUMP_REG(REG_MDWE1);
++ DUMP_REG(REG_MDWE2);
++ DUMP_REG(REG_MDWE3);
++ DUMP_REG(REG_MCTL);
++ DUMP_REG(REG_MADR);
++ DUMP_REG(REG_ACTL);
++ DUMP_REG(REG_ACSR0);
++ DUMP_REG(REG_ACSR1);
++ DUMP_REG(REG_ACMR0);
++ DUMP_REG(REG_ACMR1);
++}
++
++static void mlb150_dev_dump_hex(const u8 *buf, u32 len)
++{
++ print_hex_dump(KERN_DEBUG, "CTR DUMP:",
++ DUMP_PREFIX_OFFSET, 8, 1, buf, len, 0);
++}
++#endif
++
++static inline void mlb150_dev_enable_ctr_write(u32 mdat0_bits_en,
++ u32 mdat1_bits_en, u32 mdat2_bits_en, u32 mdat3_bits_en)
++{
++ __raw_writel(mdat0_bits_en, mlb_base + REG_MDWE0);
++ __raw_writel(mdat1_bits_en, mlb_base + REG_MDWE1);
++ __raw_writel(mdat2_bits_en, mlb_base + REG_MDWE2);
++ __raw_writel(mdat3_bits_en, mlb_base + REG_MDWE3);
++}
++
++#ifdef DEBUG
++static inline u8 mlb150_dev_dbr_read(u32 dbr_addr)
++{
++ s32 timeout = 1000;
++ u8 dbr_val = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ctr_lock, flags);
++ __raw_writel(MADR_TB | dbr_addr,
++ mlb_base + REG_MADR);
++
++ while ((!(__raw_readl(mlb_base + REG_MCTL)
++ & MCTL_XCMP)) &&
++ timeout--)
++ ;
++
++	if (timeout < 0) {
++ spin_unlock_irqrestore(&ctr_lock, flags);
++ return -ETIME;
++ }
++
++ dbr_val = __raw_readl(mlb_base + REG_MDAT0) & 0x000000ff;
++
++ __raw_writel(0, mlb_base + REG_MCTL);
++ spin_unlock_irqrestore(&ctr_lock, flags);
++
++ return dbr_val;
++}
++
++static inline s32 mlb150_dev_dbr_write(u32 dbr_addr, u8 dbr_val)
++{
++ s32 timeout = 1000;
++ u32 mdat0 = dbr_val & 0x000000ff;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ctr_lock, flags);
++ __raw_writel(mdat0, mlb_base + REG_MDAT0);
++
++ __raw_writel(MADR_WNR | MADR_TB | dbr_addr,
++ mlb_base + REG_MADR);
++
++ while ((!(__raw_readl(mlb_base + REG_MCTL)
++ & MCTL_XCMP)) &&
++ timeout--)
++ ;
++
++ if (timeout <= 0) {
++ spin_unlock_irqrestore(&ctr_lock, flags);
++ return -ETIME;
++ }
++
++ __raw_writel(0, mlb_base + REG_MCTL);
++ spin_unlock_irqrestore(&ctr_lock, flags);
++
++ return 0;
++}
++
++static inline s32 mlb150_dev_dbr_dump(u32 addr, u32 size)
++{
++ u8 *dump_buf = NULL;
++ u8 *buf_ptr = NULL;
++ s32 i;
++
++ dump_buf = kzalloc(size, GFP_KERNEL);
++ if (!dump_buf) {
++ pr_err("can't allocate enough memory\n");
++ return -ENOMEM;
++ }
++
++ for (i = 0, buf_ptr = dump_buf;
++ i < size; ++i, ++buf_ptr)
++ *buf_ptr = mlb150_dev_dbr_read(addr + i);
++
++ mlb150_dev_dump_hex(dump_buf, size);
++
++ kfree(dump_buf);
++
++ return 0;
++}
++#endif
++
++static s32 mlb150_dev_ctr_read(u32 ctr_offset, u32 *ctr_val)
++{
++ s32 timeout = 1000;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ctr_lock, flags);
++ __raw_writel(ctr_offset, mlb_base + REG_MADR);
++
++ while ((!(__raw_readl(mlb_base + REG_MCTL)
++ & MCTL_XCMP)) &&
++ timeout--)
++ ;
++
++ if (timeout <= 0) {
++ spin_unlock_irqrestore(&ctr_lock, flags);
++ pr_debug("mxc_mlb150: Read CTR timeout\n");
++ return -ETIME;
++ }
++
++ ctr_val[0] = __raw_readl(mlb_base + REG_MDAT0);
++ ctr_val[1] = __raw_readl(mlb_base + REG_MDAT1);
++ ctr_val[2] = __raw_readl(mlb_base + REG_MDAT2);
++ ctr_val[3] = __raw_readl(mlb_base + REG_MDAT3);
++
++ __raw_writel(0, mlb_base + REG_MCTL);
++
++ spin_unlock_irqrestore(&ctr_lock, flags);
++
++ return 0;
++}
++
++static s32 mlb150_dev_ctr_write(u32 ctr_offset, const u32 *ctr_val)
++{
++ s32 timeout = 1000;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ctr_lock, flags);
++
++ __raw_writel(ctr_val[0], mlb_base + REG_MDAT0);
++ __raw_writel(ctr_val[1], mlb_base + REG_MDAT1);
++ __raw_writel(ctr_val[2], mlb_base + REG_MDAT2);
++ __raw_writel(ctr_val[3], mlb_base + REG_MDAT3);
++
++ __raw_writel(MADR_WNR | ctr_offset,
++ mlb_base + REG_MADR);
++
++ while ((!(__raw_readl(mlb_base + REG_MCTL)
++ & MCTL_XCMP)) &&
++ timeout--)
++ ;
++
++ if (timeout <= 0) {
++ spin_unlock_irqrestore(&ctr_lock, flags);
++ pr_debug("mxc_mlb150: Write CTR timeout\n");
++ return -ETIME;
++ }
++
++ __raw_writel(0, mlb_base + REG_MCTL);
++
++ spin_unlock_irqrestore(&ctr_lock, flags);
++
++#ifdef DEBUG_CTR
++ {
++ u32 ctr_rd[4] = { 0 };
++
++ if (!mlb150_dev_ctr_read(ctr_offset, ctr_rd)) {
++ if (ctr_val[0] == ctr_rd[0] &&
++ ctr_val[1] == ctr_rd[1] &&
++ ctr_val[2] == ctr_rd[2] &&
++ ctr_val[3] == ctr_rd[3])
++ return 0;
++ else {
++ pr_debug("mxc_mlb150: ctr write failed\n");
++ pr_debug("offset: 0x%x\n", ctr_offset);
++ pr_debug("Write: 0x%x 0x%x 0x%x 0x%x\n",
++ ctr_val[3], ctr_val[2],
++ ctr_val[1], ctr_val[0]);
++ pr_debug("Read: 0x%x 0x%x 0x%x 0x%x\n",
++ ctr_rd[3], ctr_rd[2],
++ ctr_rd[1], ctr_rd[0]);
++ return -EBADE;
++ }
++ } else {
++ pr_debug("mxc_mlb150: ctr read failed\n");
++ return -EBADE;
++ }
++ }
++#endif
++
++ return 0;
++}
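++
++/*
++ * CTR access protocol used by the two helpers above: write the table
++ * offset to MADR (with MADR_WNR set for writes), poll MCTL until XCMP
++ * is set, move the four 32-bit words through MDAT0..MDAT3, then clear
++ * MCTL; writes additionally honor the MDWEn write-enable masks set up
++ * via mlb150_dev_enable_ctr_write().
++ */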
++
++#ifdef DEBUG
++static s32 mlb150_dev_cat_read(u32 ctr_offset, u32 ch, u16 *cat_val)
++{
++ u16 ctr_val[8] = { 0 };
++
++ if (mlb150_dev_ctr_read(ctr_offset, (u32 *)ctr_val))
++ return -ETIME;
++
++	/*
++	 * The CTR row was read into a u16 array so the 16-bit CAT
++	 * entry can be picked out directly.
++	 */
++	*cat_val = ctr_val[ch % 8];
++
++ return 0;
++}
++#endif
++
++static s32 mlb150_dev_cat_write(u32 ctr_offset, u32 ch, const u16 cat_val)
++{
++ u16 ctr_val[8] = { 0 };
++
++ if (mlb150_dev_ctr_read(ctr_offset, (u32 *)ctr_val))
++ return -ETIME;
++
++ ctr_val[ch % 8] = cat_val;
++ if (mlb150_dev_ctr_write(ctr_offset, (u32 *)ctr_val))
++ return -ETIME;
++
++ return 0;
++}
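++
++/*
++ * CAT entries are 16 bits wide and packed eight to a CTR row, so
++ * mlb150_dev_cat_write() does a read-modify-write of the whole row;
++ * the helpers below derive the row from (ch >> 3) and the slot from
++ * (ch % 8).
++ */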
++
++#define mlb150_dev_cat_mlb_read(ch, cat_val) \
++ mlb150_dev_cat_read(BUF_CAT_MLB_OFFSET + (ch >> 3), ch, cat_val)
++#define mlb150_dev_cat_mlb_write(ch, cat_val) \
++ mlb150_dev_cat_write(BUF_CAT_MLB_OFFSET + (ch >> 3), ch, cat_val)
++#define mlb150_dev_cat_hbi_read(ch, cat_val) \
++ mlb150_dev_cat_read(BUF_CAT_HBI_OFFSET + (ch >> 3), ch, cat_val)
++#define mlb150_dev_cat_hbi_write(ch, cat_val) \
++ mlb150_dev_cat_write(BUF_CAT_HBI_OFFSET + (ch >> 3), ch, cat_val)
++
++#define mlb150_dev_cdt_read(ch, cdt_val) \
++ mlb150_dev_ctr_read(BUF_CDT_OFFSET + ch, cdt_val)
++#define mlb150_dev_cdt_write(ch, cdt_val) \
++ mlb150_dev_ctr_write(BUF_CDT_OFFSET + ch, cdt_val)
++#define mlb150_dev_adt_read(ch, adt_val) \
++ mlb150_dev_ctr_read(BUF_ADT_OFFSET + ch, adt_val)
++#define mlb150_dev_adt_write(ch, adt_val) \
++ mlb150_dev_ctr_write(BUF_ADT_OFFSET + ch, adt_val)
++
++static s32 mlb150_dev_get_adt_sts(u32 ch)
++{
++ s32 timeout = 1000;
++ unsigned long flags;
++ u32 reg;
++
++ spin_lock_irqsave(&ctr_lock, flags);
++ __raw_writel(BUF_ADT_OFFSET + ch,
++ mlb_base + REG_MADR);
++
++ while ((!(__raw_readl(mlb_base + REG_MCTL)
++ & MCTL_XCMP)) &&
++ timeout--)
++ ;
++
++ if (timeout <= 0) {
++ spin_unlock_irqrestore(&ctr_lock, flags);
++ pr_debug("mxc_mlb150: Read CTR timeout\n");
++ return -ETIME;
++ }
++
++ reg = __raw_readl(mlb_base + REG_MDAT1);
++
++ __raw_writel(0, mlb_base + REG_MCTL);
++ spin_unlock_irqrestore(&ctr_lock, flags);
++
++#ifdef DEBUG_ADT
++ pr_debug("mxc_mlb150: Get ch %d adt sts: 0x%08x\n", ch, reg);
++#endif
++
++ return reg;
++}
++
++#ifdef DEBUG
++static void mlb150_dev_dump_ctr_tbl(u32 ch_start, u32 ch_end)
++{
++ u32 i = 0;
++ u32 ctr_val[4] = { 0 };
++
++ pr_debug("mxc_mlb150: CDT Table");
++ for (i = BUF_CDT_OFFSET + ch_start;
++ i < BUF_CDT_OFFSET + ch_end;
++ ++i) {
++ mlb150_dev_ctr_read(i, ctr_val);
++ pr_debug("CTR 0x%02x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
++ i, ctr_val[3], ctr_val[2], ctr_val[1], ctr_val[0]);
++ }
++
++ pr_debug("mxc_mlb150: ADT Table");
++ for (i = BUF_ADT_OFFSET + ch_start;
++ i < BUF_ADT_OFFSET + ch_end;
++ ++i) {
++ mlb150_dev_ctr_read(i, ctr_val);
++ pr_debug("CTR 0x%02x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
++ i, ctr_val[3], ctr_val[2], ctr_val[1], ctr_val[0]);
++ }
++
++ pr_debug("mxc_mlb150: CAT MLB Table");
++ for (i = BUF_CAT_MLB_OFFSET + (ch_start >> 3);
++ i <= BUF_CAT_MLB_OFFSET + ((ch_end + 8) >> 3);
++ ++i) {
++ mlb150_dev_ctr_read(i, ctr_val);
++ pr_debug("CTR 0x%02x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
++ i, ctr_val[3], ctr_val[2], ctr_val[1], ctr_val[0]);
++ }
++
++ pr_debug("mxc_mlb150: CAT HBI Table");
++ for (i = BUF_CAT_HBI_OFFSET + (ch_start >> 3);
++ i <= BUF_CAT_HBI_OFFSET + ((ch_end + 8) >> 3);
++ ++i) {
++ mlb150_dev_ctr_read(i, ctr_val);
++ pr_debug("CTR 0x%02x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
++ i, ctr_val[3], ctr_val[2], ctr_val[1], ctr_val[0]);
++ }
++}
++#endif
++
++/*
++ * Initialize the MLB module device
++ */
++static inline void mlb150_dev_enable_dma_irq(u32 enable)
++{
++ u32 ch_rx_mask = (1 << SYNC_RX_CL_AHB0) | (1 << CTRL_RX_CL_AHB0)
++ | (1 << ASYNC_RX_CL_AHB0) | (1 << ISOC_RX_CL_AHB0)
++ | (1 << SYNC_TX_CL_AHB0) | (1 << CTRL_TX_CL_AHB0)
++ | (1 << ASYNC_TX_CL_AHB0) | (1 << ISOC_TX_CL_AHB0);
++ u32 ch_tx_mask = (1 << (SYNC_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (CTRL_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ASYNC_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ISOC_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (SYNC_TX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (CTRL_TX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ASYNC_TX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ISOC_TX_CL_AHB1 - INT_AHB1_CH_START));
++
++ if (enable) {
++ __raw_writel(ch_rx_mask, mlb_base + REG_ACMR0);
++ __raw_writel(ch_tx_mask, mlb_base + REG_ACMR1);
++ } else {
++ __raw_writel(0x0, mlb_base + REG_ACMR0);
++ __raw_writel(0x0, mlb_base + REG_ACMR1);
++ }
++}
++
++
++static void mlb150_dev_init_ir_amba_ahb(void)
++{
++ u32 reg = 0;
++
++ /*
++ * Step 1. Program the ACMRn registers to enable interrupts from all
++ * active DMA channels
++ */
++ mlb150_dev_enable_dma_irq(1);
++
++ /*
++ * Step 2. Select the status clear method:
++ * ACTL.SCE = 0, hardware clears on read
++ * ACTL.SCE = 1, software writes a '1' to clear
++ * We only support DMA MODE 1
++ */
++ reg = __raw_readl(mlb_base + REG_ACTL);
++ reg |= ACTL_DMAMODE;
++#ifdef MULTIPLE_PACKAGE_MODE
++ reg |= REG_ACTL_MPB;
++#endif
++
++ /*
++ * Step 3. Select 1 or 2 interrupt signals:
++ * ACTL.SMX = 0: one interrupt for channels 0 - 31 on ahb_init[0]
++ * and another interrupt for channels 32 - 63 on ahb_init[1]
++	 * ACTL.SMX = 1: single interrupt for all channels on ahb_init[0]
++ */
++ reg &= ~ACTL_SMX;
++
++ __raw_writel(reg, mlb_base + REG_ACTL);
++}
++
++static inline void mlb150_dev_enable_ir_mlb(u32 enable)
++{
++ /*
++ * Step 1, Select the MSn to be cleared by software,
++ * writing a '0' to the appropriate bits
++ */
++ __raw_writel(0, mlb_base + REG_MS0);
++ __raw_writel(0, mlb_base + REG_MS1);
++
++ /*
++	 * Step 2, Program MIEN to enable protocol error
++ * interrupts for all active MLB channels
++ */
++ if (enable)
++ __raw_writel(MIEN_CTX_PE |
++ MIEN_CRX_PE | MIEN_ATX_PE |
++ MIEN_ARX_PE | MIEN_SYNC_PE |
++ MIEN_ISOC_PE,
++ mlb_base + REG_MIEN);
++ else
++ __raw_writel(0, mlb_base + REG_MIEN);
++}
++
++static inline void mlb150_enable_pll(struct mlb_data *drvdata)
++{
++ u32 c0_val;
++
++ __raw_writel(MLBPC1_VAL,
++ drvdata->membase + REG_MLBPC1);
++
++ c0_val = __raw_readl(drvdata->membase + REG_MLBC0);
++ if (c0_val & MLBC0_MLBPEN) {
++ c0_val &= ~MLBC0_MLBPEN;
++ __raw_writel(c0_val,
++ drvdata->membase + REG_MLBC0);
++ }
++
++ clk_prepare_enable(drvdata->clk_mlb6p);
++
++ c0_val |= (MLBC0_MLBPEN);
++ __raw_writel(c0_val, drvdata->membase + REG_MLBC0);
++}
++
++static inline void mlb150_disable_pll(struct mlb_data *drvdata)
++{
++ u32 c0_val;
++
++ clk_disable_unprepare(drvdata->clk_mlb6p);
++
++ c0_val = __raw_readl(drvdata->membase + REG_MLBC0);
++
++ __raw_writel(0x0, drvdata->membase + REG_MLBPC1);
++
++ c0_val &= ~MLBC0_MLBPEN;
++ __raw_writel(c0_val, drvdata->membase + REG_MLBC0);
++}
++
++static void mlb150_dev_reset_cdt(void)
++{
++ int i = 0;
++ u32 ctr_val[4] = { 0 };
++
++ mlb150_dev_enable_ctr_write(0xffffffff, 0xffffffff,
++ 0xffffffff, 0xffffffff);
++
++ for (i = 0; i < (LOGIC_CH_NUM); ++i)
++ mlb150_dev_ctr_write(BUF_CDT_OFFSET + i, ctr_val);
++}
++
++static s32 mlb150_dev_init_ch_cdt(struct mlb_dev_info *pdevinfo, u32 ch,
++ enum MLB_CTYPE ctype, u32 ch_func)
++{
++ u32 cdt_val[4] = { 0 };
++
++ /* a. Set the 14-bit base address (BA) */
++ pr_debug("mxc_mlb150: ctype: %d, ch: %d, dbr_buf_head: 0x%08x",
++ ctype, ch, pdevinfo->channels[ch_func].dbr_buf_head);
++ cdt_val[3] = (pdevinfo->channels[ch_func].dbr_buf_head)
++ << CDT_BA_SHIFT;
++ /*
++ * b. Set the 12-bit or 13-bit buffer depth (BD)
++ * BD = buffer depth in bytes - 1
++ * For synchronous channels: (BD + 1) = 4 * m * bpf
++ * For control channels: (BD + 1) >= max packet length (64)
++ * For asynchronous channels: (BD + 1) >= max packet length
++ * 1024 for a MOST Data packet (MDP);
++ * 1536 for a MOST Ethernet Packet (MEP)
++ * For isochronous channels: (BD + 1) mod (BS + 1) = 0
++	 *	(BS = block size in bytes - 1, programmed in CDT word 1)
++ */
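++	/*
++	 * Worked example with the defaults above (illustration only):
++	 * sync uses CH_SYNC_CDT_BUF_DEP = 1 * 4 * 4 = 16 bytes, so
++	 * BD = 15 is programmed below; isoc with the default 188-byte
++	 * block writes BS = 187 into word 1 and BD = 187 into word 3.
++	 */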
++ if (MLB_CTYPE_ISOC == ctype)
++ cdt_val[1] |= (pdevinfo->isoc_blksz - 1);
++ /* BD */
++ cdt_val[3] |= (pdevinfo->cdt_buf_dep - 1) << CDT_BD_SHIFT;
++
++ pr_debug("mxc_mlb150: Set CDT val of channel %d, type: %d: "
++ "0x%08x 0x%08x 0x%08x 0x%08x\n",
++ ch, ctype, cdt_val[3], cdt_val[2], cdt_val[1], cdt_val[0]);
++
++ if (mlb150_dev_cdt_write(ch, cdt_val))
++ return -ETIME;
++
++#ifdef DEBUG_CTR
++ {
++ u32 cdt_rd[4] = { 0 };
++ if (!mlb150_dev_cdt_read(ch, cdt_rd)) {
++ pr_debug("mxc_mlb150: CDT val of channel %d: "
++ "0x%08x 0x%08x 0x%08x 0x%08x\n",
++ ch, cdt_rd[3], cdt_rd[2], cdt_rd[1], cdt_rd[0]);
++ if (cdt_rd[3] == cdt_val[3] &&
++ cdt_rd[2] == cdt_val[2] &&
++ cdt_rd[1] == cdt_val[1] &&
++ cdt_rd[0] == cdt_val[0]) {
++ pr_debug("mxc_mlb150: set cdt succeed!\n");
++ return 0;
++ } else {
++ pr_debug("mxc_mlb150: set cdt failed!\n");
++ return -EBADE;
++ }
++ } else {
++ pr_debug("mxc_mlb150: Read CDT val of channel %d failed\n",
++ ch);
++ return -EBADE;
++ }
++ }
++#endif
++
++ return 0;
++}
++
++static s32 mlb150_dev_init_ch_cat(u32 ch, u32 cl,
++ u32 cat_mode, enum MLB_CTYPE ctype)
++{
++ u16 cat_val = 0;
++#ifdef DEBUG_CTR
++ u16 cat_rd = 0;
++#endif
++
++ cat_val = CAT_CE | (ctype << CAT_CT_SHIFT) | cl;
++
++ if (cat_mode & CAT_MODE_OUTBOUND_DMA)
++ cat_val |= CAT_RNW;
++
++ if (MLB_CTYPE_SYNC == ctype)
++ cat_val |= CAT_MT;
++
++ switch (cat_mode) {
++ case CAT_MODE_RX | CAT_MODE_INBOUND_DMA:
++ case CAT_MODE_TX | CAT_MODE_OUTBOUND_DMA:
++ pr_debug("mxc_mlb150: set CAT val of channel %d, type: %d: 0x%04x\n",
++ ch, ctype, cat_val);
++
++ if (mlb150_dev_cat_mlb_write(ch, cat_val))
++ return -ETIME;
++#ifdef DEBUG_CTR
++ if (!mlb150_dev_cat_mlb_read(ch, &cat_rd))
++ pr_debug("mxc_mlb150: CAT val of mlb channel %d: 0x%04x",
++ ch, cat_rd);
++ else {
++ pr_debug("mxc_mlb150: Read CAT of mlb channel %d failed\n",
++ ch);
++ return -EBADE;
++ }
++#endif
++ break;
++ case CAT_MODE_TX | CAT_MODE_INBOUND_DMA:
++ case CAT_MODE_RX | CAT_MODE_OUTBOUND_DMA:
++ pr_debug("mxc_mlb150: set CAT val of channel %d, type: %d: 0x%04x\n",
++ cl, ctype, cat_val);
++
++ if (mlb150_dev_cat_hbi_write(cl, cat_val))
++ return -ETIME;
++#ifdef DEBUG_CTR
++ if (!mlb150_dev_cat_hbi_read(cl, &cat_rd))
++ pr_debug("mxc_mlb150: CAT val of hbi channel %d: 0x%04x",
++ cl, cat_rd);
++ else {
++ pr_debug("mxc_mlb150: Read CAT of hbi channel %d failed\n",
++ cl);
++ return -EBADE;
++ }
++#endif
++ break;
++ default:
++		return -EBADRQC;
++ }
++
++#ifdef DEBUG_CTR
++ {
++ if (cat_val == cat_rd) {
++ pr_debug("mxc_mlb150: set cat succeed!\n");
++ return 0;
++ } else {
++ pr_debug("mxc_mlb150: set cat failed!\n");
++ return -EBADE;
++ }
++ }
++#endif
++ return 0;
++}
++
++static void mlb150_dev_reset_cat(void)
++{
++ int i = 0;
++ u32 ctr_val[4] = { 0 };
++
++ mlb150_dev_enable_ctr_write(0xffffffff, 0xffffffff,
++ 0xffffffff, 0xffffffff);
++
++ for (i = 0; i < (LOGIC_CH_NUM >> 3); ++i) {
++ mlb150_dev_ctr_write(BUF_CAT_MLB_OFFSET + i, ctr_val);
++ mlb150_dev_ctr_write(BUF_CAT_HBI_OFFSET + i, ctr_val);
++ }
++}
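++
++/*
++ * Reading of the loop above (an assumption based on the shift): each
++ * 128-bit CTR entry packs eight 16-bit CAT entries, so LOGIC_CH_NUM >> 3
++ * writes are enough to clear the whole CAT for both the MLB and HBI
++ * tables.
++ */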
++
++static void mlb150_dev_init_rfb(struct mlb_dev_info *pdevinfo, u32 rx_ch,
++ u32 tx_ch, enum MLB_CTYPE ctype)
++{
++ u32 rx_cl = pdevinfo->channels[RX_CHANNEL].cl;
++ u32 tx_cl = pdevinfo->channels[TX_CHANNEL].cl;
++ /* Step 1, Initialize all bits of CAT to '0' */
++ mlb150_dev_reset_cat();
++ mlb150_dev_reset_cdt();
++ /*
++ * Step 2, Initialize logical channel
++ * Step 3, Program the CDT for channel N
++ */
++ mlb150_dev_init_ch_cdt(pdevinfo, rx_cl, ctype, RX_CHANNEL);
++ mlb150_dev_init_ch_cdt(pdevinfo, tx_cl, ctype, TX_CHANNEL);
++
++ /* Step 4&5, Program the CAT for the inbound and outbound DMA */
++ mlb150_dev_init_ch_cat(rx_ch, rx_cl,
++ CAT_MODE_RX | CAT_MODE_INBOUND_DMA,
++ ctype);
++ mlb150_dev_init_ch_cat(rx_ch, rx_cl,
++ CAT_MODE_RX | CAT_MODE_OUTBOUND_DMA,
++ ctype);
++ mlb150_dev_init_ch_cat(tx_ch, tx_cl,
++ CAT_MODE_TX | CAT_MODE_INBOUND_DMA,
++ ctype);
++ mlb150_dev_init_ch_cat(tx_ch, tx_cl,
++ CAT_MODE_TX | CAT_MODE_OUTBOUND_DMA,
++ ctype);
++}
++
++static void mlb150_dev_reset_adt(void)
++{
++ int i = 0;
++ u32 ctr_val[4] = { 0 };
++
++ mlb150_dev_enable_ctr_write(0xffffffff, 0xffffffff,
++ 0xffffffff, 0xffffffff);
++
++ for (i = 0; i < (LOGIC_CH_NUM); ++i)
++ mlb150_dev_ctr_write(BUF_ADT_OFFSET + i, ctr_val);
++}
++
++static void mlb150_dev_reset_whole_ctr(void)
++{
++ mlb150_dev_enable_ctr_write(0xffffffff, 0xffffffff,
++ 0xffffffff, 0xffffffff);
++ mlb150_dev_reset_cdt();
++ mlb150_dev_reset_adt();
++ mlb150_dev_reset_cat();
++}
++
++#define CLR_REG(reg) __raw_writel(0x0, mlb_base + reg)
++
++static void mlb150_dev_reset_all_regs(void)
++{
++ CLR_REG(REG_MLBC0);
++ CLR_REG(REG_MLBPC0);
++ CLR_REG(REG_MS0);
++ CLR_REG(REG_MS1);
++ CLR_REG(REG_MSS);
++ CLR_REG(REG_MSD);
++ CLR_REG(REG_MIEN);
++ CLR_REG(REG_MLBPC2);
++ CLR_REG(REG_MLBPC1);
++ CLR_REG(REG_MLBC1);
++ CLR_REG(REG_HCTL);
++ CLR_REG(REG_HCMR0);
++ CLR_REG(REG_HCMR1);
++ CLR_REG(REG_HCER0);
++ CLR_REG(REG_HCER1);
++ CLR_REG(REG_HCBR0);
++ CLR_REG(REG_HCBR1);
++ CLR_REG(REG_MDAT0);
++ CLR_REG(REG_MDAT1);
++ CLR_REG(REG_MDAT2);
++ CLR_REG(REG_MDAT3);
++ CLR_REG(REG_MDWE0);
++ CLR_REG(REG_MDWE1);
++ CLR_REG(REG_MDWE2);
++ CLR_REG(REG_MDWE3);
++ CLR_REG(REG_MCTL);
++ CLR_REG(REG_MADR);
++ CLR_REG(REG_ACTL);
++ CLR_REG(REG_ACSR0);
++ CLR_REG(REG_ACSR1);
++ CLR_REG(REG_ACMR0);
++ CLR_REG(REG_ACMR1);
++}
++
++static inline s32 mlb150_dev_pipo_start(struct mlb_ringbuf *rbuf,
++ u32 ahb_ch, u32 buf_addr)
++{
++ u32 ctr_val[4] = { 0 };
++
++ ctr_val[1] |= ADT_RDY1;
++ ctr_val[2] = buf_addr;
++
++ if (mlb150_dev_adt_write(ahb_ch, ctr_val))
++ return -ETIME;
++
++ return 0;
++}
++
++static inline s32 mlb150_dev_pipo_next(u32 ahb_ch, enum MLB_CTYPE ctype,
++ u32 dne_sts, u32 buf_addr)
++{
++ u32 ctr_val[4] = { 0 };
++
++ if (MLB_CTYPE_ASYNC == ctype ||
++ MLB_CTYPE_CTRL == ctype) {
++ ctr_val[1] |= ADT_PS1;
++ ctr_val[1] |= ADT_PS2;
++ }
++
++ /*
++ * Clear DNE1 and ERR1
++ * Set the page ready bit (RDY1)
++ */
++ if (dne_sts & ADT_DNE1) {
++ ctr_val[1] |= ADT_RDY2;
++ ctr_val[3] = buf_addr;
++ } else {
++ ctr_val[1] |= ADT_RDY1;
++ ctr_val[2] = buf_addr;
++ }
++
++ if (mlb150_dev_adt_write(ahb_ch, ctr_val))
++ return -ETIME;
++
++ return 0;
++}
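++
++/*
++ * Sketch of the ping/pong protocol implemented above: the ADT entry has
++ * two buffer pages, ping (RDY1/BA1) and pong (RDY2/BA2).  When the done
++ * status reports DNE1, the ping page has just completed, so the next
++ * buffer is queued on the pong page, and vice versa.
++ */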
++
++static inline s32 mlb150_dev_pipo_stop(struct mlb_ringbuf *rbuf, u32 ahb_ch)
++{
++ u32 ctr_val[4] = { 0 };
++ unsigned long flags;
++
++ write_lock_irqsave(&rbuf->rb_lock, flags);
++ rbuf->head = rbuf->tail = 0;
++ write_unlock_irqrestore(&rbuf->rb_lock, flags);
++
++ if (mlb150_dev_adt_write(ahb_ch, ctr_val))
++ return -ETIME;
++
++ return 0;
++}
++
++static s32 mlb150_dev_init_ch_amba_ahb(struct mlb_dev_info *pdevinfo,
++ struct mlb_channel_info *chinfo,
++ enum MLB_CTYPE ctype)
++{
++ u32 ctr_val[4] = { 0 };
++
++ /* a. Set the 32-bit base address (BA1) */
++ ctr_val[3] = 0;
++ ctr_val[2] = 0;
++ ctr_val[1] = (pdevinfo->adt_buf_dep - 1) << ADT_BD1_SHIFT;
++ ctr_val[1] |= (pdevinfo->adt_buf_dep - 1) << ADT_BD2_SHIFT;
++ if (MLB_CTYPE_ASYNC == ctype ||
++ MLB_CTYPE_CTRL == ctype) {
++ ctr_val[1] |= ADT_PS1;
++ ctr_val[1] |= ADT_PS2;
++ }
++
++ ctr_val[0] |= (ADT_LE | ADT_CE);
++
++ pr_debug("mxc_mlb150: Set ADT val of channel %d, ctype: %d: "
++ "0x%08x 0x%08x 0x%08x 0x%08x\n",
++ chinfo->cl, ctype, ctr_val[3], ctr_val[2],
++ ctr_val[1], ctr_val[0]);
++
++ if (mlb150_dev_adt_write(chinfo->cl, ctr_val))
++ return -ETIME;
++
++#ifdef DEBUG_CTR
++ {
++ u32 ctr_rd[4] = { 0 };
++ if (!mlb150_dev_adt_read(chinfo->cl, ctr_rd)) {
++ pr_debug("mxc_mlb150: ADT val of channel %d: "
++ "0x%08x 0x%08x 0x%08x 0x%08x\n",
++ chinfo->cl, ctr_rd[3], ctr_rd[2],
++ ctr_rd[1], ctr_rd[0]);
++ if (ctr_rd[3] == ctr_val[3] &&
++ ctr_rd[2] == ctr_val[2] &&
++ ctr_rd[1] == ctr_val[1] &&
++ ctr_rd[0] == ctr_val[0]) {
++ pr_debug("mxc_mlb150: set adt succeed!\n");
++ return 0;
++ } else {
++ pr_debug("mxc_mlb150: set adt failed!\n");
++ return -EBADE;
++ }
++ } else {
++ pr_debug("mxc_mlb150: Read ADT val of channel %d failed\n",
++ chinfo->cl);
++ return -EBADE;
++ }
++ }
++#endif
++
++ return 0;
++}
++
++static void mlb150_dev_init_amba_ahb(struct mlb_dev_info *pdevinfo,
++ enum MLB_CTYPE ctype)
++{
++ struct mlb_channel_info *tx_chinfo = &pdevinfo->channels[TX_CHANNEL];
++ struct mlb_channel_info *rx_chinfo = &pdevinfo->channels[RX_CHANNEL];
++
++ /* Step 1, Initialize all bits of the ADT to '0' */
++ mlb150_dev_reset_adt();
++
++ /*
++ * Step 2, Select a logic channel
++ * Step 3, Program the AMBA AHB block ping page for channel N
++ * Step 4, Program the AMBA AHB block pong page for channel N
++ */
++ mlb150_dev_init_ch_amba_ahb(pdevinfo, rx_chinfo, ctype);
++ mlb150_dev_init_ch_amba_ahb(pdevinfo, tx_chinfo, ctype);
++}
++
++static void mlb150_dev_exit(void)
++{
++ u32 c0_val, hctl_val;
++
++ /* Disable EN bits */
++ c0_val = __raw_readl(mlb_base + REG_MLBC0);
++ c0_val &= ~(MLBC0_MLBEN | MLBC0_MLBPEN);
++ __raw_writel(c0_val, mlb_base + REG_MLBC0);
++
++ hctl_val = __raw_readl(mlb_base + REG_HCTL);
++ hctl_val &= ~HCTL_EN;
++ __raw_writel(hctl_val, mlb_base + REG_HCTL);
++
++ __raw_writel(0x0, mlb_base + REG_HCMR0);
++ __raw_writel(0x0, mlb_base + REG_HCMR1);
++
++ mlb150_dev_enable_dma_irq(0);
++ mlb150_dev_enable_ir_mlb(0);
++}
++
++static void mlb150_dev_init(void)
++{
++ u32 c0_val;
++ u32 ch_rx_mask = (1 << SYNC_RX_CL_AHB0) | (1 << CTRL_RX_CL_AHB0)
++ | (1 << ASYNC_RX_CL_AHB0) | (1 << ISOC_RX_CL_AHB0)
++ | (1 << SYNC_TX_CL_AHB0) | (1 << CTRL_TX_CL_AHB0)
++ | (1 << ASYNC_TX_CL_AHB0) | (1 << ISOC_TX_CL_AHB0);
++ u32 ch_tx_mask = (1 << (SYNC_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (CTRL_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ASYNC_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ISOC_RX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (SYNC_TX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (CTRL_TX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ASYNC_TX_CL_AHB1 - INT_AHB1_CH_START)) |
++ (1 << (ISOC_TX_CL_AHB1 - INT_AHB1_CH_START));
++
++ /* Disable EN bits */
++ mlb150_dev_exit();
++
++ /*
++ * Step 1. Initialize CTR and registers
++	 * a. Set all bits of the CTR (CAT, CDT, and ADT) to 0.
++ */
++ mlb150_dev_reset_whole_ctr();
++
++	/* b. Clear all MediaLB registers */
++ mlb150_dev_reset_all_regs();
++
++ /*
++ * Step 2, Configure the MediaLB interface
++ * Select pin mode and clock, 3-pin and 256fs
++ */
++ c0_val = __raw_readl(mlb_base + REG_MLBC0);
++ c0_val &= ~(MLBC0_MLBPEN | MLBC0_MLBCLK_MASK);
++ __raw_writel(c0_val, mlb_base + REG_MLBC0);
++
++ c0_val |= MLBC0_MLBEN;
++ __raw_writel(c0_val, mlb_base + REG_MLBC0);
++
++ /* Step 3, Configure the HBI interface */
++ __raw_writel(ch_rx_mask, mlb_base + REG_HCMR0);
++ __raw_writel(ch_tx_mask, mlb_base + REG_HCMR1);
++ __raw_writel(HCTL_EN, mlb_base + REG_HCTL);
++
++ mlb150_dev_init_ir_amba_ahb();
++
++ mlb150_dev_enable_ir_mlb(1);
++}
++
++static s32 mlb150_dev_unmute_syn_ch(u32 rx_ch, u32 rx_cl, u32 tx_ch, u32 tx_cl)
++{
++ u32 timeout = 10000;
++
++ /*
++ * Check that MediaLB clock is running (MLBC1.CLKM = 0)
++ * If MLBC1.CLKM = 1, clear the register bit, wait one
++ * APB or I/O clock cycle and repeat the check
++ */
++ while ((__raw_readl(mlb_base + REG_MLBC1) & MLBC1_CLKM)
++ && --timeout)
++ __raw_writel(~MLBC1_CLKM, mlb_base + REG_MLBC1);
++
++ if (0 == timeout)
++ return -ETIME;
++
++ timeout = 10000;
++ /* Poll for MLB lock (MLBC0.MLBLK = 1) */
++ while (!(__raw_readl(mlb_base + REG_MLBC0) & MLBC0_MLBLK)
++ && --timeout)
++ ;
++
++ if (0 == timeout)
++ return -ETIME;
++
++ /* Unmute synchronous channel(s) */
++ mlb150_dev_cat_mlb_write(rx_ch, CAT_CE | rx_cl);
++ mlb150_dev_cat_mlb_write(tx_ch,
++ CAT_CE | tx_cl | CAT_RNW);
++ mlb150_dev_cat_hbi_write(rx_cl,
++ CAT_CE | rx_cl | CAT_RNW);
++ mlb150_dev_cat_hbi_write(tx_cl, CAT_CE | tx_cl);
++
++ return 0;
++}
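++
++/*
++ * Note (our reading of the sequence above): during init the synchronous
++ * CAT entries are written with the mute bit (CAT_MT) set; unmuting
++ * rewrites them without CAT_MT once the MediaLB clock is locked.
++ */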
++
++/* In case the user calls channel shutdown, but rx or tx is not completed yet */
++static s32 mlb150_trans_complete_check(struct mlb_dev_info *pdevinfo)
++{
++ struct mlb_ringbuf *rx_rbuf = &pdevinfo->rx_rbuf;
++ struct mlb_ringbuf *tx_rbuf = &pdevinfo->tx_rbuf;
++ s32 timeout = 1024;
++
++ while (timeout--) {
++ read_lock(&tx_rbuf->rb_lock);
++ if (!CIRC_CNT(tx_rbuf->head, tx_rbuf->tail, TRANS_RING_NODES)) {
++ read_unlock(&tx_rbuf->rb_lock);
++ break;
++ } else
++ read_unlock(&tx_rbuf->rb_lock);
++ }
++
++ if (timeout <= 0) {
++ pr_debug("TX complete check timeout!\n");
++ return -ETIME;
++ }
++
++ timeout = 1024;
++ while (timeout--) {
++ read_lock(&rx_rbuf->rb_lock);
++ if (!CIRC_CNT(rx_rbuf->head, rx_rbuf->tail, TRANS_RING_NODES)) {
++ read_unlock(&rx_rbuf->rb_lock);
++ break;
++ } else
++ read_unlock(&rx_rbuf->rb_lock);
++ }
++
++ if (timeout <= 0) {
++ pr_debug("RX complete check timeout!\n");
++ return -ETIME;
++ }
++
++ /*
++ * Interrupt from TX can only inform that the data is sent
++ * to AHB bus, not mean that it is sent to MITB. Thus we add
++ * a delay here for data to be completed sent.
++ */
++ udelay(1000);
++
++ return 0;
++}
++
++/*
++ * Enable/Disable the MLB IRQ
++ */
++static void mxc_mlb150_irq_enable(struct mlb_data *drvdata, u8 enable)
++{
++ if (enable) {
++ enable_irq(drvdata->irq_ahb0);
++ enable_irq(drvdata->irq_ahb1);
++ enable_irq(drvdata->irq_mlb);
++ } else {
++ disable_irq(drvdata->irq_ahb0);
++ disable_irq(drvdata->irq_ahb1);
++ disable_irq(drvdata->irq_mlb);
++ }
++}
++
++/*
++ * Enable the MLB channel
++ */
++static s32 mlb_channel_enable(struct mlb_data *drvdata,
++ int chan_dev_id, int on)
++{
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++ struct mlb_channel_info *tx_chinfo = &pdevinfo->channels[TX_CHANNEL];
++ struct mlb_channel_info *rx_chinfo = &pdevinfo->channels[RX_CHANNEL];
++ u32 tx_ch = tx_chinfo->address;
++ u32 rx_ch = rx_chinfo->address;
++ u32 tx_cl = tx_chinfo->cl;
++ u32 rx_cl = rx_chinfo->cl;
++ s32 ret = 0;
++
++ /*
++ * setup the direction, enable, channel type,
++ * mode select, channel address and mask buf start
++ */
++ if (on) {
++ u32 ctype = pdevinfo->channel_type;
++
++ mlb150_dev_enable_ctr_write(0xffffffff, 0xffffffff,
++ 0xffffffff, 0xffffffff);
++ mlb150_dev_init_rfb(pdevinfo, rx_ch, tx_ch, ctype);
++
++ mlb150_dev_init_amba_ahb(pdevinfo, ctype);
++
++#ifdef DEBUG
++ mlb150_dev_dump_ctr_tbl(0, tx_chinfo->cl + 1);
++#endif
++		/* Synchronize and unmute synchronous channel */
++ if (MLB_CTYPE_SYNC == ctype) {
++ ret = mlb150_dev_unmute_syn_ch(rx_ch, rx_cl,
++ tx_ch, tx_cl);
++ if (ret)
++ return ret;
++ }
++
++ mlb150_dev_enable_ctr_write(0x0, ADT_RDY1 | ADT_DNE1 |
++ ADT_ERR1 | ADT_PS1 |
++ ADT_RDY2 | ADT_DNE2 | ADT_ERR2 | ADT_PS2,
++ 0xffffffff, 0xffffffff);
++
++ if (pdevinfo->fps >= CLK_2048FS)
++ mlb150_enable_pll(drvdata);
++
++ atomic_set(&pdevinfo->on, 1);
++
++#ifdef DEBUG
++ mlb150_dev_dump_reg();
++ mlb150_dev_dump_ctr_tbl(0, tx_chinfo->cl + 1);
++#endif
++ /* Init RX ADT */
++ mlb150_dev_pipo_start(&pdevinfo->rx_rbuf, rx_cl,
++ pdevinfo->rx_rbuf.phy_addrs[0]);
++ } else {
++ mlb150_dev_pipo_stop(&pdevinfo->rx_rbuf, rx_cl);
++
++ mlb150_dev_enable_dma_irq(0);
++ mlb150_dev_enable_ir_mlb(0);
++
++ mlb150_dev_reset_cat();
++
++ atomic_set(&pdevinfo->on, 0);
++
++ if (pdevinfo->fps >= CLK_2048FS)
++ mlb150_disable_pll(drvdata);
++ }
++
++ return 0;
++}
++
++/*
++ * MLB interrupt handler
++ */
++static void mlb_rx_isr(s32 ctype, u32 ahb_ch, struct mlb_dev_info *pdevinfo)
++{
++ struct mlb_ringbuf *rx_rbuf = &pdevinfo->rx_rbuf;
++ s32 head, tail, adt_sts;
++ u32 rx_buf_ptr;
++
++#ifdef DEBUG_RX
++ pr_debug("mxc_mlb150: mlb_rx_isr\n");
++#endif
++
++ read_lock(&rx_rbuf->rb_lock);
++
++ head = (rx_rbuf->head + 1) & (TRANS_RING_NODES - 1);
++ tail = ACCESS_ONCE(rx_rbuf->tail);
++ read_unlock(&rx_rbuf->rb_lock);
++
++ if (CIRC_SPACE(head, tail, TRANS_RING_NODES) >= 1) {
++ rx_buf_ptr = rx_rbuf->phy_addrs[head];
++
++ /* commit the item before incrementing the head */
++ smp_wmb();
++
++ write_lock(&rx_rbuf->rb_lock);
++ rx_rbuf->head = head;
++ write_unlock(&rx_rbuf->rb_lock);
++
++ /* wake up the reader */
++ wake_up_interruptible(&pdevinfo->rx_wq);
++ } else {
++ rx_buf_ptr = rx_rbuf->phy_addrs[head];
++		pr_debug("drop RX packet: no ring buffer space, (%d,%d)\n",
++ head, tail);
++ }
++
++ adt_sts = mlb150_dev_get_adt_sts(ahb_ch);
++ /* Set ADT for RX */
++ mlb150_dev_pipo_next(ahb_ch, ctype, adt_sts, rx_buf_ptr);
++}
++
++static void mlb_tx_isr(s32 ctype, u32 ahb_ch, struct mlb_dev_info *pdevinfo)
++{
++ struct mlb_ringbuf *tx_rbuf = &pdevinfo->tx_rbuf;
++ s32 head, tail, adt_sts;
++ u32 tx_buf_ptr;
++
++ read_lock(&tx_rbuf->rb_lock);
++
++ head = ACCESS_ONCE(tx_rbuf->head);
++ tail = (tx_rbuf->tail + 1) & (TRANS_RING_NODES - 1);
++ read_unlock(&tx_rbuf->rb_lock);
++
++ smp_mb();
++ write_lock(&tx_rbuf->rb_lock);
++ tx_rbuf->tail = tail;
++ write_unlock(&tx_rbuf->rb_lock);
++
++	/* check whether the current tx buffer is available */
++ if (CIRC_CNT(head, tail, TRANS_RING_NODES) >= 1) {
++ /* read index before reading contents at that index */
++ smp_read_barrier_depends();
++
++ tx_buf_ptr = tx_rbuf->phy_addrs[tail];
++
++ wake_up_interruptible(&pdevinfo->tx_wq);
++
++ adt_sts = mlb150_dev_get_adt_sts(ahb_ch);
++ /* Set ADT for TX */
++ mlb150_dev_pipo_next(ahb_ch, ctype, adt_sts, tx_buf_ptr);
++ }
++}
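++
++/*
++ * Ring buffer roles, for reference (derived from the two ISRs above):
++ * on RX the ISR is the producer (advances head) and the reader is the
++ * consumer (advances tail); on TX the writer produces and the ISR
++ * consumes.  The CIRC_SPACE()/CIRC_CNT() arithmetic assumes
++ * TRANS_RING_NODES is a power of two.
++ */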
++
++static irqreturn_t mlb_ahb_isr(int irq, void *dev_id)
++{
++ u32 acsr0, hcer0;
++ u32 ch_mask = (1 << SYNC_RX_CL) | (1 << CTRL_RX_CL)
++ | (1 << ASYNC_RX_CL) | (1 << ISOC_RX_CL)
++ | (1 << SYNC_TX_CL) | (1 << CTRL_TX_CL)
++ | (1 << ASYNC_TX_CL) | (1 << ISOC_TX_CL);
++
++ /*
++ * Step 5, Read the ACSRn registers to determine which channel or
++ * channels are causing the interrupt
++ */
++ acsr0 = __raw_readl(mlb_base + REG_ACSR0);
++
++ hcer0 = __raw_readl(mlb_base + REG_HCER0);
++
++ /*
++ * Step 6, If ACTL.SCE = 1, write the result of step 5 back to ACSR0
++ * and ACSR1 to clear the interrupt
++ * We'll not set ACTL_SCE
++ */
++
++ if (ch_mask & hcer0)
++ pr_err("CH encounters an AHB error: 0x%x\n", hcer0);
++
++ if ((1 << SYNC_RX_CL) & acsr0)
++ mlb_rx_isr(MLB_CTYPE_SYNC, SYNC_RX_CL,
++ &mlb_devinfo[MLB_CTYPE_SYNC]);
++
++ if ((1 << CTRL_RX_CL) & acsr0)
++ mlb_rx_isr(MLB_CTYPE_CTRL, CTRL_RX_CL,
++ &mlb_devinfo[MLB_CTYPE_CTRL]);
++
++ if ((1 << ASYNC_RX_CL) & acsr0)
++ mlb_rx_isr(MLB_CTYPE_ASYNC, ASYNC_RX_CL,
++ &mlb_devinfo[MLB_CTYPE_ASYNC]);
++
++ if ((1 << ISOC_RX_CL) & acsr0)
++ mlb_rx_isr(MLB_CTYPE_ISOC, ISOC_RX_CL,
++ &mlb_devinfo[MLB_CTYPE_ISOC]);
++
++ if ((1 << SYNC_TX_CL) & acsr0)
++ mlb_tx_isr(MLB_CTYPE_SYNC, SYNC_TX_CL,
++ &mlb_devinfo[MLB_CTYPE_SYNC]);
++
++ if ((1 << CTRL_TX_CL) & acsr0)
++ mlb_tx_isr(MLB_CTYPE_CTRL, CTRL_TX_CL,
++ &mlb_devinfo[MLB_CTYPE_CTRL]);
++
++ if ((1 << ASYNC_TX_CL) & acsr0)
++ mlb_tx_isr(MLB_CTYPE_ASYNC, ASYNC_TX_CL,
++ &mlb_devinfo[MLB_CTYPE_ASYNC]);
++
++ if ((1 << ISOC_TX_CL) & acsr0)
++		mlb_tx_isr(MLB_CTYPE_ISOC, ISOC_TX_CL,
++ &mlb_devinfo[MLB_CTYPE_ISOC]);
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t mlb_isr(int irq, void *dev_id)
++{
++ u32 rx_int_sts, tx_int_sts, ms0,
++ ms1, tx_cis, rx_cis, ctype;
++ int minor;
++ u32 cdt_val[4] = { 0 };
++
++ /*
++ * Step 4, Read the MSn register to determine which channel(s)
++ * are causing the interrupt
++ */
++ ms0 = __raw_readl(mlb_base + REG_MS0);
++ ms1 = __raw_readl(mlb_base + REG_MS1);
++
++ /*
++	 * The MLB150_MS0 and MLB150_MS1 registers need to be cleared. The
++	 * spec says they should be cleared when the interrupt is enabled,
++	 * but in practice they must also be cleared in the ISR.
++ */
++ __raw_writel(0, mlb_base + REG_MS0);
++ __raw_writel(0, mlb_base + REG_MS1);
++
++ pr_debug("mxc_mlb150: mlb interrupt:0x%08x 0x%08x\n",
++ (u32)ms0, (u32)ms1);
++
++ for (minor = 0; minor < MLB_MINOR_DEVICES; minor++) {
++ struct mlb_dev_info *pdevinfo = &mlb_devinfo[minor];
++ u32 rx_mlb_ch = pdevinfo->channels[RX_CHANNEL].address;
++ u32 tx_mlb_ch = pdevinfo->channels[TX_CHANNEL].address;
++ u32 rx_mlb_cl = pdevinfo->channels[RX_CHANNEL].cl;
++ u32 tx_mlb_cl = pdevinfo->channels[TX_CHANNEL].cl;
++
++ tx_cis = rx_cis = 0;
++
++ ctype = pdevinfo->channel_type;
++		rx_int_sts = (rx_mlb_ch < 32) ? ms0 : ms1;
++		tx_int_sts = (tx_mlb_ch < 32) ? ms0 : ms1;
++
++ pr_debug("mxc_mlb150: channel interrupt: "
++ "tx %d: 0x%08x, rx %d: 0x%08x\n",
++ tx_mlb_ch, (u32)tx_int_sts, rx_mlb_ch, (u32)rx_int_sts);
++
++ /* Get tx channel interrupt status */
++ if (tx_int_sts & (1 << (tx_mlb_ch % 32))) {
++ mlb150_dev_cdt_read(tx_mlb_cl, cdt_val);
++ pr_debug("mxc_mlb150: TX_CH: %d, cdt_val[3]: 0x%08x, "
++ "cdt_val[2]: 0x%08x, "
++ "cdt_val[1]: 0x%08x, "
++ "cdt_val[0]: 0x%08x\n",
++ tx_mlb_ch, cdt_val[3], cdt_val[2],
++ cdt_val[1], cdt_val[0]);
++ switch (ctype) {
++ case MLB_CTYPE_SYNC:
++ tx_cis = (cdt_val[2] & ~CDT_SYNC_WSTS_MASK)
++ >> CDT_SYNC_WSTS_SHIFT;
++ /*
++ * Clear RSTS/WSTS errors to resume
++ * channel operation
++ * a. For synchronous channels: WSTS[3] = 0
++ */
++ cdt_val[2] &= ~(0x8 << CDT_SYNC_WSTS_SHIFT);
++ break;
++ case MLB_CTYPE_CTRL:
++ case MLB_CTYPE_ASYNC:
++ tx_cis = (cdt_val[2] &
++ ~CDT_CTRL_ASYNC_WSTS_MASK)
++ >> CDT_CTRL_ASYNC_WSTS_SHIFT;
++ tx_cis = (cdt_val[3] & CDT_CTRL_ASYNC_WSTS_1) ?
++ (tx_cis | (0x1 << 4)) : tx_cis;
++ /*
++ * b. For async and ctrl channels:
++ * RSTS[4]/WSTS[4] = 0
++ * and RSTS[2]/WSTS[2] = 0
++ */
++ cdt_val[3] &= ~CDT_CTRL_ASYNC_WSTS_1;
++ cdt_val[2] &=
++ ~(0x4 << CDT_CTRL_ASYNC_WSTS_SHIFT);
++ break;
++ case MLB_CTYPE_ISOC:
++ tx_cis = (cdt_val[2] & ~CDT_ISOC_WSTS_MASK)
++ >> CDT_ISOC_WSTS_SHIFT;
++ /* c. For isoc channels: WSTS[2:1] = 0x00 */
++ cdt_val[2] &= ~(0x6 << CDT_ISOC_WSTS_SHIFT);
++ break;
++ default:
++ break;
++ }
++ mlb150_dev_cdt_write(tx_mlb_ch, cdt_val);
++ }
++
++ /* Get rx channel interrupt status */
++ if (rx_int_sts & (1 << (rx_mlb_ch % 32))) {
++ mlb150_dev_cdt_read(rx_mlb_cl, cdt_val);
++ pr_debug("mxc_mlb150: RX_CH: %d, cdt_val[3]: 0x%08x, "
++ "cdt_val[2]: 0x%08x, "
++ "cdt_val[1]: 0x%08x, "
++ "cdt_val[0]: 0x%08x\n",
++ rx_mlb_ch, cdt_val[3], cdt_val[2],
++ cdt_val[1], cdt_val[0]);
++ switch (ctype) {
++ case MLB_CTYPE_SYNC:
++			rx_cis = (cdt_val[2] & ~CDT_SYNC_RSTS_MASK)
++				>> CDT_SYNC_RSTS_SHIFT;
++ cdt_val[2] &= ~(0x8 << CDT_SYNC_WSTS_SHIFT);
++ break;
++ case MLB_CTYPE_CTRL:
++ case MLB_CTYPE_ASYNC:
++			rx_cis =
++				(cdt_val[2] & ~CDT_CTRL_ASYNC_RSTS_MASK)
++				>> CDT_CTRL_ASYNC_RSTS_SHIFT;
++			rx_cis = (cdt_val[3] & CDT_CTRL_ASYNC_RSTS_1) ?
++				(rx_cis | (0x1 << 4)) : rx_cis;
++ cdt_val[3] &= ~CDT_CTRL_ASYNC_RSTS_1;
++ cdt_val[2] &=
++ ~(0x4 << CDT_CTRL_ASYNC_RSTS_SHIFT);
++ break;
++ case MLB_CTYPE_ISOC:
++			rx_cis = (cdt_val[2] & ~CDT_ISOC_RSTS_MASK)
++				>> CDT_ISOC_RSTS_SHIFT;
++ cdt_val[2] &= ~(0x6 << CDT_ISOC_WSTS_SHIFT);
++ break;
++ default:
++ break;
++ }
++ mlb150_dev_cdt_write(rx_mlb_ch, cdt_val);
++ }
++
++ if (!tx_cis && !rx_cis)
++ continue;
++
++ /* fill exception event */
++ spin_lock(&pdevinfo->event_lock);
++ pdevinfo->ex_event |= (rx_cis << 16) | tx_cis;
++ spin_unlock(&pdevinfo->event_lock);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static int mxc_mlb150_open(struct inode *inode, struct file *filp)
++{
++ int minor, ring_buf_size, buf_size, j, ret;
++ void __iomem *buf_addr;
++ ulong phy_addr;
++ struct mlb_dev_info *pdevinfo = NULL;
++ struct mlb_channel_info *pchinfo = NULL;
++ struct mlb_data *drvdata;
++
++ minor = MINOR(inode->i_rdev);
++ drvdata = container_of(inode->i_cdev, struct mlb_data, cdev);
++
++ if (minor < 0 || minor >= MLB_MINOR_DEVICES) {
++ pr_err("no device\n");
++ return -ENODEV;
++ }
++
++ /* open for each channel device */
++ if (atomic_cmpxchg(&mlb_devinfo[minor].opencnt, 0, 1) != 0) {
++ pr_err("busy\n");
++ return -EBUSY;
++ }
++
++ clk_prepare_enable(drvdata->clk_mlb3p);
++
++ /* initial MLB module */
++ mlb150_dev_init();
++
++ pdevinfo = &mlb_devinfo[minor];
++ pchinfo = &pdevinfo->channels[TX_CHANNEL];
++
++ ring_buf_size = pdevinfo->buf_size;
++ buf_size = ring_buf_size * (TRANS_RING_NODES * 2);
++ buf_addr = (void __iomem *)gen_pool_alloc(drvdata->iram_pool, buf_size);
++ if (buf_addr == NULL) {
++ ret = -ENOMEM;
++ pr_err("can not alloc rx/tx buffers: %d\n", buf_size);
++ return ret;
++ }
++ phy_addr = gen_pool_virt_to_phys(drvdata->iram_pool, (ulong)buf_addr);
++ pr_debug("IRAM Range: Virt 0x%p - 0x%p, Phys 0x%x - 0x%x, size: 0x%x\n",
++ buf_addr, (buf_addr + buf_size - 1), (u32)phy_addr,
++ (u32)(phy_addr + buf_size - 1), buf_size);
++ pdevinfo->rbuf_base_virt = buf_addr;
++ pdevinfo->rbuf_base_phy = phy_addr;
++ drvdata->iram_size = buf_size;
++
++ memset(buf_addr, 0, buf_size);
++
++ for (j = 0; j < (TRANS_RING_NODES);
++ ++j, buf_addr += ring_buf_size, phy_addr += ring_buf_size) {
++ pdevinfo->rx_rbuf.virt_bufs[j] = buf_addr;
++ pdevinfo->rx_rbuf.phy_addrs[j] = phy_addr;
++ pr_debug("RX Ringbuf[%d]: 0x%p 0x%x\n",
++ j, buf_addr, (u32)phy_addr);
++ }
++ pdevinfo->rx_rbuf.unit_size = ring_buf_size;
++ pdevinfo->rx_rbuf.total_size = buf_size;
++ for (j = 0; j < (TRANS_RING_NODES);
++ ++j, buf_addr += ring_buf_size, phy_addr += ring_buf_size) {
++ pdevinfo->tx_rbuf.virt_bufs[j] = buf_addr;
++ pdevinfo->tx_rbuf.phy_addrs[j] = phy_addr;
++ pr_debug("TX Ringbuf[%d]: 0x%p 0x%x\n",
++ j, buf_addr, (u32)phy_addr);
++ }
++
++ pdevinfo->tx_rbuf.unit_size = ring_buf_size;
++ pdevinfo->tx_rbuf.total_size = buf_size;
++
++ /* reset the buffer read/write ptr */
++ pdevinfo->rx_rbuf.head = pdevinfo->rx_rbuf.tail = 0;
++ pdevinfo->tx_rbuf.head = pdevinfo->tx_rbuf.tail = 0;
++ pdevinfo->ex_event = 0;
++ pdevinfo->tx_ok = 0;
++
++ init_waitqueue_head(&pdevinfo->rx_wq);
++ init_waitqueue_head(&pdevinfo->tx_wq);
++
++ drvdata = container_of(inode->i_cdev, struct mlb_data, cdev);
++ drvdata->devinfo = pdevinfo;
++ mxc_mlb150_irq_enable(drvdata, 1);
++ filp->private_data = drvdata;
++
++ return 0;
++}
++
++static int mxc_mlb150_release(struct inode *inode, struct file *filp)
++{
++ int minor;
++ struct mlb_data *drvdata = filp->private_data;
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++
++ minor = MINOR(inode->i_rdev);
++ mxc_mlb150_irq_enable(drvdata, 0);
++
++#ifdef DEBUG
++ mlb150_dev_dump_reg();
++ mlb150_dev_dump_ctr_tbl(0, pdevinfo->channels[TX_CHANNEL].cl + 1);
++#endif
++
++ gen_pool_free(drvdata->iram_pool,
++ (ulong)pdevinfo->rbuf_base_virt, drvdata->iram_size);
++
++ mlb150_dev_exit();
++
++ if (pdevinfo && atomic_read(&pdevinfo->on)
++ && (pdevinfo->fps >= CLK_2048FS))
++ clk_disable_unprepare(drvdata->clk_mlb6p);
++
++ atomic_set(&pdevinfo->on, 0);
++
++ clk_disable_unprepare(drvdata->clk_mlb3p);
++ /* decrease the open count */
++ atomic_set(&pdevinfo->opencnt, 0);
++
++ drvdata->devinfo = NULL;
++
++ return 0;
++}
++
++static long mxc_mlb150_ioctl(struct file *filp,
++ unsigned int cmd, unsigned long arg)
++{
++ struct inode *inode = filp->f_path.dentry->d_inode;
++ struct mlb_data *drvdata = filp->private_data;
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++ void __user *argp = (void __user *)arg;
++ unsigned long flags, event;
++ int minor;
++
++ minor = MINOR(inode->i_rdev);
++
++ switch (cmd) {
++ case MLB_CHAN_SETADDR:
++ {
++ unsigned int caddr;
++ /* get channel address from user space */
++ if (copy_from_user(&caddr, argp, sizeof(caddr))) {
++ pr_err("mxc_mlb150: copy from user failed\n");
++ return -EFAULT;
++ }
++ pdevinfo->channels[TX_CHANNEL].address =
++ (caddr >> 16) & 0xFFFF;
++ pdevinfo->channels[RX_CHANNEL].address = caddr & 0xFFFF;
++ pr_debug("mxc_mlb150: set ch addr, tx: %d, rx: %d\n",
++ pdevinfo->channels[TX_CHANNEL].address,
++ pdevinfo->channels[RX_CHANNEL].address);
++ break;
++ }
++
++ case MLB_CHAN_STARTUP:
++ if (atomic_read(&pdevinfo->on)) {
++			pr_debug("mxc_mlb150: channel already started\n");
++ break;
++ }
++ if (mlb_channel_enable(drvdata, minor, 1))
++ return -EFAULT;
++ break;
++ case MLB_CHAN_SHUTDOWN:
++ if (atomic_read(&pdevinfo->on) == 0) {
++			pr_debug("mxc_mlb150: channel already shut down\n");
++ break;
++ }
++ mlb150_trans_complete_check(pdevinfo);
++ mlb_channel_enable(drvdata, minor, 0);
++ break;
++ case MLB_CHAN_GETEVENT:
++ /* get and clear the ex_event */
++ spin_lock_irqsave(&pdevinfo->event_lock, flags);
++ event = pdevinfo->ex_event;
++ pdevinfo->ex_event = 0;
++ spin_unlock_irqrestore(&pdevinfo->event_lock, flags);
++
++ if (event) {
++ if (copy_to_user(argp, &event, sizeof(event))) {
++ pr_err("mxc_mlb150: copy to user failed\n");
++ return -EFAULT;
++ }
++ } else
++ return -EAGAIN;
++ break;
++ case MLB_SET_ISOC_BLKSIZE_188:
++ pdevinfo->isoc_blksz = 188;
++ pdevinfo->cdt_buf_dep = pdevinfo->adt_buf_dep =
++ pdevinfo->isoc_blksz * CH_ISOC_BLK_NUM;
++ break;
++ case MLB_SET_ISOC_BLKSIZE_196:
++ pdevinfo->isoc_blksz = 196;
++ pdevinfo->cdt_buf_dep = pdevinfo->adt_buf_dep =
++ pdevinfo->isoc_blksz * CH_ISOC_BLK_NUM;
++ break;
++ case MLB_SET_SYNC_QUAD:
++ {
++ u32 quad;
++
++ if (copy_from_user(&quad, argp, sizeof(quad))) {
++			pr_err("mxc_mlb150: failed to get quadlet "
++				"count from user\n");
++ return -EFAULT;
++ }
++ if (quad <= 0 || quad > 3) {
++			pr_err("mxc_mlb150: invalid quadlets! "
++				"Quadlets in sync mode can "
++				"only be 1, 2 or 3\n");
++ return -EINVAL;
++ }
++ pdevinfo->sync_quad = quad;
++		/* Each quadlet is 4 bytes */
++ pdevinfo->cdt_buf_dep = quad * 4 * 4;
++ pdevinfo->adt_buf_dep =
++ pdevinfo->cdt_buf_dep * CH_SYNC_ADT_BUF_MULTI;
++ }
++ break;
++ case MLB_SET_FPS:
++ {
++ u32 fps, c0_val;
++
++ /* get fps from user space */
++ if (copy_from_user(&fps, argp, sizeof(fps))) {
++ pr_err("mxc_mlb150: copy from user failed\n");
++ return -EFAULT;
++ }
++
++ c0_val = __raw_readl(mlb_base + REG_MLBC0);
++ c0_val &= ~MLBC0_MLBCLK_MASK;
++
++ /* check fps value */
++ switch (fps) {
++ case 256:
++ case 512:
++ case 1024:
++ pdevinfo->fps = fps >> 9;
++ c0_val &= ~MLBC0_MLBPEN;
++ c0_val |= (fps >> 9)
++ << MLBC0_MLBCLK_SHIFT;
++
++ if (1024 == fps) {
++ /*
++ * Invert output clock phase
++ * in 1024 fps
++ */
++ __raw_writel(0x1,
++ mlb_base + REG_MLBPC2);
++ }
++ break;
++ case 2048:
++ case 3072:
++ case 4096:
++ pdevinfo->fps = (fps >> 10) + 1;
++ c0_val |= ((fps >> 10) + 1)
++ << MLBC0_MLBCLK_SHIFT;
++ break;
++ case 6144:
++ pdevinfo->fps = fps >> 10;
++ c0_val |= ((fps >> 10) + 1)
++ << MLBC0_MLBCLK_SHIFT;
++ break;
++ case 8192:
++ pdevinfo->fps = (fps >> 10) - 1;
++ c0_val |= ((fps >> 10) - 1)
++ << MLBC0_MLBCLK_SHIFT;
++ break;
++ default:
++ pr_debug("mxc_mlb150: invalid fps argument: %d\n",
++ fps);
++ return -EINVAL;
++ }
++
++ __raw_writel(c0_val, mlb_base + REG_MLBC0);
++
++ pr_debug("mxc_mlb150: set fps to %d, MLBC0: 0x%08x\n",
++ fps,
++ (u32)__raw_readl(mlb_base + REG_MLBC0));
++
++ break;
++ }
++
++ case MLB_GET_VER:
++ {
++ u32 version;
++
++ /* get MLB device module version */
++ version = 0x03030003;
++
++ pr_debug("mxc_mlb150: get version: 0x%08x\n",
++ version);
++
++ if (copy_to_user(argp, &version, sizeof(version))) {
++ pr_err("mxc_mlb150: copy to user failed\n");
++ return -EFAULT;
++ }
++ break;
++ }
++
++ case MLB_SET_DEVADDR:
++ {
++ u32 c1_val;
++ u8 devaddr;
++
++ /* get MLB device address from user space */
++ if (copy_from_user
++ (&devaddr, argp, sizeof(unsigned char))) {
++ pr_err("mxc_mlb150: copy from user failed\n");
++ return -EFAULT;
++ }
++
++ c1_val = __raw_readl(mlb_base + REG_MLBC1);
++ c1_val &= ~MLBC1_NDA_MASK;
++ c1_val |= devaddr << MLBC1_NDA_SHIFT;
++ __raw_writel(c1_val, mlb_base + REG_MLBC1);
++ pr_debug("mxc_mlb150: set dev addr, dev addr: %d, "
++ "MLBC1: 0x%08x\n", devaddr,
++ (u32)__raw_readl(mlb_base + REG_MLBC1));
++
++ break;
++ }
++
++ case MLB_IRQ_DISABLE:
++ {
++ disable_irq(drvdata->irq_mlb);
++ break;
++ }
++
++ case MLB_IRQ_ENABLE:
++ {
++ enable_irq(drvdata->irq_mlb);
++ break;
++ }
++ default:
++ pr_info("mxc_mlb150: Invalid ioctl command\n");
++ return -EINVAL;
++ }
++
++ return 0;
++}
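++
++/*
++ * Minimal userspace sketch of the ioctl flow above (device node name,
++ * channel addresses and fps are hypothetical; error handling omitted):
++ *
++ *	int fd = open("/dev/mlb_dev", O_RDWR);	// one of the minor devices
++ *	unsigned int caddr = (tx_addr << 16) | rx_addr;
++ *	ioctl(fd, MLB_CHAN_SETADDR, &caddr);
++ *	unsigned int fps = 512;
++ *	ioctl(fd, MLB_SET_FPS, &fps);
++ *	ioctl(fd, MLB_CHAN_STARTUP);
++ *	// ... read()/write() transfers ...
++ *	ioctl(fd, MLB_CHAN_SHUTDOWN);
++ *	close(fd);
++ */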
++
++/*
++ * MLB read routine
++ * Read the current received data from queued buffer,
++ * and free this buffer for hw to fill ingress data.
++ */
++static ssize_t mxc_mlb150_read(struct file *filp, char __user *buf,
++ size_t count, loff_t *f_pos)
++{
++ int size;
++ struct mlb_data *drvdata = filp->private_data;
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++ struct mlb_ringbuf *rx_rbuf = &pdevinfo->rx_rbuf;
++ int head, tail;
++ unsigned long flags;
++
++ read_lock_irqsave(&rx_rbuf->rb_lock, flags);
++
++ head = ACCESS_ONCE(rx_rbuf->head);
++ tail = rx_rbuf->tail;
++
++ read_unlock_irqrestore(&rx_rbuf->rb_lock, flags);
++
++	/* check whether the current rx buffer is available */
++ if (0 == CIRC_CNT(head, tail, TRANS_RING_NODES)) {
++
++ if (filp->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++
++ do {
++ DEFINE_WAIT(__wait);
++
++ for (;;) {
++ prepare_to_wait(&pdevinfo->rx_wq,
++ &__wait, TASK_INTERRUPTIBLE);
++
++ read_lock_irqsave(&rx_rbuf->rb_lock, flags);
++ if (CIRC_CNT(rx_rbuf->head, rx_rbuf->tail,
++ TRANS_RING_NODES) > 0) {
++ read_unlock_irqrestore(&rx_rbuf->rb_lock,
++ flags);
++ break;
++ }
++ read_unlock_irqrestore(&rx_rbuf->rb_lock,
++ flags);
++
++ if (!signal_pending(current)) {
++ schedule();
++ continue;
++ }
++ return -ERESTARTSYS;
++ }
++ finish_wait(&pdevinfo->rx_wq, &__wait);
++ } while (0);
++ }
++
++ /* read index before reading contents at that index */
++ smp_read_barrier_depends();
++
++ size = pdevinfo->adt_buf_dep;
++ if (size > count) {
++ /* the user buffer is too small */
++		pr_warning
++		    ("mxc_mlb150: received data is bigger than the "
++			"user buffer, size: %d, count: %zu\n", size, count);
++ return -EINVAL;
++ }
++
++ /* extract one item from the buffer */
++ if (copy_to_user(buf, rx_rbuf->virt_bufs[tail], size)) {
++		pr_err("mxc_mlb150: copy to user failed\n");
++ return -EFAULT;
++ }
++
++ /* finish reading descriptor before incrementing tail */
++ smp_mb();
++
++ write_lock_irqsave(&rx_rbuf->rb_lock, flags);
++ rx_rbuf->tail = (tail + 1) & (TRANS_RING_NODES - 1);
++ write_unlock_irqrestore(&rx_rbuf->rb_lock, flags);
++
++ *f_pos = 0;
++
++ return size;
++}
++
++/*
++ * MLB write routine
++ * Copy the user data to tx channel buffer,
++ * and prepare the channel current/next buffer ptr.
++ */
++static ssize_t mxc_mlb150_write(struct file *filp, const char __user *buf,
++ size_t count, loff_t *f_pos)
++{
++ s32 ret = 0;
++ struct mlb_channel_info *pchinfo = NULL;
++ struct mlb_data *drvdata = filp->private_data;
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++ struct mlb_ringbuf *tx_rbuf = &pdevinfo->tx_rbuf;
++ int head, tail;
++ unsigned long flags;
++
++ /*
++ * minor = MINOR(filp->f_dentry->d_inode->i_rdev);
++ */
++ pchinfo = &pdevinfo->channels[TX_CHANNEL];
++
++ if (count > pdevinfo->buf_size) {
++		/* too much data to write */
++		pr_warning("mxc_mlb150: write data exceeds the channel buffer\n");
++ return -EFBIG;
++ }
++
++ *f_pos = 0;
++
++ read_lock_irqsave(&tx_rbuf->rb_lock, flags);
++
++ head = tx_rbuf->head;
++ tail = ACCESS_ONCE(tx_rbuf->tail);
++ read_unlock_irqrestore(&tx_rbuf->rb_lock, flags);
++
++ if (0 == CIRC_SPACE(head, tail, TRANS_RING_NODES)) {
++ if (filp->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++ do {
++ DEFINE_WAIT(__wait);
++
++ for (;;) {
++ prepare_to_wait(&pdevinfo->tx_wq,
++ &__wait, TASK_INTERRUPTIBLE);
++
++ read_lock_irqsave(&tx_rbuf->rb_lock, flags);
++ if (CIRC_SPACE(tx_rbuf->head, tx_rbuf->tail,
++ TRANS_RING_NODES) > 0) {
++ read_unlock_irqrestore(&tx_rbuf->rb_lock,
++ flags);
++ break;
++ }
++ read_unlock_irqrestore(&tx_rbuf->rb_lock,
++ flags);
++
++ if (!signal_pending(current)) {
++ schedule();
++ continue;
++ }
++ return -ERESTARTSYS;
++ }
++ finish_wait(&pdevinfo->tx_wq, &__wait);
++ } while (0);
++ }
++
++ if (copy_from_user((void *)tx_rbuf->virt_bufs[head], buf, count)) {
++		/* no ring buffer lock is held at this point */
++		pr_err("mxc_mlb150: copy from user failed\n");
++ ret = -EFAULT;
++ goto out;
++ }
++
++ write_lock_irqsave(&tx_rbuf->rb_lock, flags);
++ smp_wmb();
++ tx_rbuf->head = (head + 1) & (TRANS_RING_NODES - 1);
++ write_unlock_irqrestore(&tx_rbuf->rb_lock, flags);
++
++ if (0 == CIRC_CNT(head, tail, TRANS_RING_NODES)) {
++ u32 tx_buf_ptr, ahb_ch;
++ s32 adt_sts;
++ u32 ctype = pdevinfo->channel_type;
++
++ /* read index before reading contents at that index */
++ smp_read_barrier_depends();
++
++ tx_buf_ptr = tx_rbuf->phy_addrs[tail];
++
++ ahb_ch = pdevinfo->channels[TX_CHANNEL].cl;
++ adt_sts = mlb150_dev_get_adt_sts(ahb_ch);
++
++ /* Set ADT for TX */
++ mlb150_dev_pipo_next(ahb_ch, ctype, adt_sts, tx_buf_ptr);
++ }
++
++ ret = count;
++out:
++ return ret;
++}
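++
++/*
++ * Design note (our reading of the code above): the writer only kicks
++ * the ADT when the ring was empty before this item (CIRC_CNT == 0);
++ * otherwise the TX ISR keeps the DMA chain running by queueing the
++ * next buffer itself as each page completes.
++ */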
++
++static unsigned int mxc_mlb150_poll(struct file *filp,
++ struct poll_table_struct *wait)
++{
++ int minor;
++ unsigned int ret = 0;
++ struct mlb_data *drvdata = filp->private_data;
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++ struct mlb_ringbuf *tx_rbuf = &pdevinfo->tx_rbuf;
++ struct mlb_ringbuf *rx_rbuf = &pdevinfo->rx_rbuf;
++ int head, tail;
++ unsigned long flags;
++
++
++ minor = MINOR(filp->f_path.dentry->d_inode->i_rdev);
++
++ poll_wait(filp, &pdevinfo->rx_wq, wait);
++ poll_wait(filp, &pdevinfo->tx_wq, wait);
++
++ read_lock_irqsave(&tx_rbuf->rb_lock, flags);
++ head = tx_rbuf->head;
++ tail = tx_rbuf->tail;
++ read_unlock_irqrestore(&tx_rbuf->rb_lock, flags);
++
++	/* check whether the tx buffer is available */
++ if (CIRC_SPACE(head, tail, TRANS_RING_NODES) >= 1)
++ ret |= POLLOUT | POLLWRNORM;
++
++ read_lock_irqsave(&rx_rbuf->rb_lock, flags);
++ head = rx_rbuf->head;
++ tail = rx_rbuf->tail;
++ read_unlock_irqrestore(&rx_rbuf->rb_lock, flags);
++
++	/* check whether the rx buffer is filled */
++ if (CIRC_CNT(head, tail, TRANS_RING_NODES) >= 1)
++ ret |= POLLIN | POLLRDNORM;
++
++
++ /* check the exception event */
++ if (pdevinfo->ex_event)
++ ret |= POLLIN | POLLRDNORM;
++
++ return ret;
++}
++
++/*
++ * char dev file operations structure
++ */
++static const struct file_operations mxc_mlb150_fops = {
++ .owner = THIS_MODULE,
++ .open = mxc_mlb150_open,
++ .release = mxc_mlb150_release,
++ .unlocked_ioctl = mxc_mlb150_ioctl,
++ .poll = mxc_mlb150_poll,
++ .read = mxc_mlb150_read,
++ .write = mxc_mlb150_write,
++};
++
++static struct platform_device_id imx_mlb150_devtype[] = {
++ {
++ .name = "imx6q-mlb150",
++ .driver_data = 0,
++ }, {
++ /* sentinel */
++ }
++};
++MODULE_DEVICE_TABLE(platform, imx_mlb150_devtype);
++
++static const struct of_device_id mlb150_imx_dt_ids[] = {
++ { .compatible = "fsl,imx6q-mlb150", .data = &imx_mlb150_devtype[0], },
++ { /* sentinel */ }
++};
++
++/*
++ * This function is called whenever the MLB device is detected.
++ */
++static int mxc_mlb150_probe(struct platform_device *pdev)
++{
++ int ret, mlb_major, i;
++ struct mlb_data *drvdata;
++ struct resource *res;
++ struct device_node *np = pdev->dev.of_node;
++
++ drvdata = devm_kzalloc(&pdev->dev, sizeof(struct mlb_data),
++ GFP_KERNEL);
++ if (!drvdata) {
++ dev_err(&pdev->dev, "can't allocate enough memory\n");
++ return -ENOMEM;
++ }
++
++ /*
++ * Register MLB lld as four character devices
++ */
++ ret = alloc_chrdev_region(&drvdata->firstdev, 0,
++ MLB_MINOR_DEVICES, "mxc_mlb150");
++ if (ret < 0) {
++ dev_err(&pdev->dev, "alloc region error\n");
++ goto err_reg;
++ }
++ mlb_major = MAJOR(drvdata->firstdev);
++ dev_dbg(&pdev->dev, "MLB device major: %d\n", mlb_major);
++
++ cdev_init(&drvdata->cdev, &mxc_mlb150_fops);
++ drvdata->cdev.owner = THIS_MODULE;
++
++ ret = cdev_add(&drvdata->cdev, drvdata->firstdev, MLB_MINOR_DEVICES);
++ if (ret) {
++ dev_err(&pdev->dev, "can't add cdev\n");
++ goto err_reg;
++	 * A TX interrupt only indicates that the data has been sent
++	 * to the AHB bus, not that it has reached the MITB. Thus we
++	 * add a delay here so the transfer can complete.
++ drvdata->class = class_create(THIS_MODULE, "mlb150");
++ if (IS_ERR(drvdata->class)) {
++ dev_err(&pdev->dev, "failed to create device class\n");
++ ret = -ENOMEM;
++ goto err_class;
++ }
++
++ for (i = 0; i < MLB_MINOR_DEVICES; i++) {
++ struct device *class_dev;
++
++ class_dev = device_create(drvdata->class, NULL,
++ MKDEV(mlb_major, i),
++ NULL, mlb_devinfo[i].dev_name);
++ if (IS_ERR(class_dev)) {
++ dev_err(&pdev->dev, "failed to create mlb150 %s"
++ " class device\n", mlb_devinfo[i].dev_name);
++ ret = -ENOMEM;
++ goto err_dev;
++ }
++ }
++
++ /* ahb0 irq */
++ drvdata->irq_ahb0 = platform_get_irq(pdev, 1);
++ if (drvdata->irq_ahb0 < 0) {
++		dev_err(&pdev->dev, "No ahb0 irq line provided\n");
++		ret = -ENODEV;
++		goto err_dev;
++ }
++ dev_dbg(&pdev->dev, "ahb0_irq: %d\n", drvdata->irq_ahb0);
++ if (devm_request_irq(&pdev->dev, drvdata->irq_ahb0, mlb_ahb_isr,
++ 0, "mlb_ahb0", NULL)) {
++		dev_err(&pdev->dev, "can't claim irq %d\n", drvdata->irq_ahb0);
++		ret = -EBUSY;
++		goto err_dev;
++ }
++
++ /* ahb1 irq */
++ drvdata->irq_ahb1 = platform_get_irq(pdev, 2);
++ if (drvdata->irq_ahb1 < 0) {
++		dev_err(&pdev->dev, "No ahb1 irq line provided\n");
++		ret = -ENODEV;
++		goto err_dev;
++ }
++ dev_dbg(&pdev->dev, "ahb1_irq: %d\n", drvdata->irq_ahb1);
++ if (devm_request_irq(&pdev->dev, drvdata->irq_ahb1, mlb_ahb_isr,
++ 0, "mlb_ahb1", NULL)) {
++		dev_err(&pdev->dev, "can't claim irq %d\n", drvdata->irq_ahb1);
++		ret = -EBUSY;
++		goto err_dev;
++ }
++
++ /* mlb irq */
++ drvdata->irq_mlb = platform_get_irq(pdev, 0);
++ if (drvdata->irq_mlb < 0) {
++		dev_err(&pdev->dev, "No mlb irq line provided\n");
++		ret = -ENODEV;
++		goto err_dev;
++ }
++ dev_dbg(&pdev->dev, "mlb_irq: %d\n", drvdata->irq_mlb);
++ if (devm_request_irq(&pdev->dev, drvdata->irq_mlb, mlb_isr,
++ 0, "mlb", NULL)) {
++		dev_err(&pdev->dev, "can't claim irq %d\n", drvdata->irq_mlb);
++		ret = -EBUSY;
++		goto err_dev;
++ }
++
++ /* ioremap from phy mlb to kernel space */
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(&pdev->dev, "can't get device resources\n");
++ ret = -ENOENT;
++ goto err_dev;
++ }
++	mlb_base = devm_ioremap_resource(&pdev->dev, res);
++	if (IS_ERR(mlb_base)) {
++		dev_err(&pdev->dev,
++			"failed to get ioremap base\n");
++		ret = PTR_ERR(mlb_base);
++		goto err_dev;
++	}
++	dev_dbg(&pdev->dev, "mapped base address: 0x%08x\n", (u32)mlb_base);
++	drvdata->membase = mlb_base;
++
++#ifdef CONFIG_REGULATOR
++ drvdata->nvcc = devm_regulator_get(&pdev->dev, "reg_nvcc");
++ if (!IS_ERR(drvdata->nvcc)) {
++ regulator_set_voltage(drvdata->nvcc, 2500000, 2500000);
++		dev_dbg(&pdev->dev, "enable regulator\n");
++		ret = regulator_enable(drvdata->nvcc);
++		if (ret) {
++			dev_err(&pdev->dev, "failed to enable regulator\n");
++ goto err_dev;
++ }
++ }
++#endif
++
++ /* enable clock */
++ drvdata->clk_mlb3p = devm_clk_get(&pdev->dev, "mlb");
++ if (IS_ERR(drvdata->clk_mlb3p)) {
++ dev_err(&pdev->dev, "unable to get mlb clock\n");
++ ret = PTR_ERR(drvdata->clk_mlb3p);
++ goto err_dev;
++ }
++
++ drvdata->clk_mlb6p = devm_clk_get(&pdev->dev, "pll8_mlb");
++ if (IS_ERR(drvdata->clk_mlb6p)) {
++ dev_err(&pdev->dev, "unable to get mlb pll clock\n");
++ ret = PTR_ERR(drvdata->clk_mlb6p);
++ goto err_dev;
++ }
++
++
++ drvdata->iram_pool = of_get_named_gen_pool(np, "iram", 0);
++ if (!drvdata->iram_pool) {
++ dev_err(&pdev->dev, "iram pool not available\n");
++ ret = -ENOMEM;
++ goto err_dev;
++ }
++
++ drvdata->devinfo = NULL;
++ mxc_mlb150_irq_enable(drvdata, 0);
++ platform_set_drvdata(pdev, drvdata);
++ return 0;
++
++err_dev:
++ for (--i; i >= 0; i--)
++ device_destroy(drvdata->class, MKDEV(mlb_major, i));
++
++ class_destroy(drvdata->class);
++err_class:
++ cdev_del(&drvdata->cdev);
++err_reg:
++ unregister_chrdev_region(drvdata->firstdev, MLB_MINOR_DEVICES);
++
++ return ret;
++}
++
++static int mxc_mlb150_remove(struct platform_device *pdev)
++{
++ int i;
++ struct mlb_data *drvdata = platform_get_drvdata(pdev);
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++
++ if (pdevinfo && atomic_read(&pdevinfo->on)
++ && (pdevinfo->fps >= CLK_2048FS))
++ clk_disable_unprepare(drvdata->clk_mlb6p);
++
++ if (pdevinfo && atomic_read(&pdevinfo->opencnt))
++ clk_disable_unprepare(drvdata->clk_mlb3p);
++
++ /* disable mlb power */
++#ifdef CONFIG_REGULATOR
++ if (!IS_ERR(drvdata->nvcc))
++ regulator_disable(drvdata->nvcc);
++#endif
++
++ /* destroy mlb device class */
++ for (i = MLB_MINOR_DEVICES - 1; i >= 0; i--)
++ device_destroy(drvdata->class,
++ MKDEV(MAJOR(drvdata->firstdev), i));
++ class_destroy(drvdata->class);
++
++ cdev_del(&drvdata->cdev);
++
++ /* Unregister the two MLB devices */
++ unregister_chrdev_region(drvdata->firstdev, MLB_MINOR_DEVICES);
++
++ return 0;
++}
++
++#ifdef CONFIG_PM
++static int mxc_mlb150_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ struct mlb_data *drvdata = platform_get_drvdata(pdev);
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++
++ if (pdevinfo && atomic_read(&pdevinfo->on)
++ && (pdevinfo->fps >= CLK_2048FS))
++ clk_disable_unprepare(drvdata->clk_mlb6p);
++
++ if (pdevinfo && atomic_read(&pdevinfo->opencnt)) {
++ mlb150_dev_exit();
++ clk_disable_unprepare(drvdata->clk_mlb3p);
++ }
++
++ return 0;
++}
++
++static int mxc_mlb150_resume(struct platform_device *pdev)
++{
++ struct mlb_data *drvdata = platform_get_drvdata(pdev);
++ struct mlb_dev_info *pdevinfo = drvdata->devinfo;
++
++ if (pdevinfo && atomic_read(&pdevinfo->opencnt)) {
++ clk_prepare_enable(drvdata->clk_mlb3p);
++ mlb150_dev_init();
++ }
++
++ if (pdevinfo && atomic_read(&pdevinfo->on) &&
++ (pdevinfo->fps >= CLK_2048FS))
++ clk_prepare_enable(drvdata->clk_mlb6p);
++
++ return 0;
++}
++#else
++#define mxc_mlb150_suspend NULL
++#define mxc_mlb150_resume NULL
++#endif
++
++/*
++ * platform driver structure for MLB
++ */
++static struct platform_driver mxc_mlb150_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
++ .of_match_table = mlb150_imx_dt_ids,
++ },
++ .probe = mxc_mlb150_probe,
++ .remove = mxc_mlb150_remove,
++ .suspend = mxc_mlb150_suspend,
++ .resume = mxc_mlb150_resume,
++ .id_table = imx_mlb150_devtype,
++};
++
++static int __init mxc_mlb150_init(void)
++{
++ return platform_driver_register(&mxc_mlb150_driver);
++}
++
++static void __exit mxc_mlb150_exit(void)
++{
++ platform_driver_unregister(&mxc_mlb150_driver);
++}
++
++module_init(mxc_mlb150_init);
++module_exit(mxc_mlb150_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("MLB150 low level driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-4.1.3/drivers/mxc/vpu/Kconfig linux-xbian-imx6/drivers/mxc/vpu/Kconfig
+--- linux-4.1.3/drivers/mxc/vpu/Kconfig 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/vpu/Kconfig 2015-07-27 23:13:06.230751680 +0200
+@@ -0,0 +1,31 @@
++#
++# Codec configuration
++#
++
++menu "MXC VPU(Video Processing Unit) support"
++
++config MXC_VPU
++ tristate "Support for MXC VPU(Video Processing Unit)"
++ depends on (SOC_IMX27 || SOC_IMX5 || SOC_IMX6Q)
++ default y
++ ---help---
++	  The VPU codec device provides encode and decode functions for
++	  H.264/MPEG4/H.263, as well as MPEG2/VC-1/DivX on some platforms.
++
++config MXC_VPU_DEBUG
++ bool "MXC VPU debugging"
++ depends on MXC_VPU != n
++ help
++ This is an option for the developers; most people should
++ say N here. This enables MXC VPU driver debugging.
++
++config MX6_VPU_352M
++ bool "MX6 VPU 352M"
++ depends on MXC_VPU
++ default n
++ help
++	  Increase the VPU frequency to 352MHz. This option disables dynamic
++	  bus frequency adjustment, and the lowest CPU setpoint will be
++	  352MHz. It is intended for special VPU use cases.
++
++endmenu
+diff -Nur linux-4.1.3/drivers/mxc/vpu/Makefile linux-xbian-imx6/drivers/mxc/vpu/Makefile
+--- linux-4.1.3/drivers/mxc/vpu/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/vpu/Makefile 2015-07-27 23:13:06.230751680 +0200
+@@ -0,0 +1,9 @@
++#
++# Makefile for the VPU drivers.
++#
++
++obj-$(CONFIG_MXC_VPU) += mxc_vpu.o
++
++ifeq ($(CONFIG_MXC_VPU_DEBUG),y)
++EXTRA_CFLAGS += -DDEBUG
++endif
+diff -Nur linux-4.1.3/drivers/mxc/vpu/mxc_vpu.c linux-xbian-imx6/drivers/mxc/vpu/mxc_vpu.c
+--- linux-4.1.3/drivers/mxc/vpu/mxc_vpu.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/mxc/vpu/mxc_vpu.c 2015-07-27 23:13:06.230751680 +0200
+@@ -0,0 +1,1344 @@
++/*
++ * Copyright 2006-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file mxc_vpu.c
++ *
++ * @brief VPU system initialization and file operation implementation
++ *
++ * @ingroup VPU
++ */
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/ioport.h>
++#include <linux/stat.h>
++#include <linux/platform_device.h>
++#include <linux/kdev_t.h>
++#include <linux/dma-mapping.h>
++#include <linux/wait.h>
++#include <linux/list.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/fsl_devices.h>
++#include <linux/uaccess.h>
++#include <linux/io.h>
++#include <linux/slab.h>
++#include <linux/workqueue.h>
++#include <linux/sched.h>
++#include <linux/vmalloc.h>
++#include <linux/regulator/consumer.h>
++#include <linux/page-flags.h>
++#include <linux/mm_types.h>
++#include <linux/types.h>
++#include <linux/memblock.h>
++#include <linux/memory.h>
++#include <linux/version.h>
++#include <asm/page.h>
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++#include <linux/module.h>
++#include <linux/pm_runtime.h>
++#include <linux/sizes.h>
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
++#include <linux/iram_alloc.h>
++#include <mach/clock.h>
++#include <mach/hardware.h>
++#include <mach/mxc_vpu.h>
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++#include <linux/busfreq-imx6.h>
++#include <linux/clk.h>
++#include <linux/genalloc.h>
++#include <linux/mxc_vpu.h>
++#include <linux/of.h>
++#include <linux/reset.h>
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++#include <mach/busfreq.h>
++#include <mach/common.h>
++#else
++#include <asm/sizes.h>
++#endif
++
++/* Define one new pgprot which combined uncached and XN(never executable) */
++#define pgprot_noncachedxn(prot) \
++ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
++
++struct vpu_priv {
++ struct fasync_struct *async_queue;
++ struct work_struct work;
++ struct workqueue_struct *workqueue;
++ struct mutex lock;
++};
++
++/* To track the allocated memory buffer */
++struct memalloc_record {
++ struct list_head list;
++ struct vpu_mem_desc mem;
++};
++
++struct iram_setting {
++ u32 start;
++ u32 end;
++};
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++static struct gen_pool *iram_pool;
++static u32 iram_base;
++#endif
++
++static LIST_HEAD(head);
++
++static int vpu_major;
++static int vpu_clk_usercount;
++static struct class *vpu_class;
++static struct vpu_priv vpu_data;
++static u8 open_count;
++static struct clk *vpu_clk;
++static struct vpu_mem_desc bitwork_mem = { 0 };
++static struct vpu_mem_desc pic_para_mem = { 0 };
++static struct vpu_mem_desc user_data_mem = { 0 };
++static struct vpu_mem_desc share_mem = { 0 };
++static struct vpu_mem_desc vshare_mem = { 0 };
++
++static void __iomem *vpu_base;
++static int vpu_ipi_irq;
++static u32 phy_vpu_base_addr;
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++static phys_addr_t top_address_DRAM;
++static struct mxc_vpu_platform_data *vpu_plat;
++#endif
++
++static struct device *vpu_dev;
++
++/* IRAM setting */
++static struct iram_setting iram;
++
++/* implement the blocking ioctl */
++static int irq_status;
++static int codec_done;
++static wait_queue_head_t vpu_queue;
++
++#ifdef CONFIG_SOC_IMX6Q
++#define MXC_VPU_HAS_JPU
++#endif
++
++#ifdef MXC_VPU_HAS_JPU
++static int vpu_jpu_irq;
++#endif
++
++#ifdef CONFIG_PM
++static unsigned int regBk[64];
++static unsigned int pc_before_suspend;
++#endif
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++static struct regulator *vpu_regulator;
++#endif
++static atomic_t clk_cnt_from_ioc = ATOMIC_INIT(0);
++
++#define READ_REG(x) readl_relaxed(vpu_base + x)
++#define WRITE_REG(val, x) writel_relaxed(val, vpu_base + x)
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++/* redirect to static functions */
++static int cpu_is_mx6dl(void)
++{
++ int ret;
++ ret = of_machine_is_compatible("fsl,imx6dl");
++ return ret;
++}
++
++static int cpu_is_mx6q(void)
++{
++ int ret;
++ ret = of_machine_is_compatible("fsl,imx6q");
++ return ret;
++}
++#endif
++
++static void vpu_reset(void)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++ device_reset(vpu_dev);
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ imx_src_reset_vpu();
++#else
++ if (vpu_plat->reset)
++ vpu_plat->reset();
++#endif
++}
++
++static long vpu_power_get(bool on)
++{
++ long ret = 0;
++
++ if (on) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ vpu_regulator = regulator_get(NULL, "cpu_vddvpu");
++ ret = IS_ERR(vpu_regulator);
++#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++ vpu_regulator = devm_regulator_get(vpu_dev, "pu");
++ ret = IS_ERR(vpu_regulator);
++#endif
++ } else {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ if (!IS_ERR(vpu_regulator))
++ regulator_put(vpu_regulator);
++#endif
++ }
++ return ret;
++}
++
++static void vpu_power_up(bool on)
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++ int ret = 0;
++
++ if (on) {
++ if (!IS_ERR(vpu_regulator)) {
++ ret = regulator_enable(vpu_regulator);
++ if (ret)
++ dev_err(vpu_dev, "failed to power up vpu\n");
++ }
++ } else {
++ if (!IS_ERR(vpu_regulator)) {
++ ret = regulator_disable(vpu_regulator);
++ if (ret)
++ dev_err(vpu_dev, "failed to power down vpu\n");
++ }
++ }
++#else
++ imx_gpc_power_up_pu(on);
++#endif
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++static int cpu_is_mx53(void)
++{
++ return 0;
++}
++
++static int cpu_is_mx51(void)
++{
++ return 0;
++}
++
++#define VM_RESERVED 0
++#endif
++
++/*!
++ * Private function to alloc dma buffer
++ * @return status 0 success.
++ */
++static int vpu_alloc_dma_buffer(struct vpu_mem_desc *mem)
++{
++ mem->cpu_addr = (unsigned long)
++ dma_zalloc_coherent(NULL, PAGE_ALIGN(mem->size),
++ (dma_addr_t *) (&mem->phy_addr),
++ GFP_DMA | GFP_KERNEL);
++ dev_dbg(vpu_dev, "[ALLOC] mem alloc cpu_addr = 0x%x\n", mem->cpu_addr);
++ if ((void *)(mem->cpu_addr) == NULL) {
++ dev_err(vpu_dev, "Physical memory allocation error!\n");
++ return -1;
++ }
++ return 0;
++}
++
++/*!
++ * Private function to free dma buffer
++ */
++static void vpu_free_dma_buffer(struct vpu_mem_desc *mem)
++{
++ if (mem->cpu_addr != 0) {
++ dma_free_coherent(0, PAGE_ALIGN(mem->size),
++ (void *)mem->cpu_addr, mem->phy_addr);
++ }
++}
++
++/*!
++ * Private function to free buffers
++ * @return status 0 success.
++ */
++static int vpu_free_buffers(void)
++{
++ struct memalloc_record *rec, *n;
++ struct vpu_mem_desc mem;
++
++ list_for_each_entry_safe(rec, n, &head, list) {
++ mem = rec->mem;
++ if (mem.cpu_addr != 0) {
++ vpu_free_dma_buffer(&mem);
++ dev_dbg(vpu_dev, "[FREE] freed paddr=0x%08X\n", mem.phy_addr);
++ /* delete from list */
++ list_del(&rec->list);
++ kfree(rec);
++ }
++ }
++
++ return 0;
++}
++
++static inline void vpu_worker_callback(struct work_struct *w)
++{
++ struct vpu_priv *dev = container_of(w, struct vpu_priv,
++ work);
++
++ if (dev->async_queue)
++ kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
++
++ irq_status = 1;
++ /*
++	 * The clock is gated on when decode/encode starts; gate it
++	 * off when the codec is done.
++ */
++ if (codec_done)
++ codec_done = 0;
++
++ wake_up_interruptible(&vpu_queue);
++}
++
++/*!
++ * @brief vpu interrupt handler
++ */
++static irqreturn_t vpu_ipi_irq_handler(int irq, void *dev_id)
++{
++ struct vpu_priv *dev = dev_id;
++ unsigned long reg;
++
++ reg = READ_REG(BIT_INT_REASON);
++ if (reg & 0x8)
++ codec_done = 1;
++ WRITE_REG(0x1, BIT_INT_CLEAR);
++
++ queue_work(dev->workqueue, &dev->work);
++
++ return IRQ_HANDLED;
++}
++
++/*!
++ * @brief vpu jpu interrupt handler
++ */
++#ifdef MXC_VPU_HAS_JPU
++static irqreturn_t vpu_jpu_irq_handler(int irq, void *dev_id)
++{
++ struct vpu_priv *dev = dev_id;
++ unsigned long reg;
++
++ reg = READ_REG(MJPEG_PIC_STATUS_REG);
++ if (reg & 0x3)
++ codec_done = 1;
++
++ queue_work(dev->workqueue, &dev->work);
++
++ return IRQ_HANDLED;
++}
++#endif
++
++/*!
++ * @brief Check whether a physical memory address about to be passed to
++ * the VPU is valid. We have already seen that passing a wrong address
++ * (e.g. a virtual address) to the VPU can hang the system.
++ *
++ * @return true if the address is a valid physical memory address,
++ * false if not.
++ */
++bool vpu_is_valid_phy_memory(u32 paddr)
++{
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ if (paddr > top_address_DRAM)
++ return false;
++#endif
++
++ return true;
++}
++
++/*!
++ * @brief open function for vpu file operation
++ *
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_open(struct inode *inode, struct file *filp)
++{
++
++ mutex_lock(&vpu_data.lock);
++
++ if (open_count++ == 0) {
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ pm_runtime_get_sync(vpu_dev);
++#endif
++ vpu_power_up(true);
++
++#ifdef CONFIG_SOC_IMX6Q
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ if (READ_REG(BIT_CUR_PC))
++			dev_dbg(vpu_dev, "VPU not powered off before open!\n");
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++#endif
++ }
++
++ filp->private_data = (void *)(&vpu_data);
++ mutex_unlock(&vpu_data.lock);
++ return 0;
++}
++
++/*!
++ * @brief IO ctrl function for vpu file operation
++ * @param cmd IO ctrl command
++ * @return 0 on success or negative error code on error
++ */
++static long vpu_ioctl(struct file *filp, u_int cmd,
++ u_long arg)
++{
++ int ret = 0;
++
++ switch (cmd) {
++ case VPU_IOC_PHYMEM_ALLOC:
++ {
++ struct memalloc_record *rec;
++
++ rec = kzalloc(sizeof(*rec), GFP_KERNEL);
++ if (!rec)
++ return -ENOMEM;
++
++ ret = copy_from_user(&(rec->mem),
++ (struct vpu_mem_desc *)arg,
++ sizeof(struct vpu_mem_desc));
++ if (ret) {
++ kfree(rec);
++ return -EFAULT;
++ }
++
++ dev_dbg(vpu_dev, "[ALLOC] mem alloc size = 0x%x\n",
++ rec->mem.size);
++
++ ret = vpu_alloc_dma_buffer(&(rec->mem));
++ if (ret == -1) {
++ kfree(rec);
++ dev_err(vpu_dev,
++ "Physical memory allocation error!\n");
++ break;
++ }
++ ret = copy_to_user((void __user *)arg, &(rec->mem),
++ sizeof(struct vpu_mem_desc));
++ if (ret) {
++ kfree(rec);
++ ret = -EFAULT;
++ break;
++ }
++
++ mutex_lock(&vpu_data.lock);
++ list_add(&rec->list, &head);
++ mutex_unlock(&vpu_data.lock);
++
++ break;
++ }
++ case VPU_IOC_PHYMEM_FREE:
++ {
++ struct memalloc_record *rec, *n;
++ struct vpu_mem_desc vpu_mem;
++
++ ret = copy_from_user(&vpu_mem,
++ (struct vpu_mem_desc *)arg,
++ sizeof(struct vpu_mem_desc));
++ if (ret)
++ return -EACCES;
++
++ dev_dbg(vpu_dev, "[FREE] mem freed cpu_addr = 0x%x\n",
++ vpu_mem.cpu_addr);
++ if ((void *)vpu_mem.cpu_addr != NULL)
++ vpu_free_dma_buffer(&vpu_mem);
++
++ mutex_lock(&vpu_data.lock);
++ list_for_each_entry_safe(rec, n, &head, list) {
++ if (rec->mem.cpu_addr == vpu_mem.cpu_addr) {
++ /* delete from list */
++ list_del(&rec->list);
++ kfree(rec);
++ break;
++ }
++ }
++ mutex_unlock(&vpu_data.lock);
++
++ break;
++ }
++ case VPU_IOC_WAIT4INT:
++ {
++ u_long timeout = (u_long) arg;
++ if (!wait_event_interruptible_timeout
++ (vpu_queue, irq_status != 0,
++ msecs_to_jiffies(timeout))) {
++ dev_warn(vpu_dev, "VPU blocking: timeout.\n");
++ ret = -ETIME;
++ } else if (signal_pending(current)) {
++			dev_warn(vpu_dev, "VPU wait interrupted by signal.\n");
++ ret = -ERESTARTSYS;
++ } else
++ irq_status = 0;
++ break;
++ }
++ case VPU_IOC_IRAM_SETTING:
++ {
++ ret = copy_to_user((void __user *)arg, &iram,
++ sizeof(struct iram_setting));
++ if (ret)
++ ret = -EFAULT;
++
++ break;
++ }
++ case VPU_IOC_CLKGATE_SETTING:
++ {
++ u32 clkgate_en;
++
++ if (get_user(clkgate_en, (u32 __user *) arg))
++ return -EFAULT;
++
++ if (clkgate_en) {
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ atomic_inc(&clk_cnt_from_ioc);
++ } else {
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ atomic_dec(&clk_cnt_from_ioc);
++ }
++
++ break;
++ }
++ case VPU_IOC_GET_SHARE_MEM:
++ {
++ mutex_lock(&vpu_data.lock);
++ if (share_mem.cpu_addr != 0) {
++ ret = copy_to_user((void __user *)arg,
++ &share_mem,
++ sizeof(struct vpu_mem_desc));
++ mutex_unlock(&vpu_data.lock);
++ break;
++ } else {
++ if (copy_from_user(&share_mem,
++ (struct vpu_mem_desc *)arg,
++ sizeof(struct vpu_mem_desc))) {
++ mutex_unlock(&vpu_data.lock);
++ return -EFAULT;
++ }
++ if (vpu_alloc_dma_buffer(&share_mem) == -1)
++ ret = -EFAULT;
++ else {
++ if (copy_to_user((void __user *)arg,
++ &share_mem,
++ sizeof(struct
++ vpu_mem_desc)))
++ ret = -EFAULT;
++ }
++ }
++ mutex_unlock(&vpu_data.lock);
++ break;
++ }
++ case VPU_IOC_REQ_VSHARE_MEM:
++ {
++ mutex_lock(&vpu_data.lock);
++ if (vshare_mem.cpu_addr != 0) {
++ ret = copy_to_user((void __user *)arg,
++ &vshare_mem,
++ sizeof(struct vpu_mem_desc));
++ mutex_unlock(&vpu_data.lock);
++ break;
++ } else {
++ if (copy_from_user(&vshare_mem,
++ (struct vpu_mem_desc *)arg,
++ sizeof(struct
++ vpu_mem_desc))) {
++ mutex_unlock(&vpu_data.lock);
++ return -EFAULT;
++ }
++ /* vmalloc shared memory if not allocated */
++ if (!vshare_mem.cpu_addr)
++ vshare_mem.cpu_addr =
++ (unsigned long)
++ vmalloc_user(vshare_mem.size);
++ if (copy_to_user
++ ((void __user *)arg, &vshare_mem,
++ sizeof(struct vpu_mem_desc)))
++ ret = -EFAULT;
++ }
++ mutex_unlock(&vpu_data.lock);
++ break;
++ }
++ case VPU_IOC_GET_WORK_ADDR:
++ {
++ if (bitwork_mem.cpu_addr != 0) {
++ ret =
++ copy_to_user((void __user *)arg,
++ &bitwork_mem,
++ sizeof(struct vpu_mem_desc));
++ break;
++ } else {
++ if (copy_from_user(&bitwork_mem,
++ (struct vpu_mem_desc *)arg,
++ sizeof(struct vpu_mem_desc)))
++ return -EFAULT;
++
++ if (vpu_alloc_dma_buffer(&bitwork_mem) == -1)
++ ret = -EFAULT;
++ else if (copy_to_user((void __user *)arg,
++ &bitwork_mem,
++ sizeof(struct
++ vpu_mem_desc)))
++ ret = -EFAULT;
++ }
++ break;
++ }
++ /*
++	 * The following two ioctls are used when the user allocates the
++	 * working buffer and registers it with the vpu driver.
++ */
++ case VPU_IOC_QUERY_BITWORK_MEM:
++ {
++ if (copy_to_user((void __user *)arg,
++ &bitwork_mem,
++ sizeof(struct vpu_mem_desc)))
++ ret = -EFAULT;
++ break;
++ }
++ case VPU_IOC_SET_BITWORK_MEM:
++ {
++ if (copy_from_user(&bitwork_mem,
++ (struct vpu_mem_desc *)arg,
++ sizeof(struct vpu_mem_desc)))
++ ret = -EFAULT;
++ break;
++ }
++ case VPU_IOC_SYS_SW_RESET:
++ {
++ vpu_reset();
++ break;
++ }
++ case VPU_IOC_REG_DUMP:
++ break;
++ case VPU_IOC_PHYMEM_DUMP:
++ break;
++ case VPU_IOC_PHYMEM_CHECK:
++ {
++ struct vpu_mem_desc check_memory;
++ ret = copy_from_user(&check_memory,
++ (void __user *)arg,
++ sizeof(struct vpu_mem_desc));
++ if (ret != 0) {
++ dev_err(vpu_dev, "copy from user failure:%d\n", ret);
++ ret = -EFAULT;
++ break;
++ }
++ ret = vpu_is_valid_phy_memory((u32)check_memory.phy_addr);
++
++ dev_dbg(vpu_dev, "vpu: memory phy:0x%x %s phy memory\n",
++ check_memory.phy_addr, (ret ? "is" : "isn't"));
++ /* borrow .size to pass back the result. */
++ check_memory.size = ret;
++ ret = copy_to_user((void __user *)arg, &check_memory,
++ sizeof(struct vpu_mem_desc));
++ if (ret) {
++ ret = -EFAULT;
++ break;
++ }
++ break;
++ }
++ case VPU_IOC_LOCK_DEV:
++ {
++ u32 lock_en;
++
++ if (get_user(lock_en, (u32 __user *) arg))
++ return -EFAULT;
++
++ if (lock_en)
++ mutex_lock(&vpu_data.lock);
++ else
++ mutex_unlock(&vpu_data.lock);
++
++ break;
++ }
++ default:
++ {
++ dev_err(vpu_dev, "No such IOCTL, cmd is %d\n", cmd);
++ ret = -EINVAL;
++ break;
++ }
++ }
++ return ret;
++}
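++
++/*
++ * Illustrative user-space sketch (editorial addition, not part of this
++ * patch): how the ioctl interface above is typically driven. It assumes
++ * the VPU_IOC_* commands and struct vpu_mem_desc come from the driver's
++ * uapi header, called <linux/mxc_vpu.h> here only for the example.
++ *
++ *	#include <fcntl.h>
++ *	#include <sys/ioctl.h>
++ *	#include <linux/mxc_vpu.h>
++ *
++ *	int fd = open("/dev/mxc_vpu", O_RDWR);
++ *	struct vpu_mem_desc mem = { .size = 4096 };
++ *
++ *	if (fd >= 0 && ioctl(fd, VPU_IOC_PHYMEM_ALLOC, &mem) == 0) {
++ *		ioctl(fd, VPU_IOC_WAIT4INT, 500);	// 500 ms timeout
++ *		ioctl(fd, VPU_IOC_PHYMEM_FREE, &mem);
++ *	}
++ */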
++
++/*!
++ * @brief Release function for vpu file operation
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_release(struct inode *inode, struct file *filp)
++{
++ int i;
++ unsigned long timeout;
++
++ mutex_lock(&vpu_data.lock);
++
++ if (open_count > 0 && !(--open_count)) {
++
++		/* Wait for the VPU to go idle */
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ if (READ_REG(BIT_CUR_PC)) {
++
++ timeout = jiffies + HZ;
++ while (READ_REG(BIT_BUSY_FLAG)) {
++ msleep(1);
++ if (time_after(jiffies, timeout)) {
++ dev_warn(vpu_dev, "VPU timeout during release\n");
++ break;
++ }
++ }
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++
++ /* Clean up interrupt */
++ cancel_work_sync(&vpu_data.work);
++ flush_workqueue(vpu_data.workqueue);
++ irq_status = 0;
++
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ if (READ_REG(BIT_BUSY_FLAG)) {
++
++ if (cpu_is_mx51() || cpu_is_mx53()) {
++ dev_err(vpu_dev,
++ "fatal error: can't gate/power off when VPU is busy\n");
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ mutex_unlock(&vpu_data.lock);
++ return -EFAULT;
++ }
++
++#ifdef CONFIG_SOC_IMX6Q
++ if (cpu_is_mx6dl() || cpu_is_mx6q()) {
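++				/*
++				 * Editorial note: registers 0x10F0/0x10F4
++				 * are used below as a request/acknowledge
++				 * pair with the VPU firmware; writing 0x11
++				 * asks it to quiesce and it answers 0x77
++				 * once idle. This reading is inferred from
++				 * the code, the registers carry no symbolic
++				 * names here.
++				 */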
++ WRITE_REG(0x11, 0x10F0);
++ timeout = jiffies + HZ;
++ while (READ_REG(0x10F4) != 0x77) {
++ msleep(1);
++ if (time_after(jiffies, timeout))
++ break;
++ }
++
++ if (READ_REG(0x10F4) != 0x77) {
++ dev_err(vpu_dev,
++ "fatal error: can't gate/power off when VPU is busy\n");
++ WRITE_REG(0x0, 0x10F0);
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ mutex_unlock(&vpu_data.lock);
++ return -EFAULT;
++ } else
++ vpu_reset();
++ }
++#endif
++ }
++ }
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++
++ vpu_free_buffers();
++
++ /* Free shared memory when vpu device is idle */
++ vpu_free_dma_buffer(&share_mem);
++ share_mem.cpu_addr = 0;
++ vfree((void *)vshare_mem.cpu_addr);
++ vshare_mem.cpu_addr = 0;
++
++ vpu_clk_usercount = atomic_read(&clk_cnt_from_ioc);
++ for (i = 0; i < vpu_clk_usercount; i++) {
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ atomic_dec(&clk_cnt_from_ioc);
++ }
++
++ vpu_power_up(false);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ pm_runtime_put_sync_suspend(vpu_dev);
++#endif
++
++ }
++ mutex_unlock(&vpu_data.lock);
++
++ return 0;
++}
++
++/*!
++ * @brief fasync function for vpu file operation
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_fasync(int fd, struct file *filp, int mode)
++{
++ struct vpu_priv *dev = (struct vpu_priv *)filp->private_data;
++ return fasync_helper(fd, filp, mode, &dev->async_queue);
++}
++
++/*!
++ * @brief memory map function of hardware registers for vpu file operation
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_map_hwregs(struct file *fp, struct vm_area_struct *vm)
++{
++ unsigned long pfn;
++
++ vm->vm_flags |= VM_IO | VM_RESERVED;
++ /*
++ * Since vpu registers have been mapped with ioremap() at probe
++ * which L_PTE_XN is 1, and the same physical address must be
++ * mapped multiple times with same type, so set L_PTE_XN to 1 here.
++ * Otherwise, there may be unexpected result in video codec.
++ */
++ vm->vm_page_prot = pgprot_noncachedxn(vm->vm_page_prot);
++ pfn = phy_vpu_base_addr >> PAGE_SHIFT;
++ dev_dbg(vpu_dev, "size=0x%x, page no.=0x%x\n",
++ (int)(vm->vm_end - vm->vm_start), (int)pfn);
++ return remap_pfn_range(vm, vm->vm_start, pfn, vm->vm_end - vm->vm_start,
++ vm->vm_page_prot) ? -EAGAIN : 0;
++}
++
++/*!
++ * @brief memory map function of DMA memory for vpu file operation
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_map_dma_mem(struct file *fp, struct vm_area_struct *vm)
++{
++ int request_size;
++ request_size = vm->vm_end - vm->vm_start;
++
++ dev_dbg(vpu_dev, "start=0x%x, pgoff=0x%x, size=0x%x\n",
++ (unsigned int)(vm->vm_start), (unsigned int)(vm->vm_pgoff),
++ request_size);
++
++ vm->vm_flags |= VM_IO | VM_RESERVED;
++ vm->vm_page_prot = pgprot_writecombine(vm->vm_page_prot);
++
++ return remap_pfn_range(vm, vm->vm_start, vm->vm_pgoff,
++ request_size, vm->vm_page_prot) ? -EAGAIN : 0;
++
++}
++
++/*!
++ * @brief memory map function of vmalloced share memory
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_map_vshare_mem(struct file *fp, struct vm_area_struct *vm)
++{
++ int ret = -EINVAL;
++
++ ret = remap_vmalloc_range(vm, (void *)(vm->vm_pgoff << PAGE_SHIFT), 0);
++ vm->vm_flags |= VM_IO;
++
++ return ret;
++}
++/*!
++ * @brief memory map interface for vpu file operation
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_mmap(struct file *fp, struct vm_area_struct *vm)
++{
++ unsigned long offset;
++
++ offset = vshare_mem.cpu_addr >> PAGE_SHIFT;
++
++ if (vm->vm_pgoff && (vm->vm_pgoff == offset))
++ return vpu_map_vshare_mem(fp, vm);
++ else if (vm->vm_pgoff)
++ return vpu_map_dma_mem(fp, vm);
++ else
++ return vpu_map_hwregs(fp, vm);
++}
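++
++/*
++ * Illustrative mapping sketch (editorial addition, not part of this
++ * patch): the mmap() offset selects the target. Offset 0 maps the
++ * hardware registers, the vshare offset maps the vmalloc'ed share
++ * memory, and any other non-zero offset is taken as the physical
++ * address of a previously allocated DMA buffer:
++ *
++ *	regs = mmap(NULL, reg_len, PROT_READ | PROT_WRITE,
++ *		    MAP_SHARED, fd, 0);
++ *	buf  = mmap(NULL, mem.size, PROT_READ | PROT_WRITE,
++ *		    MAP_SHARED, fd, mem.phy_addr);
++ */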
++
++const struct file_operations vpu_fops = {
++ .owner = THIS_MODULE,
++ .open = vpu_open,
++ .unlocked_ioctl = vpu_ioctl,
++ .release = vpu_release,
++ .fasync = vpu_fasync,
++ .mmap = vpu_mmap,
++};
++
++/*!
++ * This function is called by the driver framework to initialize the vpu device.
++ * @param dev The device structure for the vpu passed in by the framework.
++ * @return 0 on success or negative error code on error
++ */
++static int vpu_dev_probe(struct platform_device *pdev)
++{
++ int err = 0;
++ struct device *temp_class;
++ struct resource *res;
++ unsigned long addr = 0;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ struct device_node *np = pdev->dev.of_node;
++ u32 iramsize;
++
++ err = of_property_read_u32(np, "iramsize", (u32 *)&iramsize);
++ if (!err && iramsize)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++ {
++ iram_pool = of_get_named_gen_pool(np, "iram", 0);
++ if (!iram_pool) {
++ dev_err(&pdev->dev, "iram pool not available\n");
++ return -ENOMEM;
++ }
++
++ iram_base = gen_pool_alloc(iram_pool, iramsize);
++ if (!iram_base) {
++ dev_err(&pdev->dev, "unable to alloc iram\n");
++ return -ENOMEM;
++ }
++
++ addr = gen_pool_virt_to_phys(iram_pool, iram_base);
++ }
++#else
++ iram_alloc(iramsize, &addr);
++#endif
++ if (addr == 0)
++ iram.start = iram.end = 0;
++ else {
++ iram.start = addr;
++ iram.end = addr + iramsize - 1;
++ }
++#else
++
++ vpu_plat = pdev->dev.platform_data;
++
++ if (vpu_plat && vpu_plat->iram_enable && vpu_plat->iram_size)
++ iram_alloc(vpu_plat->iram_size, &addr);
++ if (addr == 0)
++ iram.start = iram.end = 0;
++ else {
++ iram.start = addr;
++ iram.end = addr + vpu_plat->iram_size - 1;
++ }
++#endif
++
++ vpu_dev = &pdev->dev;
++
++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu_regs");
++ if (!res) {
++ dev_err(vpu_dev, "vpu: unable to get vpu base addr\n");
++ return -ENODEV;
++ }
++ phy_vpu_base_addr = res->start;
++	vpu_base = ioremap(res->start, resource_size(res));
++
++ vpu_major = register_chrdev(vpu_major, "mxc_vpu", &vpu_fops);
++ if (vpu_major < 0) {
++ dev_err(vpu_dev, "vpu: unable to get a major for VPU\n");
++ err = -EBUSY;
++ goto error;
++ }
++
++ vpu_class = class_create(THIS_MODULE, "mxc_vpu");
++ if (IS_ERR(vpu_class)) {
++ err = PTR_ERR(vpu_class);
++ goto err_out_chrdev;
++ }
++
++ temp_class = device_create(vpu_class, NULL, MKDEV(vpu_major, 0),
++ NULL, "mxc_vpu");
++ if (IS_ERR(temp_class)) {
++ err = PTR_ERR(temp_class);
++ goto err_out_class;
++ }
++
++ vpu_clk = clk_get(&pdev->dev, "per");
++ if (IS_ERR(vpu_clk)) {
++ err = -ENOENT;
++ goto err_out_class;
++ }
++
++ vpu_ipi_irq = platform_get_irq_byname(pdev, "bit");
++ if (vpu_ipi_irq < 0) {
++ dev_err(vpu_dev, "vpu: unable to get vpu interrupt\n");
++ err = -ENXIO;
++ goto err_out_class;
++ }
++ err = request_irq(vpu_ipi_irq, vpu_ipi_irq_handler, 0, "VPU_CODEC_IRQ",
++ (void *)(&vpu_data));
++ if (err)
++ goto err_out_class;
++ if (vpu_power_get(true)) {
++ if (!(cpu_is_mx51() || cpu_is_mx53())) {
++ dev_err(vpu_dev, "failed to get vpu power\n");
++ goto err_out_class;
++ } else {
++			/* regulator_get will return an error on MX5x,
++			 * just ignore it everywhere */
++ dev_warn(vpu_dev, "failed to get vpu power\n");
++ }
++ }
++
++#ifdef MXC_VPU_HAS_JPU
++ vpu_jpu_irq = platform_get_irq_byname(pdev, "jpeg");
++ if (vpu_jpu_irq < 0) {
++ dev_err(vpu_dev, "vpu: unable to get vpu jpu interrupt\n");
++ err = -ENXIO;
++ free_irq(vpu_ipi_irq, &vpu_data);
++ goto err_out_class;
++ }
++ err = request_irq(vpu_jpu_irq, vpu_jpu_irq_handler, IRQF_TRIGGER_RISING,
++ "VPU_JPG_IRQ", (void *)(&vpu_data));
++ if (err) {
++ free_irq(vpu_ipi_irq, &vpu_data);
++ goto err_out_class;
++ }
++#endif
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ pm_runtime_enable(&pdev->dev);
++#endif
++
++ vpu_data.workqueue = create_workqueue("vpu_wq");
++ INIT_WORK(&vpu_data.work, vpu_worker_callback);
++ mutex_init(&vpu_data.lock);
++ dev_info(vpu_dev, "VPU initialized\n");
++ goto out;
++
++err_out_class:
++ device_destroy(vpu_class, MKDEV(vpu_major, 0));
++ class_destroy(vpu_class);
++err_out_chrdev:
++ unregister_chrdev(vpu_major, "mxc_vpu");
++error:
++ iounmap(vpu_base);
++out:
++ return err;
++}
++
++static int vpu_dev_remove(struct platform_device *pdev)
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ pm_runtime_disable(&pdev->dev);
++#endif
++ free_irq(vpu_ipi_irq, &vpu_data);
++#ifdef MXC_VPU_HAS_JPU
++ free_irq(vpu_jpu_irq, &vpu_data);
++#endif
++ cancel_work_sync(&vpu_data.work);
++ flush_workqueue(vpu_data.workqueue);
++ destroy_workqueue(vpu_data.workqueue);
++
++ iounmap(vpu_base);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ if (iram.start)
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
++ gen_pool_free(iram_pool, iram_base, iram.end-iram.start+1);
++#else
++ iram_free(iram.start, iram.end-iram.start+1);
++#endif
++#else
++ if (vpu_plat && vpu_plat->iram_enable && vpu_plat->iram_size)
++ iram_free(iram.start, vpu_plat->iram_size);
++#endif
++
++ vpu_power_get(false);
++ return 0;
++}
++
++#ifdef CONFIG_PM
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++static int vpu_suspend(struct device *dev)
++#else
++static int vpu_suspend(struct platform_device *pdev, pm_message_t state)
++#endif
++{
++ int i;
++ unsigned long timeout;
++
++ mutex_lock(&vpu_data.lock);
++ if (open_count == 0) {
++ /* VPU is released (all instances are freed),
++ * clock is already off, context is no longer needed,
++ * power is already off on MX6,
++ * gate power on MX51 */
++ if (cpu_is_mx51()) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ if (vpu_plat->pg)
++ vpu_plat->pg(1);
++#endif
++ }
++ } else {
++		/* Wait for the VPU to go idle; give up and return -EAGAIN
++		   if it is still busy after about 1 sec */
++ timeout = jiffies + HZ;
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ while (READ_REG(BIT_BUSY_FLAG)) {
++ msleep(1);
++ if (time_after(jiffies, timeout)) {
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ mutex_unlock(&vpu_data.lock);
++ return -EAGAIN;
++ }
++ }
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++
++ /* Make sure clock is disabled before suspend */
++ vpu_clk_usercount = atomic_read(&clk_cnt_from_ioc);
++ for (i = 0; i < vpu_clk_usercount; i++) {
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ }
++
++ if (cpu_is_mx53()) {
++ mutex_unlock(&vpu_data.lock);
++ return 0;
++ }
++
++ if (bitwork_mem.cpu_addr != 0) {
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ /* Save 64 registers from BIT_CODE_BUF_ADDR */
++ for (i = 0; i < 64; i++)
++ regBk[i] = READ_REG(BIT_CODE_BUF_ADDR + (i * 4));
++ pc_before_suspend = READ_REG(BIT_CUR_PC);
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ }
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ if (vpu_plat->pg)
++ vpu_plat->pg(1);
++#endif
++
++ /* If VPU is working before suspend, disable
++ * regulator to make usecount right. */
++ vpu_power_up(false);
++ }
++
++ mutex_unlock(&vpu_data.lock);
++ return 0;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++static int vpu_resume(struct device *dev)
++#else
++static int vpu_resume(struct platform_device *pdev)
++#endif
++{
++ int i;
++
++ mutex_lock(&vpu_data.lock);
++ if (open_count == 0) {
++ /* VPU is released (all instances are freed),
++ * clock should be kept off, context is no longer needed,
++ * power should be kept off on MX6,
++ * disable power gating on MX51 */
++ if (cpu_is_mx51()) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ if (vpu_plat->pg)
++ vpu_plat->pg(0);
++#endif
++ }
++ } else {
++ if (cpu_is_mx53())
++ goto recover_clk;
++
++ /* If VPU is working before suspend, enable
++ * regulator to make usecount right. */
++ vpu_power_up(true);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ if (vpu_plat->pg)
++ vpu_plat->pg(0);
++#endif
++
++ if (bitwork_mem.cpu_addr != 0) {
++ u32 *p = (u32 *) bitwork_mem.cpu_addr;
++ u32 data, pc;
++ u16 data_hi;
++ u16 data_lo;
++
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++
++ pc = READ_REG(BIT_CUR_PC);
++ if (pc) {
++				dev_warn(vpu_dev, "Not powered off after suspend (PC=0x%x)\n", pc);
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ goto recover_clk;
++ }
++
++ /* Restore registers */
++ for (i = 0; i < 64; i++)
++ WRITE_REG(regBk[i], BIT_CODE_BUF_ADDR + (i * 4));
++
++ WRITE_REG(0x0, BIT_RESET_CTRL);
++ WRITE_REG(0x0, BIT_CODE_RUN);
++			/* MX6 RTL has a bug: MBC_SET_SUBBLK_EN is not initialized on reset */
++#ifdef CONFIG_SOC_IMX6Q
++ WRITE_REG(0x0, MBC_SET_SUBBLK_EN);
++#endif
++
++ /*
++ * Re-load boot code, from the codebuffer in external RAM.
++ * Thankfully, we only need 4096 bytes, same for all platforms.
++ */
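++			/*
++			 * Editorial note: each 32-bit word of the saved code
++			 * image is split into two 16-bit halves and written
++			 * to BIT_CODE_DOWN as (index << 16) | half, which is
++			 * why the loop advances i by 4 while consuming two
++			 * words from p[].
++			 */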
++ for (i = 0; i < 2048; i += 4) {
++ data = p[(i / 2) + 1];
++ data_hi = (data >> 16) & 0xFFFF;
++ data_lo = data & 0xFFFF;
++ WRITE_REG((i << 16) | data_hi, BIT_CODE_DOWN);
++ WRITE_REG(((i + 1) << 16) | data_lo,
++ BIT_CODE_DOWN);
++
++ data = p[i / 2];
++ data_hi = (data >> 16) & 0xFFFF;
++ data_lo = data & 0xFFFF;
++ WRITE_REG(((i + 2) << 16) | data_hi,
++ BIT_CODE_DOWN);
++ WRITE_REG(((i + 3) << 16) | data_lo,
++ BIT_CODE_DOWN);
++ }
++
++ if (pc_before_suspend) {
++ WRITE_REG(0x1, BIT_BUSY_FLAG);
++ WRITE_REG(0x1, BIT_CODE_RUN);
++ while (READ_REG(BIT_BUSY_FLAG))
++ ;
++ } else {
++ dev_warn(vpu_dev, "PC=0 before suspend\n");
++ }
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ }
++
++recover_clk:
++ /* Recover vpu clock */
++ for (i = 0; i < vpu_clk_usercount; i++) {
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ }
++ }
++
++ mutex_unlock(&vpu_data.lock);
++ return 0;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++static int vpu_runtime_suspend(struct device *dev)
++{
++	dev_dbg(dev, "vpu busfreq high release.\n");
++ release_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++
++static int vpu_runtime_resume(struct device *dev)
++{
++	dev_dbg(dev, "vpu busfreq high request.\n");
++ request_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++
++static const struct dev_pm_ops vpu_pm_ops = {
++ SET_RUNTIME_PM_OPS(vpu_runtime_suspend, vpu_runtime_resume, NULL)
++ SET_SYSTEM_SLEEP_PM_OPS(vpu_suspend, vpu_resume)
++};
++#endif
++
++#else
++#define vpu_suspend NULL
++#define vpu_resume NULL
++#endif /* !CONFIG_PM */
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++static const struct of_device_id vpu_of_match[] = {
++ { .compatible = "fsl,imx6-vpu", },
++ { .compatible = "fsl,imx6q-vpu", },
++ {/* sentinel */}
++};
++MODULE_DEVICE_TABLE(of, vpu_of_match);
++#endif
++
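++/*
++ * Hedged device-tree sketch (editorial addition, not part of this patch):
++ * the shape of a node this driver binds to. The property names are taken
++ * from this file ("iramsize", the "iram" phandle, the "vpu_regs" register
++ * resource, the "bit"/"jpeg" interrupt names and the "per" clock name);
++ * the unit address, sizes and cell values are board-specific assumptions.
++ *
++ *	vpu@02040000 {
++ *		compatible = "fsl,imx6q-vpu";
++ *		reg = <0x02040000 0x3c000>;
++ *		reg-names = "vpu_regs";
++ *		interrupts = <0 12 0x04>, <0 3 0x04>;
++ *		interrupt-names = "bit", "jpeg";
++ *		clocks = <&clks 168>;
++ *		clock-names = "per";
++ *		iramsize = <0x21000>;
++ *		iram = <&ocram>;
++ *	};
++ */
++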
++/*! Driver definition
++ *
++ */
++static struct platform_driver mxcvpu_driver = {
++ .driver = {
++ .name = "mxc_vpu",
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
++ .of_match_table = vpu_of_match,
++#ifdef CONFIG_PM
++ .pm = &vpu_pm_ops,
++#endif
++#endif
++ },
++ .probe = vpu_dev_probe,
++ .remove = vpu_dev_remove,
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ .suspend = vpu_suspend,
++ .resume = vpu_resume,
++#endif
++};
++
++static int __init vpu_init(void)
++{
++ int ret = platform_driver_register(&mxcvpu_driver);
++
++ init_waitqueue_head(&vpu_queue);
++
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
++ memblock_analyze();
++ top_address_DRAM = memblock_end_of_DRAM_with_reserved();
++#endif
++
++ return ret;
++}
++
++static void __exit vpu_exit(void)
++{
++ if (vpu_major > 0) {
++ device_destroy(vpu_class, MKDEV(vpu_major, 0));
++ class_destroy(vpu_class);
++ unregister_chrdev(vpu_major, "mxc_vpu");
++ vpu_major = 0;
++ }
++
++ vpu_free_dma_buffer(&bitwork_mem);
++ vpu_free_dma_buffer(&pic_para_mem);
++ vpu_free_dma_buffer(&user_data_mem);
++
++ /* reset VPU state */
++ vpu_power_up(true);
++ clk_prepare(vpu_clk);
++ clk_enable(vpu_clk);
++ vpu_reset();
++ clk_disable(vpu_clk);
++ clk_unprepare(vpu_clk);
++ vpu_power_up(false);
++
++ clk_put(vpu_clk);
++
++ platform_driver_unregister(&mxcvpu_driver);
++ return;
++}
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX/MXC");
++MODULE_LICENSE("GPL");
++
++module_init(vpu_init);
++module_exit(vpu_exit);
+diff -Nur linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+--- linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c 2015-07-27 23:13:06.848547454 +0200
+@@ -1011,6 +1011,14 @@
+ return 0;
+ }
+
++static void brcmf_sdiod_host_fixup(struct mmc_host *host)
++{
++ /* runtime-pm powers off the device */
++ pm_runtime_forbid(host->parent);
++ /* avoid removal detection upon resume */
++ host->caps |= MMC_CAP_NONREMOVABLE;
++}
++
+ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
+ {
+ struct sdio_func *func;
+@@ -1076,7 +1084,7 @@
+ ret = -ENODEV;
+ goto out;
+ }
+- pm_runtime_forbid(host->parent);
++ brcmf_sdiod_host_fixup(host);
+ out:
+ if (ret)
+ brcmf_sdiod_remove(sdiodev);
+@@ -1246,15 +1254,15 @@
+ brcmf_sdiod_freezer_on(sdiodev);
+ brcmf_sdio_wd_timer(sdiodev->bus, 0);
+
++ sdio_flags = MMC_PM_KEEP_POWER;
+ if (sdiodev->wowl_enabled) {
+- sdio_flags = MMC_PM_KEEP_POWER;
+ if (sdiodev->pdata->oob_irq_supported)
+ enable_irq_wake(sdiodev->pdata->oob_irq_nr);
+ else
+- sdio_flags = MMC_PM_WAKE_SDIO_IRQ;
+- if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
+- brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
++ sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
+ }
++ if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
++ brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
+ return 0;
+ }
+
+diff -Nur linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+--- linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c 2015-07-27 23:13:06.848547454 +0200
+@@ -129,13 +129,47 @@
+ RATETAB_ENT(BRCM_RATE_54M, 0),
+ };
+
+-#define wl_a_rates (__wl_rates + 4)
+-#define wl_a_rates_size 8
+ #define wl_g_rates (__wl_rates + 0)
+-#define wl_g_rates_size 12
++#define wl_g_rates_size ARRAY_SIZE(__wl_rates)
++#define wl_a_rates (__wl_rates + 4)
++#define wl_a_rates_size (wl_g_rates_size - 4)
++
++#define CHAN2G(_channel, _freq) { \
++ .band = IEEE80211_BAND_2GHZ, \
++ .center_freq = (_freq), \
++ .hw_value = (_channel), \
++ .flags = IEEE80211_CHAN_DISABLED, \
++ .max_antenna_gain = 0, \
++ .max_power = 30, \
++}
++
++#define CHAN5G(_channel) { \
++ .band = IEEE80211_BAND_5GHZ, \
++ .center_freq = 5000 + (5 * (_channel)), \
++ .hw_value = (_channel), \
++ .flags = IEEE80211_CHAN_DISABLED, \
++ .max_antenna_gain = 0, \
++ .max_power = 30, \
++}
++
++static struct ieee80211_channel __wl_2ghz_channels[] = {
++ CHAN2G(1, 2412), CHAN2G(2, 2417), CHAN2G(3, 2422), CHAN2G(4, 2427),
++ CHAN2G(5, 2432), CHAN2G(6, 2437), CHAN2G(7, 2442), CHAN2G(8, 2447),
++ CHAN2G(9, 2452), CHAN2G(10, 2457), CHAN2G(11, 2462), CHAN2G(12, 2467),
++ CHAN2G(13, 2472), CHAN2G(14, 2484)
++};
++
++static struct ieee80211_channel __wl_5ghz_channels[] = {
++ CHAN5G(34), CHAN5G(36), CHAN5G(38), CHAN5G(40), CHAN5G(42),
++ CHAN5G(44), CHAN5G(46), CHAN5G(48), CHAN5G(52), CHAN5G(56),
++ CHAN5G(60), CHAN5G(64), CHAN5G(100), CHAN5G(104), CHAN5G(108),
++ CHAN5G(112), CHAN5G(116), CHAN5G(120), CHAN5G(124), CHAN5G(128),
++ CHAN5G(132), CHAN5G(136), CHAN5G(140), CHAN5G(144), CHAN5G(149),
++ CHAN5G(153), CHAN5G(157), CHAN5G(161), CHAN5G(165)
++};
+
+ /* Band templates duplicated per wiphy. The channel info
+- * is filled in after querying the device.
++ * above is added to the band during setup.
+ */
+ static const struct ieee80211_supported_band __wl_band_2ghz = {
+ .band = IEEE80211_BAND_2GHZ,
+@@ -143,7 +177,7 @@
+ .n_bitrates = wl_g_rates_size,
+ };
+
+-static const struct ieee80211_supported_band __wl_band_5ghz_a = {
++static const struct ieee80211_supported_band __wl_band_5ghz = {
+ .band = IEEE80211_BAND_5GHZ,
+ .bitrates = wl_a_rates,
+ .n_bitrates = wl_a_rates_size,
+@@ -5253,40 +5287,6 @@
+ return err;
+ }
+
+-/* Filter the list of channels received from firmware counting only
+- * the 20MHz channels. The wiphy band data only needs those which get
+- * flagged to indicate if they can take part in higher bandwidth.
+- */
+-static void brcmf_count_20mhz_channels(struct brcmf_cfg80211_info *cfg,
+- struct brcmf_chanspec_list *chlist,
+- u32 chcnt[])
+-{
+- u32 total = le32_to_cpu(chlist->count);
+- struct brcmu_chan ch;
+- int i;
+-
+- for (i = 0; i < total; i++) {
+- ch.chspec = (u16)le32_to_cpu(chlist->element[i]);
+- cfg->d11inf.decchspec(&ch);
+-
+- /* Firmware gives a ordered list. We skip non-20MHz
+- * channels is 2G. For 5G we can abort upon reaching
+- * a non-20MHz channel in the list.
+- */
+- if (ch.bw != BRCMU_CHAN_BW_20) {
+- if (ch.band == BRCMU_CHAN_BAND_5G)
+- break;
+- else
+- continue;
+- }
+-
+- if (ch.band == BRCMU_CHAN_BAND_2G)
+- chcnt[0] += 1;
+- else if (ch.band == BRCMU_CHAN_BAND_5G)
+- chcnt[1] += 1;
+- }
+-}
+-
+ static void brcmf_update_bw40_channel_flag(struct ieee80211_channel *channel,
+ struct brcmu_chan *ch)
+ {
+@@ -5322,7 +5322,6 @@
+ u32 i, j;
+ u32 total;
+ u32 chaninfo;
+- u32 chcnt[2] = { 0, 0 };
+ u32 index;
+
+ pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
+@@ -5339,42 +5338,15 @@
+ goto fail_pbuf;
+ }
+
+- brcmf_count_20mhz_channels(cfg, list, chcnt);
+ wiphy = cfg_to_wiphy(cfg);
+- if (chcnt[0]) {
+- band = kmemdup(&__wl_band_2ghz, sizeof(__wl_band_2ghz),
+- GFP_KERNEL);
+- if (band == NULL) {
+- err = -ENOMEM;
+- goto fail_pbuf;
+- }
+- band->channels = kcalloc(chcnt[0], sizeof(*channel),
+- GFP_KERNEL);
+- if (band->channels == NULL) {
+- kfree(band);
+- err = -ENOMEM;
+- goto fail_pbuf;
+- }
+- band->n_channels = 0;
+- wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+- }
+- if (chcnt[1]) {
+- band = kmemdup(&__wl_band_5ghz_a, sizeof(__wl_band_5ghz_a),
+- GFP_KERNEL);
+- if (band == NULL) {
+- err = -ENOMEM;
+- goto fail_band2g;
+- }
+- band->channels = kcalloc(chcnt[1], sizeof(*channel),
+- GFP_KERNEL);
+- if (band->channels == NULL) {
+- kfree(band);
+- err = -ENOMEM;
+- goto fail_band2g;
+- }
+- band->n_channels = 0;
+- wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+- }
++ band = wiphy->bands[IEEE80211_BAND_2GHZ];
++ if (band)
++ for (i = 0; i < band->n_channels; i++)
++ band->channels[i].flags = IEEE80211_CHAN_DISABLED;
++ band = wiphy->bands[IEEE80211_BAND_5GHZ];
++ if (band)
++ for (i = 0; i < band->n_channels; i++)
++ band->channels[i].flags = IEEE80211_CHAN_DISABLED;
+
+ total = le32_to_cpu(list->count);
+ for (i = 0; i < total; i++) {
+@@ -5389,6 +5361,8 @@
+ brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec);
+ continue;
+ }
++ if (!band)
++ continue;
+ if (!(bw_cap[band->band] & WLC_BW_40MHZ_BIT) &&
+ ch.bw == BRCMU_CHAN_BW_40)
+ continue;
+@@ -5416,9 +5390,9 @@
+ } else if (ch.bw == BRCMU_CHAN_BW_40) {
+ brcmf_update_bw40_channel_flag(&channel[index], &ch);
+ } else {
+- /* disable other bandwidths for now as mentioned
+- * order assure they are enabled for subsequent
+- * chanspecs.
++				/* enable the channel and disable other bandwidths
++				 * for now; the mentioned order assures they are
++				 * enabled for subsequent chanspecs.
+ */
+ channel[index].flags = IEEE80211_CHAN_NO_HT40 |
+ IEEE80211_CHAN_NO_80MHZ;
+@@ -5437,16 +5411,8 @@
+ IEEE80211_CHAN_NO_IR;
+ }
+ }
+- if (index == band->n_channels)
+- band->n_channels++;
+ }
+- kfree(pbuf);
+- return 0;
+
+-fail_band2g:
+- kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
+- kfree(wiphy->bands[IEEE80211_BAND_2GHZ]);
+- wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
+ fail_pbuf:
+ kfree(pbuf);
+ return err;
+@@ -5779,7 +5745,12 @@
+
+ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
+ {
++ struct ieee80211_supported_band *band;
+ struct ieee80211_iface_combination ifc_combo;
++ __le32 bandlist[3];
++ u32 n_bands;
++ int err, i;
++
+ wiphy->max_scan_ssids = WL_NUM_SCAN_MAX;
+ wiphy->max_scan_ie_len = BRCMF_SCAN_IE_LEN_MAX;
+ wiphy->max_num_pmkids = WL_NUM_PMKIDS_MAX;
+@@ -5812,7 +5783,8 @@
+ wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
+ wiphy->mgmt_stypes = brcmf_txrx_stypes;
+ wiphy->max_remain_on_channel_duration = 5000;
+- brcmf_wiphy_pno_params(wiphy);
++ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO))
++ brcmf_wiphy_pno_params(wiphy);
+
+ /* vendor commands/events support */
+ wiphy->vendor_commands = brcmf_vendor_cmds;
+@@ -5821,7 +5793,52 @@
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL))
+ brcmf_wiphy_wowl_params(wiphy);
+
+- return brcmf_setup_wiphybands(wiphy);
++ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_BANDLIST, &bandlist,
++ sizeof(bandlist));
++ if (err) {
++ brcmf_err("could not obtain band info: err=%d\n", err);
++ return err;
++ }
++ /* first entry in bandlist is number of bands */
++ n_bands = le32_to_cpu(bandlist[0]);
++ for (i = 1; i <= n_bands && i < ARRAY_SIZE(bandlist); i++) {
++ if (bandlist[i] == cpu_to_le32(WLC_BAND_2G)) {
++ band = kmemdup(&__wl_band_2ghz, sizeof(__wl_band_2ghz),
++ GFP_KERNEL);
++ if (!band)
++ return -ENOMEM;
++
++ band->channels = kmemdup(&__wl_2ghz_channels,
++ sizeof(__wl_2ghz_channels),
++ GFP_KERNEL);
++ if (!band->channels) {
++ kfree(band);
++ return -ENOMEM;
++ }
++
++ band->n_channels = ARRAY_SIZE(__wl_2ghz_channels);
++ wiphy->bands[IEEE80211_BAND_2GHZ] = band;
++ }
++ if (bandlist[i] == cpu_to_le32(WLC_BAND_5G)) {
++ band = kmemdup(&__wl_band_5ghz, sizeof(__wl_band_5ghz),
++ GFP_KERNEL);
++ if (!band)
++ return -ENOMEM;
++
++ band->channels = kmemdup(&__wl_5ghz_channels,
++ sizeof(__wl_5ghz_channels),
++ GFP_KERNEL);
++ if (!band->channels) {
++ kfree(band);
++ return -ENOMEM;
++ }
++
++ band->n_channels = ARRAY_SIZE(__wl_5ghz_channels);
++ wiphy->bands[IEEE80211_BAND_5GHZ] = band;
++ }
++ }
++ err = brcmf_setup_wiphybands(wiphy);
++ return err;
+ }
+
+ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
+@@ -5998,7 +6015,15 @@
+ brcmf_dbg(TRACE, "enter: initiator=%d, alpha=%c%c\n", req->initiator,
+ req->alpha2[0], req->alpha2[1]);
+
+- /* ignore non-ISO3166 country codes */
++	/* ignore non-ISO3166 country codes;
++	 * don't report an error on "00", the world-roaming
++	 * designator: the firmware doesn't support it, but there
++	 * is no reason to pass that info to userspace either
++	 */
++
++ if (req->alpha2[0] == '0' && req->alpha2[1] == '0')
++ return;
++
+ for (i = 0; i < sizeof(req->alpha2); i++)
+ if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') {
+ brcmf_err("not a ISO3166 code\n");
+@@ -6007,11 +6032,18 @@
+ memset(&ccreq, 0, sizeof(ccreq));
+ ccreq.rev = cpu_to_le32(-1);
+ memcpy(ccreq.ccode, req->alpha2, sizeof(req->alpha2));
+- brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq));
++ if (brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq))) {
++ brcmf_err("firmware rejected country setting\n");
++ return;
++ }
++ brcmf_setup_wiphybands(wiphy);
+ }
+
+ static void brcmf_free_wiphy(struct wiphy *wiphy)
+ {
++ if (!wiphy)
++ return;
++
+ kfree(wiphy->iface_combinations);
+ if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
+ kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
+diff -Nur linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/commonring.c linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/commonring.c
+--- linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/commonring.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/commonring.c 2015-07-27 23:13:06.848547454 +0200
+@@ -22,17 +22,6 @@
+ #include "core.h"
+ #include "commonring.h"
+
+-
+-/* dma flushing needs implementation for mips and arm platforms. Should
+- * be put in util. Note, this is not real flushing. It is virtual non
+- * cached memory. Only write buffers should have to be drained. Though
+- * this may be different depending on platform......
+- * SEE ALSO msgbuf.c
+- */
+-#define brcmf_dma_flush(addr, len)
+-#define brcmf_dma_invalidate_cache(addr, len)
+-
+-
+ void brcmf_commonring_register_cb(struct brcmf_commonring *commonring,
+ int (*cr_ring_bell)(void *ctx),
+ int (*cr_update_rptr)(void *ctx),
+@@ -206,14 +195,9 @@
+ address = commonring->buf_addr;
+ address += (commonring->f_ptr * commonring->item_len);
+ if (commonring->f_ptr > commonring->w_ptr) {
+- brcmf_dma_flush(address,
+- (commonring->depth - commonring->f_ptr) *
+- commonring->item_len);
+ address = commonring->buf_addr;
+ commonring->f_ptr = 0;
+ }
+- brcmf_dma_flush(address, (commonring->w_ptr - commonring->f_ptr) *
+- commonring->item_len);
+
+ commonring->f_ptr = commonring->w_ptr;
+
+@@ -258,8 +242,6 @@
+ if (commonring->r_ptr == commonring->depth)
+ commonring->r_ptr = 0;
+
+-	brcmf_dma_invalidate_cache(ret_addr, *n_items * commonring->item_len);
+-
+ return ret_addr;
+ }
+
+diff -Nur linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/core.c linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/core.c
+--- linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/core.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/core.c 2015-07-27 23:13:06.848547454 +0200
+@@ -791,15 +791,15 @@
+ * in case we missed the BRCMF_E_IF_DEL event.
+ */
+ if (ifp) {
+- brcmf_err("ERROR: netdev:%s already exists\n",
+- ifp->ndev->name);
+ if (ifidx) {
++ brcmf_err("ERROR: netdev:%s already exists, deleting old interface\n",
++ ifp->ndev->name);
+ netif_stop_queue(ifp->ndev);
+ unregister_netdev(ifp->ndev);
+ free_netdev(ifp->ndev);
+ drvr->iflist[bssidx] = NULL;
+ } else {
+- brcmf_err("ignore IF event\n");
++ brcmf_dbg(EVENT, "ignore IF event\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+diff -Nur linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/feature.c linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/feature.c
+--- linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/feature.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/feature.c 2015-07-27 23:13:06.848547454 +0200
+@@ -124,9 +124,11 @@
+ struct brcmf_if *ifp = drvr->iflist[0];
+
+ brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
++ brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn");
+ if (drvr->bus_if->wowl_supported)
+ brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
+- if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID)
++ if ((drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID) &&
++ (drvr->bus_if->chip != BRCM_CC_4330_CHIP_ID))
+ brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
+
+ /* set chip related quirks */
+diff -Nur linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/feature.h linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/feature.h
+--- linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/feature.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/feature.h 2015-07-27 23:13:06.848547454 +0200
+@@ -19,11 +19,15 @@
+ /*
+ * Features:
+ *
++ * MBSS: multiple BSSID support (eg. guest network in AP mode).
+ * MCHAN: multi-channel for concurrent P2P.
++ * PNO: preferred network offload.
++ * WOWL: Wake-On-WLAN.
+ */
+ #define BRCMF_FEAT_LIST \
+ BRCMF_FEAT_DEF(MBSS) \
+ BRCMF_FEAT_DEF(MCHAN) \
++ BRCMF_FEAT_DEF(PNO) \
+ BRCMF_FEAT_DEF(WOWL)
+ /*
+ * Quirks:
+diff -Nur linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/firmware.c linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/firmware.c
+--- linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/firmware.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/firmware.c 2015-07-27 23:13:06.848547454 +0200
+@@ -23,6 +23,10 @@
+ #include "debug.h"
+ #include "firmware.h"
+
++#define BRCMF_FW_MAX_NVRAM_SIZE 64000
++#define BRCMF_FW_NVRAM_DEVPATH_LEN 19 /* devpath0=pcie/1/4/ */
++#define BRCMF_FW_NVRAM_PCIEDEV_LEN 9 /* pcie/1/4/ */
++
+ char brcmf_firmware_path[BRCMF_FW_PATH_LEN];
+ module_param_string(firmware_path, brcmf_firmware_path,
+ BRCMF_FW_PATH_LEN, 0440);
+@@ -46,6 +50,8 @@
+ * @column: current column in line.
+ * @pos: byte offset in input buffer.
+ * @entry: start position of key,value entry.
++ * @multi_dev_v1: detect pcie multi device v1 (compressed).
++ * @multi_dev_v2: detect pcie multi device v2.
+ */
+ struct nvram_parser {
+ enum nvram_parser_state state;
+@@ -56,6 +62,8 @@
+ u32 column;
+ u32 pos;
+ u32 entry;
++ bool multi_dev_v1;
++ bool multi_dev_v2;
+ };
+
+ static bool is_nvram_char(char c)
+@@ -108,6 +116,10 @@
+ st = COMMENT;
+ else
+ st = VALUE;
++ if (strncmp(&nvp->fwnv->data[nvp->entry], "devpath", 7) == 0)
++ nvp->multi_dev_v1 = true;
++ if (strncmp(&nvp->fwnv->data[nvp->entry], "pcie/", 5) == 0)
++ nvp->multi_dev_v2 = true;
+ } else if (!is_nvram_char(c)) {
+ brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
+ nvp->line, nvp->column);
+@@ -133,6 +145,8 @@
+ ekv = (u8 *)&nvp->fwnv->data[nvp->pos];
+ skv = (u8 *)&nvp->fwnv->data[nvp->entry];
+ cplen = ekv - skv;
++ if (nvp->nvram_len + cplen + 1 >= BRCMF_FW_MAX_NVRAM_SIZE)
++ return END;
+ /* copy to output buffer */
+ memcpy(&nvp->nvram[nvp->nvram_len], skv, cplen);
+ nvp->nvram_len += cplen;
+@@ -180,10 +194,18 @@
+ static int brcmf_init_nvram_parser(struct nvram_parser *nvp,
+ const struct firmware *nv)
+ {
++ size_t size;
++
+ memset(nvp, 0, sizeof(*nvp));
+ nvp->fwnv = nv;
++	/* Limit size to MAX_NVRAM_SIZE, some files contain a lot of comments */
++ if (nv->size > BRCMF_FW_MAX_NVRAM_SIZE)
++ size = BRCMF_FW_MAX_NVRAM_SIZE;
++ else
++ size = nv->size;
+ /* Alloc for extra 0 byte + roundup by 4 + length field */
+- nvp->nvram = kzalloc(nv->size + 1 + 3 + sizeof(u32), GFP_KERNEL);
++ size += 1 + 3 + sizeof(u32);
++ nvp->nvram = kzalloc(size, GFP_KERNEL);
+ if (!nvp->nvram)
+ return -ENOMEM;
+
+@@ -192,12 +214,136 @@
+ return 0;
+ }
+
++/* brcmf_fw_strip_multi_v1: Some nvram files contain settings for multiple
++ * devices. Strip it down for one device, use domain_nr/bus_nr to determine
++ * which data is to be returned. v1 is the version where nvram is stored
++ * compressed and "devpath" maps to an index for valid entries.
++ */
++static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr,
++ u16 bus_nr)
++{
++ u32 i, j;
++ bool found;
++ u8 *nvram;
++ u8 id;
++
++ nvram = kzalloc(nvp->nvram_len + 1 + 3 + sizeof(u32), GFP_KERNEL);
++ if (!nvram)
++ goto fail;
++
++ /* min length: devpath0=pcie/1/4/ + 0:x=y */
++ if (nvp->nvram_len < BRCMF_FW_NVRAM_DEVPATH_LEN + 6)
++ goto fail;
++
++ /* First search for the devpathX and see if it is the configuration
++ * for domain_nr/bus_nr. Search complete nvp
++ */
++ found = false;
++ i = 0;
++ while (i < nvp->nvram_len - BRCMF_FW_NVRAM_DEVPATH_LEN) {
++ /* Format: devpathX=pcie/Y/Z/
++ * Y = domain_nr, Z = bus_nr, X = virtual ID
++ */
++ if ((strncmp(&nvp->nvram[i], "devpath", 7) == 0) &&
++ (strncmp(&nvp->nvram[i + 8], "=pcie/", 6) == 0)) {
++ if (((nvp->nvram[i + 14] - '0') == domain_nr) &&
++ ((nvp->nvram[i + 16] - '0') == bus_nr)) {
++ id = nvp->nvram[i + 7] - '0';
++ found = true;
++ break;
++ }
++ }
++ while (nvp->nvram[i] != 0)
++ i++;
++ i++;
++ }
++ if (!found)
++ goto fail;
++
++ /* Now copy all valid entries, release old nvram and assign new one */
++ i = 0;
++ j = 0;
++ while (i < nvp->nvram_len) {
++ if ((nvp->nvram[i] - '0' == id) && (nvp->nvram[i + 1] == ':')) {
++ i += 2;
++ while (nvp->nvram[i] != 0) {
++ nvram[j] = nvp->nvram[i];
++ i++;
++ j++;
++ }
++ nvram[j] = 0;
++ j++;
++ }
++ while (nvp->nvram[i] != 0)
++ i++;
++ i++;
++ }
++ kfree(nvp->nvram);
++ nvp->nvram = nvram;
++ nvp->nvram_len = j;
++ return;
++
++fail:
++ kfree(nvram);
++ nvp->nvram_len = 0;
++}
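++
++/*
++ * Worked example for the v1 format handled above (editorial sketch; the
++ * key names and values are illustrative, not from this patch). For
++ * domain_nr=1, bus_nr=4 the first pass finds "devpath0=pcie/1/4/", so
++ * id=0, and the second pass keeps only the "0:" entries with the prefix
++ * stripped:
++ *
++ *	devpath0=pcie/1/4/
++ *	0:boardtype=0x062b		->	boardtype=0x062b
++ *	0:macaddr=00:90:4c:c5:12:38	->	macaddr=00:90:4c:c5:12:38
++ *	1:boardtype=0x0111		->	(dropped, other device)
++ */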
++
++/* brcmf_fw_strip_multi_v2: Some nvram files contain settings for multiple
++ * devices. Strip it down for one device, use domain_nr/bus_nr to determine
++ * which data is to be returned. v2 is the version where nvram is stored
++ * uncompressed, all relevant valid entries are identified by
++ * pcie/domain_nr/bus_nr:
++ */
++static void brcmf_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr,
++ u16 bus_nr)
++{
++ u32 i, j;
++ u8 *nvram;
++
++ nvram = kzalloc(nvp->nvram_len + 1 + 3 + sizeof(u32), GFP_KERNEL);
++ if (!nvram)
++ goto fail;
++
++ /* Copy all valid entries, release old nvram and assign new one.
++ * Valid entries are of type pcie/X/Y/ where X = domain_nr and
++ * Y = bus_nr.
++ */
++ i = 0;
++ j = 0;
++ while (i < nvp->nvram_len - BRCMF_FW_NVRAM_PCIEDEV_LEN) {
++ if ((strncmp(&nvp->nvram[i], "pcie/", 5) == 0) &&
++ (nvp->nvram[i + 6] == '/') && (nvp->nvram[i + 8] == '/') &&
++ ((nvp->nvram[i + 5] - '0') == domain_nr) &&
++ ((nvp->nvram[i + 7] - '0') == bus_nr)) {
++ i += BRCMF_FW_NVRAM_PCIEDEV_LEN;
++ while (nvp->nvram[i] != 0) {
++ nvram[j] = nvp->nvram[i];
++ i++;
++ j++;
++ }
++ nvram[j] = 0;
++ j++;
++ }
++ while (nvp->nvram[i] != 0)
++ i++;
++ i++;
++ }
++ kfree(nvp->nvram);
++ nvp->nvram = nvram;
++ nvp->nvram_len = j;
++ return;
++fail:
++ kfree(nvram);
++ nvp->nvram_len = 0;
++}
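++
++/*
++ * Worked example for the v2 format (editorial sketch; key names and
++ * values are illustrative). Entries carry a full "pcie/<domain>/<bus>/"
++ * prefix and only matching ones are kept, again with the prefix
++ * stripped:
++ *
++ *	pcie/1/4/boardtype=0x062b	->	boardtype=0x062b
++ *	pcie/2/1/boardtype=0x0111	->	(dropped, other device)
++ */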
++
+ /* brcmf_nvram_strip :Takes a buffer of "<var>=<value>\n" lines read from a fil
+ * and ending in a NUL. Removes carriage returns, empty lines, comment lines,
+ * and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
+ * End of buffer is completed with token identifying length of buffer.
+ */
+-static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length)
++static void *brcmf_fw_nvram_strip(const struct firmware *nv, u32 *new_length,
++ u16 domain_nr, u16 bus_nr)
+ {
+ struct nvram_parser nvp;
+ u32 pad;
+@@ -212,6 +358,16 @@
+ if (nvp.state == END)
+ break;
+ }
++ if (nvp.multi_dev_v1)
++ brcmf_fw_strip_multi_v1(&nvp, domain_nr, bus_nr);
++ else if (nvp.multi_dev_v2)
++ brcmf_fw_strip_multi_v2(&nvp, domain_nr, bus_nr);
++
++ if (nvp.nvram_len == 0) {
++ kfree(nvp.nvram);
++ return NULL;
++ }
++
+ pad = nvp.nvram_len;
+ *new_length = roundup(nvp.nvram_len + 1, 4);
+ while (pad != *new_length) {
+@@ -239,6 +395,8 @@
+ u16 flags;
+ const struct firmware *code;
+ const char *nvram_name;
++ u16 domain_nr;
++ u16 bus_nr;
+ void (*done)(struct device *dev, const struct firmware *fw,
+ void *nvram_image, u32 nvram_len);
+ };
+@@ -254,7 +412,8 @@
+ goto fail;
+
+ if (fw) {
+- nvram = brcmf_fw_nvram_strip(fw, &nvram_length);
++ nvram = brcmf_fw_nvram_strip(fw, &nvram_length,
++ fwctx->domain_nr, fwctx->bus_nr);
+ release_firmware(fw);
+ if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL))
+ goto fail;
+@@ -309,11 +468,12 @@
+ kfree(fwctx);
+ }
+
+-int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
+- const char *code, const char *nvram,
+- void (*fw_cb)(struct device *dev,
+- const struct firmware *fw,
+- void *nvram_image, u32 nvram_len))
++int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
++ const char *code, const char *nvram,
++ void (*fw_cb)(struct device *dev,
++ const struct firmware *fw,
++ void *nvram_image, u32 nvram_len),
++ u16 domain_nr, u16 bus_nr)
+ {
+ struct brcmf_fw *fwctx;
+
+@@ -333,8 +493,21 @@
+ fwctx->done = fw_cb;
+ if (flags & BRCMF_FW_REQUEST_NVRAM)
+ fwctx->nvram_name = nvram;
++ fwctx->domain_nr = domain_nr;
++ fwctx->bus_nr = bus_nr;
+
+ return request_firmware_nowait(THIS_MODULE, true, code, dev,
+ GFP_KERNEL, fwctx,
+ brcmf_fw_request_code_done);
+ }
++
++int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
++ const char *code, const char *nvram,
++ void (*fw_cb)(struct device *dev,
++ const struct firmware *fw,
++ void *nvram_image, u32 nvram_len))
++{
++ return brcmf_fw_get_firmwares_pcie(dev, flags, code, nvram, fw_cb, 0,
++ 0);
++}
++
+diff -Nur linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/firmware.h linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/firmware.h
+--- linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/firmware.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/firmware.h 2015-07-27 23:13:06.848547454 +0200
+@@ -32,6 +32,12 @@
+ * fails it will not use the callback, but call device_release_driver()
+ * instead which will call the driver .remove() callback.
+ */
++int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags,
++ const char *code, const char *nvram,
++ void (*fw_cb)(struct device *dev,
++ const struct firmware *fw,
++ void *nvram_image, u32 nvram_len),
++ u16 domain_nr, u16 bus_nr);
+ int brcmf_fw_get_firmwares(struct device *dev, u16 flags,
+ const char *code, const char *nvram,
+ void (*fw_cb)(struct device *dev,
+diff -Nur linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/flowring.c linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/flowring.c
+--- linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/flowring.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/flowring.c 2015-07-27 23:13:06.848547454 +0200
+@@ -249,8 +249,8 @@
+ }
+
+
+-void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
+- struct sk_buff *skb)
++u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
++ struct sk_buff *skb)
+ {
+ struct brcmf_flowring_ring *ring;
+
+@@ -271,6 +271,7 @@
+ if (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)
+ brcmf_flowring_block(flow, flowid, false);
+ }
++ return skb_queue_len(&ring->skblist);
+ }
+
+
+diff -Nur linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/flowring.h linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/flowring.h
+--- linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/flowring.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/flowring.h 2015-07-27 23:13:06.848547454 +0200
+@@ -64,8 +64,8 @@
+ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid);
+ void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid);
+ u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid);
+-void brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
+- struct sk_buff *skb);
++u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid,
++ struct sk_buff *skb);
+ struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid);
+ void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid,
+ struct sk_buff *skb);
+diff -Nur linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+--- linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c 2015-07-27 23:13:06.860504792 +0200
+@@ -635,7 +635,7 @@
+ return 0;
+ }
+
+-static int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
++static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
+ u32 slot_id, struct sk_buff **pktout,
+ bool remove_item)
+ {
+diff -Nur linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+--- linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c 2015-07-27 23:13:06.860504792 +0200
+@@ -73,7 +73,7 @@
+ #define BRCMF_MSGBUF_TX_FLUSH_CNT1 32
+ #define BRCMF_MSGBUF_TX_FLUSH_CNT2 96
+
+-#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS 64
++#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS 96
+ #define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS 32
+
+ struct msgbuf_common_hdr {
+@@ -278,16 +278,6 @@
+ struct brcmf_msgbuf_pktid *array;
+ };
+
+-
+-/* dma flushing needs implementation for mips and arm platforms. Should
+- * be put in util. Note, this is not real flushing. It is virtual non
+- * cached memory. Only write buffers should have to be drained. Though
+- * this may be different depending on platform......
+- */
+-#define brcmf_dma_flush(addr, len)
+-#define brcmf_dma_invalidate_cache(addr, len)
+-
+-
+ static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);
+
+
+@@ -462,7 +452,6 @@
+ memcpy(msgbuf->ioctbuf, buf, buf_len);
+ else
+ memset(msgbuf->ioctbuf, 0, buf_len);
+- brcmf_dma_flush(ioctl_buf, buf_len);
+
+ err = brcmf_commonring_write_complete(commonring);
+ brcmf_commonring_unlock(commonring);
+@@ -795,6 +784,8 @@
+ struct brcmf_flowring *flow = msgbuf->flow;
+ struct ethhdr *eh = (struct ethhdr *)(skb->data);
+ u32 flowid;
++ u32 queue_count;
++ bool force;
+
+ flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
+ if (flowid == BRCMF_FLOWRING_INVALID_ID) {
+@@ -802,8 +793,9 @@
+ if (flowid == BRCMF_FLOWRING_INVALID_ID)
+ return -ENOMEM;
+ }
+- brcmf_flowring_enqueue(flow, flowid, skb);
+- brcmf_msgbuf_schedule_txdata(msgbuf, flowid, false);
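++	/*
++	 * Editorial note: the enqueue call below now returns the queue
++	 * depth, so the tx worker is forced to run every
++	 * BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS packets instead of waiting for
++	 * the delay threshold alone.
++	 */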
++ queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
++ force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
++ brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force);
+
+ return 0;
+ }
+diff -Nur linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/of.c linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/of.c
+--- linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/of.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/of.c 2015-07-27 23:13:06.868476350 +0200
+@@ -39,10 +39,16 @@
+ if (!sdiodev->pdata)
+ return;
+
++ if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
++ sdiodev->pdata->drive_strength = val;
++
++ /* make sure there are interrupts defined in the node */
++ if (!of_find_property(np, "interrupts", NULL))
++ return;
++
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ brcmf_err("interrupt could not be mapped\n");
+- devm_kfree(dev, sdiodev->pdata);
+ return;
+ }
+ irqf = irqd_get_trigger_type(irq_get_irq_data(irq));
+@@ -50,7 +56,4 @@
+ sdiodev->pdata->oob_irq_supported = true;
+ sdiodev->pdata->oob_irq_nr = irq;
+ sdiodev->pdata->oob_irq_flags = irqf;
+-
+- if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
+- sdiodev->pdata->drive_strength = val;
+ }
+diff -Nur linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/pcie.c linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
+--- linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/pcie.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/pcie.c 2015-07-27 23:13:06.868476350 +0200
+@@ -110,10 +110,12 @@
+ BRCMF_PCIE_MB_INT_D2H3_DB0 | \
+ BRCMF_PCIE_MB_INT_D2H3_DB1)
+
+-#define BRCMF_PCIE_MIN_SHARED_VERSION 4
++#define BRCMF_PCIE_MIN_SHARED_VERSION 5
+ #define BRCMF_PCIE_MAX_SHARED_VERSION 5
+ #define BRCMF_PCIE_SHARED_VERSION_MASK 0x00FF
+ #define BRCMF_PCIE_SHARED_TXPUSH_SUPPORT 0x4000
++#define BRCMF_PCIE_SHARED_DMA_INDEX 0x10000
++#define BRCMF_PCIE_SHARED_DMA_2B_IDX 0x100000
+
+ #define BRCMF_PCIE_FLAGS_HTOD_SPLIT 0x4000
+ #define BRCMF_PCIE_FLAGS_DTOH_SPLIT 0x8000
+@@ -145,6 +147,10 @@
+ #define BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET 8
+ #define BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET 12
+ #define BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET 16
++#define BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET 20
++#define BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET 28
++#define BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET 36
++#define BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET 44
+ #define BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET 0
+ #define BRCMF_SHARED_RING_MAX_SUB_QUEUES 52
+
+@@ -244,6 +250,13 @@
+ bool mbdata_completed;
+ bool irq_allocated;
+ bool wowl_enabled;
++ u8 dma_idx_sz;
++ void *idxbuf;
++ u32 idxbuf_sz;
++ dma_addr_t idxbuf_dmahandle;
++ u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset);
++ void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
++ u16 value);
+ };
+
+ struct brcmf_pcie_ringbuf {
+@@ -273,15 +286,6 @@
+ };
+
+
+-/* dma flushing needs implementation for mips and arm platforms. Should
+- * be put in util. Note, this is not real flushing. It is virtual non
+- * cached memory. Only write buffers should have to be drained. Though
+- * this may be different depending on platform......
+- */
+-#define brcmf_dma_flush(addr, len)
+-#define brcmf_dma_invalidate_cache(addr, len)
+-
+-
+ static u32
+ brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
+ {
+@@ -329,6 +333,25 @@
+ }
+
+
++static u16
++brcmf_pcie_read_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
++{
++ u16 *address = devinfo->idxbuf + mem_offset;
++
++ return (*(address));
++}
++
++
++static void
++brcmf_pcie_write_idx(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
++ u16 value)
++{
++ u16 *address = devinfo->idxbuf + mem_offset;
++
++ *(address) = value;
++}
++
++
+ static u32
+ brcmf_pcie_read_tcm32(struct brcmf_pciedev_info *devinfo, u32 mem_offset)
+ {
+@@ -874,7 +897,7 @@
+ brcmf_dbg(PCIE, "W r_ptr %d (%d), ring %d\n", commonring->r_ptr,
+ commonring->w_ptr, ring->id);
+
+- brcmf_pcie_write_tcm16(devinfo, ring->r_idx_addr, commonring->r_ptr);
++ devinfo->write_ptr(devinfo, ring->r_idx_addr, commonring->r_ptr);
+
+ return 0;
+ }
+@@ -892,7 +915,7 @@
+ brcmf_dbg(PCIE, "W w_ptr %d (%d), ring %d\n", commonring->w_ptr,
+ commonring->r_ptr, ring->id);
+
+- brcmf_pcie_write_tcm16(devinfo, ring->w_idx_addr, commonring->w_ptr);
++ devinfo->write_ptr(devinfo, ring->w_idx_addr, commonring->w_ptr);
+
+ return 0;
+ }
+@@ -921,7 +944,7 @@
+ if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
+ return -EIO;
+
+- commonring->r_ptr = brcmf_pcie_read_tcm16(devinfo, ring->r_idx_addr);
++ commonring->r_ptr = devinfo->read_ptr(devinfo, ring->r_idx_addr);
+
+ brcmf_dbg(PCIE, "R r_ptr %d (%d), ring %d\n", commonring->r_ptr,
+ commonring->w_ptr, ring->id);
+@@ -939,7 +962,7 @@
+ if (devinfo->state != BRCMFMAC_PCIE_STATE_UP)
+ return -EIO;
+
+- commonring->w_ptr = brcmf_pcie_read_tcm16(devinfo, ring->w_idx_addr);
++ commonring->w_ptr = devinfo->read_ptr(devinfo, ring->w_idx_addr);
+
+ brcmf_dbg(PCIE, "R w_ptr %d (%d), ring %d\n", commonring->w_ptr,
+ commonring->r_ptr, ring->id);
+@@ -1044,6 +1067,13 @@
+ }
+ kfree(devinfo->shared.flowrings);
+ devinfo->shared.flowrings = NULL;
++ if (devinfo->idxbuf) {
++ dma_free_coherent(&devinfo->pdev->dev,
++ devinfo->idxbuf_sz,
++ devinfo->idxbuf,
++ devinfo->idxbuf_dmahandle);
++ devinfo->idxbuf = NULL;
++ }
+ }
+
+
+@@ -1059,19 +1089,72 @@
+ u32 addr;
+ u32 ring_mem_ptr;
+ u32 i;
++ u64 address;
++ u32 bufsz;
+ u16 max_sub_queues;
++ u8 idx_offset;
+
+ ring_addr = devinfo->shared.ring_info_addr;
+ brcmf_dbg(PCIE, "Base ring addr = 0x%08x\n", ring_addr);
++ addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
++ max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
+
+- addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
+- d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+- addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
+- d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+- addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
+- h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+- addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
+- h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
++ if (devinfo->dma_idx_sz != 0) {
++ bufsz = (BRCMF_NROF_D2H_COMMON_MSGRINGS + max_sub_queues) *
++ devinfo->dma_idx_sz * 2;
++ devinfo->idxbuf = dma_alloc_coherent(&devinfo->pdev->dev, bufsz,
++ &devinfo->idxbuf_dmahandle,
++ GFP_KERNEL);
++ if (!devinfo->idxbuf)
++ devinfo->dma_idx_sz = 0;
++ }
++
++ if (devinfo->dma_idx_sz == 0) {
++ addr = ring_addr + BRCMF_SHARED_RING_D2H_W_IDX_PTR_OFFSET;
++ d2h_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
++ addr = ring_addr + BRCMF_SHARED_RING_D2H_R_IDX_PTR_OFFSET;
++ d2h_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
++ addr = ring_addr + BRCMF_SHARED_RING_H2D_W_IDX_PTR_OFFSET;
++ h2d_w_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
++ addr = ring_addr + BRCMF_SHARED_RING_H2D_R_IDX_PTR_OFFSET;
++ h2d_r_idx_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
++ idx_offset = sizeof(u32);
++ devinfo->write_ptr = brcmf_pcie_write_tcm16;
++ devinfo->read_ptr = brcmf_pcie_read_tcm16;
++ brcmf_dbg(PCIE, "Using TCM indices\n");
++ } else {
++ memset(devinfo->idxbuf, 0, bufsz);
++ devinfo->idxbuf_sz = bufsz;
++ idx_offset = devinfo->dma_idx_sz;
++ devinfo->write_ptr = brcmf_pcie_write_idx;
++ devinfo->read_ptr = brcmf_pcie_read_idx;
++
++ h2d_w_idx_ptr = 0;
++ addr = ring_addr + BRCMF_SHARED_RING_H2D_WP_HADDR_OFFSET;
++ address = (u64)devinfo->idxbuf_dmahandle;
++ brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
++ brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
++
++ h2d_r_idx_ptr = h2d_w_idx_ptr + max_sub_queues * idx_offset;
++ addr = ring_addr + BRCMF_SHARED_RING_H2D_RP_HADDR_OFFSET;
++ address += max_sub_queues * idx_offset;
++ brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
++ brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
++
++ d2h_w_idx_ptr = h2d_r_idx_ptr + max_sub_queues * idx_offset;
++ addr = ring_addr + BRCMF_SHARED_RING_D2H_WP_HADDR_OFFSET;
++ address += max_sub_queues * idx_offset;
++ brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
++ brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
++
++ d2h_r_idx_ptr = d2h_w_idx_ptr +
++ BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
++ addr = ring_addr + BRCMF_SHARED_RING_D2H_RP_HADDR_OFFSET;
++ address += BRCMF_NROF_D2H_COMMON_MSGRINGS * idx_offset;
++ brcmf_pcie_write_tcm32(devinfo, addr, address & 0xffffffff);
++ brcmf_pcie_write_tcm32(devinfo, addr + 4, address >> 32);
++ brcmf_dbg(PCIE, "Using host memory indices\n");
++ }
+
+ addr = ring_addr + BRCMF_SHARED_RING_TCM_MEMLOC_OFFSET;
+ ring_mem_ptr = brcmf_pcie_read_tcm32(devinfo, addr);
+@@ -1085,8 +1168,8 @@
+ ring->id = i;
+ devinfo->shared.commonrings[i] = ring;
+
+- h2d_w_idx_ptr += sizeof(u32);
+- h2d_r_idx_ptr += sizeof(u32);
++ h2d_w_idx_ptr += idx_offset;
++ h2d_r_idx_ptr += idx_offset;
+ ring_mem_ptr += BRCMF_RING_MEM_SZ;
+ }
+
+@@ -1100,13 +1183,11 @@
+ ring->id = i;
+ devinfo->shared.commonrings[i] = ring;
+
+- d2h_w_idx_ptr += sizeof(u32);
+- d2h_r_idx_ptr += sizeof(u32);
++ d2h_w_idx_ptr += idx_offset;
++ d2h_r_idx_ptr += idx_offset;
+ ring_mem_ptr += BRCMF_RING_MEM_SZ;
+ }
+
+- addr = ring_addr + BRCMF_SHARED_RING_MAX_SUB_QUEUES;
+- max_sub_queues = brcmf_pcie_read_tcm16(devinfo, addr);
+ devinfo->shared.nrof_flowrings =
+ max_sub_queues - BRCMF_NROF_H2D_COMMON_MSGRINGS;
+ rings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*ring),
+@@ -1130,15 +1211,15 @@
+ ring);
+ ring->w_idx_addr = h2d_w_idx_ptr;
+ ring->r_idx_addr = h2d_r_idx_ptr;
+- h2d_w_idx_ptr += sizeof(u32);
+- h2d_r_idx_ptr += sizeof(u32);
++ h2d_w_idx_ptr += idx_offset;
++ h2d_r_idx_ptr += idx_offset;
+ }
+ devinfo->shared.flowrings = rings;
+
+ return 0;
+
+ fail:
+- brcmf_err("Allocating commonring buffers failed\n");
++ brcmf_err("Allocating ring buffers failed\n");
+ brcmf_pcie_release_ringbuffers(devinfo);
+ return -ENOMEM;
+ }
+@@ -1171,7 +1252,6 @@
+ goto fail;
+
+ memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
+- brcmf_dma_flush(devinfo->shared.scratch, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
+
+ addr = devinfo->shared.tcm_base_address +
+ BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
+@@ -1189,7 +1269,6 @@
+ goto fail;
+
+ memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
+- brcmf_dma_flush(devinfo->shared.ringupd, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
+
+ addr = devinfo->shared.tcm_base_address +
+ BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
+@@ -1276,10 +1355,13 @@
+ brcmf_err("Unsupported PCIE version %d\n", version);
+ return -EINVAL;
+ }
+- if (shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT) {
+- brcmf_err("Unsupported legacy TX mode 0x%x\n",
+- shared->flags & BRCMF_PCIE_SHARED_TXPUSH_SUPPORT);
+- return -EINVAL;
++
++	/* check whether the firmware supports DMA indices */
++ if (shared->flags & BRCMF_PCIE_SHARED_DMA_INDEX) {
++ if (shared->flags & BRCMF_PCIE_SHARED_DMA_2B_IDX)
++ devinfo->dma_idx_sz = sizeof(u16);
++ else
++ devinfo->dma_idx_sz = sizeof(u32);
+ }
+
+ addr = sharedram_addr + BRCMF_SHARED_MAX_RXBUFPOST_OFFSET;
+@@ -1609,7 +1691,7 @@
+ bus->msgbuf->commonrings[i] =
+ &devinfo->shared.commonrings[i]->commonring;
+
+- flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(flowrings),
++ flowrings = kcalloc(devinfo->shared.nrof_flowrings, sizeof(*flowrings),
+ GFP_KERNEL);
+ if (!flowrings)
+ goto fail;
+@@ -1641,8 +1723,13 @@
+ struct brcmf_pciedev_info *devinfo;
+ struct brcmf_pciedev *pcie_bus_dev;
+ struct brcmf_bus *bus;
++ u16 domain_nr;
++ u16 bus_nr;
+
+- brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
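++	/* domain_nr is made 1-based here, presumably so that 0 can serve
++	 * as an "unspecified domain" value in the firmware/nvram lookup */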
++ domain_nr = pci_domain_nr(pdev->bus) + 1;
++ bus_nr = pdev->bus->number;
++ brcmf_dbg(PCIE, "Enter %x:%x (%d/%d)\n", pdev->vendor, pdev->device,
++ domain_nr, bus_nr);
+
+ ret = -ENOMEM;
+ devinfo = kzalloc(sizeof(*devinfo), GFP_KERNEL);
+@@ -1691,10 +1778,10 @@
+ if (ret)
+ goto fail_bus;
+
+- ret = brcmf_fw_get_firmwares(bus->dev, BRCMF_FW_REQUEST_NVRAM |
+- BRCMF_FW_REQ_NV_OPTIONAL,
+- devinfo->fw_name, devinfo->nvram_name,
+- brcmf_pcie_setup);
++ ret = brcmf_fw_get_firmwares_pcie(bus->dev, BRCMF_FW_REQUEST_NVRAM |
++ BRCMF_FW_REQ_NV_OPTIONAL,
++ devinfo->fw_name, devinfo->nvram_name,
++ brcmf_pcie_setup, domain_nr, bus_nr);
+ if (ret == 0)
+ return 0;
+ fail_bus:
+diff -Nur linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/sdio.c linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/sdio.c
+--- linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/sdio.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/sdio.c 2015-07-27 23:13:06.872462130 +0200
+@@ -3550,10 +3550,6 @@
+ return;
+ }
+
+- if (bus->sdiodev->state != BRCMF_SDIOD_DATA) {
+- brcmf_err("bus is down. we have nothing to do\n");
+- return;
+- }
+ /* Count the interrupt call */
+ bus->sdcnt.intrcount++;
+ if (in_interrupt())
+diff -Nur linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/usb.c linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+--- linux-4.1.3/drivers/net/wireless/brcm80211/brcmfmac/usb.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/net/wireless/brcm80211/brcmfmac/usb.c 2015-07-27 23:13:06.872462130 +0200
+@@ -1270,8 +1270,13 @@
+ bus->chiprev = bus_pub->chiprev;
+
+ /* request firmware here */
+- brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
+- brcmf_usb_probe_phase2);
++ ret = brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo),
++ NULL, brcmf_usb_probe_phase2);
++ if (ret) {
++ brcmf_err("firmware request failed: %d\n", ret);
++ goto fail;
++ }
++
+ return 0;
+
+ fail:
+diff -Nur linux-4.1.3/drivers/spi/spi-imx.c linux-xbian-imx6/drivers/spi/spi-imx.c
+--- linux-4.1.3/drivers/spi/spi-imx.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/spi/spi-imx.c 2015-07-27 23:13:07.837020693 +0200
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Copyright 2004-2007, 2015 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2008 Juergen Beisert
+ *
+ * This program is free software; you can redistribute it and/or
+@@ -53,10 +53,14 @@
+ /* generic defines to abstract from the different register layouts */
+ #define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
+ #define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
++#define MXC_INT_TCEN (1 << 7) /* Transfer complete */
+
+ /* The maximum bytes that a sdma BD can transfer.*/
+ #define MAX_SDMA_BD_BYTES (1 << 15)
+-#define IMX_DMA_TIMEOUT (msecs_to_jiffies(3000))
++/* 3 s for transfers of 1 MB or less; otherwise scale with the transfer length */
++#define IMX_DEFAULT_DMA_TIMEOUT (msecs_to_jiffies(3000))
++#define IMX_DMA_TIMEOUT(len) ((len < 0x100000) ? IMX_DEFAULT_DMA_TIMEOUT : \
++ len * IMX_DEFAULT_DMA_TIMEOUT / 0x100000)
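++/* e.g. a 4 MB (0x400000-byte) transfer is given 4 * 3 s = 12 s */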
+ struct spi_imx_config {
+ unsigned int speed_hz;
+ unsigned int bpw;
+@@ -102,18 +106,23 @@
+
+ /* DMA */
+ unsigned int dma_is_inited;
+- unsigned int dma_finished;
+ bool usedma;
+ u32 rx_wml;
+ u32 tx_wml;
+ u32 rxt_wml;
+ struct completion dma_rx_completion;
+ struct completion dma_tx_completion;
++ struct dma_slave_config rx_config;
++ struct dma_slave_config tx_config;
+
+ const struct spi_imx_devtype_data *devtype_data;
+ int chipselect[0];
+ };
+
++static struct spi_board_info info = {
++ .modalias = "spidev",
++};
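++/* extra spidev device registered on the probed bus by spi_new_device()
++ * in spi_imx_probe(), presumably so userspace always gets a
++ * /dev/spidevB.C node without a device tree child entry */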
++
+ static inline int is_imx27_cspi(struct spi_imx_data *d)
+ {
+ return d->devtype_data->devtype == IMX27_CSPI;
+@@ -201,8 +210,9 @@
+ {
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+
+- if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml)
+- && (transfer->len > spi_imx->tx_wml))
++ if (spi_imx->dma_is_inited &&
++ (transfer->len > spi_imx_get_fifosize(spi_imx)) &&
++ (transfer->len > spi_imx_get_fifosize(spi_imx)))
+ return true;
+ return false;
+ }
+@@ -227,6 +237,7 @@
+ #define MX51_ECSPI_INT 0x10
+ #define MX51_ECSPI_INT_TEEN (1 << 0)
+ #define MX51_ECSPI_INT_RREN (1 << 3)
++#define MX51_ECSPI_INT_TCEN (1 << 7)
+
+ #define MX51_ECSPI_DMA 0x14
+ #define MX51_ECSPI_DMA_TX_WML_OFFSET 0
+@@ -291,17 +302,21 @@
+ if (enable & MXC_INT_RR)
+ val |= MX51_ECSPI_INT_RREN;
+
++ if (enable & MXC_INT_TCEN)
++ val |= MX51_ECSPI_INT_TCEN;
++
+ writel(val, spi_imx->base + MX51_ECSPI_INT);
+ }
+
+ static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
+ {
+ u32 reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
+-
++ /*
++	 * To work around TKT238285, the SDMA script needs to use XCH
++	 * instead of SMC, just like PIO mode.
++ */
+ if (!spi_imx->usedma)
+ reg |= MX51_ECSPI_CTRL_XCH;
+- else if (!spi_imx->dma_finished)
+- reg |= MX51_ECSPI_CTRL_SMC;
+ else
+ reg &= ~MX51_ECSPI_CTRL_SMC;
+ writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
+@@ -311,7 +326,6 @@
+ struct spi_imx_config *config)
+ {
+ u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0, dma = 0;
+- u32 tx_wml_cfg, rx_wml_cfg, rxt_wml_cfg;
+ u32 clk = config->speed_hz, delay;
+
+ /*
+@@ -368,20 +382,10 @@
+ * and enable DMA request.
+ */
+ if (spi_imx->dma_is_inited) {
+- dma = readl(spi_imx->base + MX51_ECSPI_DMA);
+-
+- spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2;
+- rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
+- tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
+- rxt_wml_cfg = spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET;
+- dma = (dma & ~MX51_ECSPI_DMA_TX_WML_MASK
+- & ~MX51_ECSPI_DMA_RX_WML_MASK
+- & ~MX51_ECSPI_DMA_RXT_WML_MASK)
+- | rx_wml_cfg | tx_wml_cfg | rxt_wml_cfg
+- |(1 << MX51_ECSPI_DMA_TEDEN_OFFSET)
+- |(1 << MX51_ECSPI_DMA_RXDEN_OFFSET)
+- |(1 << MX51_ECSPI_DMA_RXTDEN_OFFSET);
+-
++ spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
++ dma = (spi_imx->rx_wml - 1) << MX51_ECSPI_DMA_RX_WML_OFFSET
++ | (1 << MX51_ECSPI_DMA_TEDEN_OFFSET)
++ | (1 << MX51_ECSPI_DMA_RXDEN_OFFSET);
+ writel(dma, spi_imx->base + MX51_ECSPI_DMA);
+ }
+
+@@ -768,6 +772,7 @@
+ {
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+ struct spi_imx_config config;
++ int ret;
+
+ config.bpw = t ? t->bits_per_word : spi->bits_per_word;
+ config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
+@@ -783,12 +788,35 @@
+ if (config.bpw <= 8) {
+ spi_imx->rx = spi_imx_buf_rx_u8;
+ spi_imx->tx = spi_imx_buf_tx_u8;
++ spi_imx->tx_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
++ spi_imx->rx_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ } else if (config.bpw <= 16) {
+ spi_imx->rx = spi_imx_buf_rx_u16;
+ spi_imx->tx = spi_imx_buf_tx_u16;
++ spi_imx->tx_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
++ spi_imx->rx_config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ } else {
+ spi_imx->rx = spi_imx_buf_rx_u32;
+ spi_imx->tx = spi_imx_buf_tx_u32;
++ spi_imx->tx_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
++ spi_imx->rx_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
++ }
++
++ if (spi_imx->bitbang.master->can_dma &&
++ spi_imx_can_dma(spi_imx->bitbang.master, spi, t)) {
++ ret = dmaengine_slave_config(spi_imx->bitbang.master->dma_tx,
++ &spi_imx->tx_config);
++ if (ret) {
++ dev_err(&spi->dev, "error in TX dma configuration.\n");
++ return ret;
++ }
++
++ ret = dmaengine_slave_config(spi_imx->bitbang.master->dma_rx,
++ &spi_imx->rx_config);
++ if (ret) {
++ dev_err(&spi->dev, "error in RX dma configuration.\n");
++ return ret;
++ }
+ }
+
+ spi_imx->devtype_data->config(spi_imx, &config);
+@@ -817,7 +845,6 @@
+ struct spi_master *master,
+ const struct resource *res)
+ {
+- struct dma_slave_config slave_config = {};
+ int ret;
+
+ /* use pio mode for i.mx6dl chip TKT238285 */
+@@ -832,11 +859,11 @@
+ goto err;
+ }
+
+- slave_config.direction = DMA_MEM_TO_DEV;
+- slave_config.dst_addr = res->start + MXC_CSPITXDATA;
+- slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+- slave_config.dst_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
+- ret = dmaengine_slave_config(master->dma_tx, &slave_config);
++ spi_imx->tx_config.direction = DMA_MEM_TO_DEV;
++ spi_imx->tx_config.dst_addr = res->start + MXC_CSPITXDATA;
++ spi_imx->tx_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
++ spi_imx->tx_config.dst_maxburst = spi_imx_get_fifosize(spi_imx) / 4;
++ ret = dmaengine_slave_config(master->dma_tx, &spi_imx->tx_config);
+ if (ret) {
+ dev_err(dev, "error in TX dma configuration.\n");
+ goto err;
+@@ -850,11 +877,11 @@
+ goto err;
+ }
+
+- slave_config.direction = DMA_DEV_TO_MEM;
+- slave_config.src_addr = res->start + MXC_CSPIRXDATA;
+- slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+- slave_config.src_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
+- ret = dmaengine_slave_config(master->dma_rx, &slave_config);
++ spi_imx->rx_config.direction = DMA_DEV_TO_MEM;
++ spi_imx->rx_config.src_addr = res->start + MXC_CSPIRXDATA;
++ spi_imx->rx_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
++ spi_imx->rx_config.src_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
++ ret = dmaengine_slave_config(master->dma_rx, &spi_imx->rx_config);
+ if (ret) {
+ dev_err(dev, "error in RX dma configuration.\n");
+ goto err;
+@@ -890,14 +917,33 @@
+ complete(&spi_imx->dma_tx_completion);
+ }
+
++static void spi_imx_tail_pio_set(struct spi_imx_data *spi_imx, int left)
++{
++
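++	/*
++	 * Re-select the PIO rx function to match the DMA bus width and
++	 * convert the leftover byte count into FIFO words for the PIO
++	 * drain of the tail data.
++	 */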
++ switch (spi_imx->rx_config.src_addr_width) {
++ case DMA_SLAVE_BUSWIDTH_1_BYTE:
++ spi_imx->rx = spi_imx_buf_rx_u8;
++ break;
++ case DMA_SLAVE_BUSWIDTH_2_BYTES:
++ spi_imx->rx = spi_imx_buf_rx_u16;
++ break;
++ case DMA_SLAVE_BUSWIDTH_4_BYTES:
++ spi_imx->rx = spi_imx_buf_rx_u32;
++ break;
++ default:
++ spi_imx->rx = spi_imx_buf_rx_u8;
++ break;
++ }
++
++ spi_imx->txfifo = left / spi_imx->tx_config.dst_addr_width;
++}
++
+ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
+ struct spi_transfer *transfer)
+ {
+ struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
+ int ret;
+- unsigned long timeout;
+- u32 dma;
+- int left;
++ int left = 0;
+ struct spi_master *master = spi_imx->bitbang.master;
+ struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
+
+@@ -914,6 +960,18 @@
+ }
+
+ if (rx) {
++ struct scatterlist *sgl_last = &rx->sgl[rx->nents - 1];
++ unsigned int orig_length = sgl_last->length;
++ int wml_mask = ~(spi_imx->rx_wml - 1);
++ /*
++		 * Adjust the transfer length of the last scatterlist if there is
++		 * tail data, and use a PIO read to fetch the tail, since the DMA
++		 * sometimes misses the last tail interrupt.
++ */
++ left = transfer->len % spi_imx->rx_wml;
++ if (left)
++ sgl_last->length = orig_length & wml_mask;
++
+ desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
+ rx->sgl, rx->nents, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+@@ -929,46 +987,50 @@
+ reinit_completion(&spi_imx->dma_tx_completion);
+
+ /* Trigger the cspi module. */
+- spi_imx->dma_finished = 0;
+-
+- dma = readl(spi_imx->base + MX51_ECSPI_DMA);
+- dma = dma & (~MX51_ECSPI_DMA_RXT_WML_MASK);
+- /* Change RX_DMA_LENGTH trigger dma fetch tail data */
+- left = transfer->len % spi_imx->rxt_wml;
+- if (left)
+- writel(dma | (left << MX51_ECSPI_DMA_RXT_WML_OFFSET),
+- spi_imx->base + MX51_ECSPI_DMA);
+ spi_imx->devtype_data->trigger(spi_imx);
+
+ dma_async_issue_pending(master->dma_tx);
+ dma_async_issue_pending(master->dma_rx);
+	/* Wait for the SDMA to finish the data transfer. */
+- timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
+- IMX_DMA_TIMEOUT);
+- if (!timeout) {
+- pr_warn("%s %s: I/O Error in DMA TX\n",
++ ret = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
++ IMX_DMA_TIMEOUT(transfer->len));
++ if (!ret) {
++ pr_warn("%s %s: I/O Error in DMA TX:%x\n",
+ dev_driver_string(&master->dev),
+- dev_name(&master->dev));
++ dev_name(&master->dev), transfer->len);
+ dmaengine_terminate_all(master->dma_tx);
+ } else {
+- timeout = wait_for_completion_timeout(
+- &spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT);
+- if (!timeout) {
+- pr_warn("%s %s: I/O Error in DMA RX\n",
++ ret = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
++ IMX_DMA_TIMEOUT(transfer->len));
++ if (!ret) {
++ pr_warn("%s %s: I/O Error in DMA RX:%x\n",
+ dev_driver_string(&master->dev),
+- dev_name(&master->dev));
++ dev_name(&master->dev), transfer->len);
+ spi_imx->devtype_data->reset(spi_imx);
+ dmaengine_terminate_all(master->dma_rx);
++ } else if (left) {
++ /* read the tail data by PIO */
++ dma_sync_sg_for_cpu(master->dma_rx->device->dev,
++ &rx->sgl[rx->nents - 1], 1,
++ DMA_FROM_DEVICE);
++ spi_imx->rx_buf = transfer->rx_buf
++ + (transfer->len - left);
++ spi_imx_tail_pio_set(spi_imx, left);
++ reinit_completion(&spi_imx->xfer_done);
++
++ spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TCEN);
++
++ ret = wait_for_completion_timeout(&spi_imx->xfer_done,
++ IMX_DMA_TIMEOUT(transfer->len));
++ if (!ret) {
++ pr_warn("%s %s: I/O Error in RX tail\n",
++ dev_driver_string(&master->dev),
++ dev_name(&master->dev));
++ }
+ }
+- writel(dma |
+- spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET,
+- spi_imx->base + MX51_ECSPI_DMA);
+ }
+
+- spi_imx->dma_finished = 1;
+- spi_imx->devtype_data->trigger(spi_imx);
+-
+- if (!timeout)
++ if (!ret)
+ ret = -ETIMEDOUT;
+ else
+ ret = transfer->len;
+@@ -1013,6 +1075,7 @@
+ spi_imx_can_dma(spi_imx->bitbang.master, spi, transfer)) {
+ spi_imx->usedma = true;
+ ret = spi_imx_dma_transfer(spi_imx, transfer);
++ spi_imx->usedma = false; /* clear the dma flag */
+ if (ret != -EAGAIN)
+ return ret;
+ }
+@@ -1200,6 +1263,7 @@
+ goto out_clk_put;
+ }
+
++ spi_new_device(master, &info);
+ dev_info(&pdev->dev, "probed\n");
+
+ clk_disable(spi_imx->clk_ipg);
+diff -Nur linux-4.1.3/drivers/video/fbdev/core/fbmon.c linux-xbian-imx6/drivers/video/fbdev/core/fbmon.c
+--- linux-4.1.3/drivers/video/fbdev/core/fbmon.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/video/fbdev/core/fbmon.c 2015-07-27 23:13:08.674034323 +0200
+@@ -494,7 +494,7 @@
+ static int get_std_timing(unsigned char *block, struct fb_videomode *mode,
+ int ver, int rev, const struct fb_monspecs *specs)
+ {
+- int i;
++ int xres, yres = 0, ratio, i;
+
+ for (i = 0; i < DMT_SIZE; i++) {
+ u32 std_2byte_code = block[0] << 8 | block[1];
+@@ -502,6 +502,32 @@
+ break;
+ }
+
++ ratio = (block[1] & 0xc0) >> 6;
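++	/*
++	 * EDID standard timings encode the image aspect ratio in bits
++	 * 7:6 of the second byte; derive yres from xres accordingly.
++	 */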
++ switch (ratio) {
++ case 0:
++	/* in EDID 1.3 the meaning of 0 changed to 16:10 (formerly 1:1) */
++ if (ver < 1 || (ver == 1 && rev < 3)) {
++ yres = xres;
++ mode->vmode &= FB_VMODE_ASPECT_1;
++ } else {
++ yres = (xres * 10)/16;
++ mode->vmode &= FB_VMODE_ASPECT_16_10;
++ }
++ break;
++ case 1:
++ yres = (xres * 3)/4;
++ mode->vmode &= FB_VMODE_ASPECT_4_3;
++ break;
++ case 2:
++ yres = (xres * 4)/5;
++ mode->vmode &= FB_VMODE_ASPECT_5_4;
++ break;
++ case 3:
++ yres = (xres * 9)/16;
++ mode->vmode &= FB_VMODE_ASPECT_16_9;
++ break;
++ }
++
+ if (i < DMT_SIZE && dmt_modes[i].mode) {
+ /* DMT mode found */
+ *mode = *dmt_modes[i].mode;
+diff -Nur linux-4.1.3/drivers/video/fbdev/core/fbsysfs.c linux-xbian-imx6/drivers/video/fbdev/core/fbsysfs.c
+--- linux-4.1.3/drivers/video/fbdev/core/fbsysfs.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/video/fbdev/core/fbsysfs.c 2015-07-27 23:13:08.674034323 +0200
+@@ -20,6 +20,7 @@
+ #include <linux/fb.h>
+ #include <linux/console.h>
+ #include <linux/module.h>
++#include <linux/ctype.h>
+
+ #define FB_SYSFS_FLAG_ATTR 1
+
+@@ -119,7 +120,15 @@
+ v = 'i';
+ if (mode->vmode & FB_VMODE_DOUBLE)
+ v = 'd';
++ if (mode->vmode & FB_VMODE_3D_SBS_HALF)
++ m = 'H';
++ if (mode->vmode & FB_VMODE_3D_TOP_BOTTOM)
++ m = 'T';
++ if (mode->vmode & FB_VMODE_3D_FRAME_PACK)
++ m = 'F';
+
++ if (mode->vmode & FB_VMODE_FRACTIONAL)
++ m = tolower(m);
+ return snprintf(&buf[offset], PAGE_SIZE - offset, "%c:%dx%d%c-%d\n",
+ m, mode->xres, mode->yres, v, mode->refresh);
+ }
+diff -Nur linux-4.1.3/drivers/video/Kconfig linux-xbian-imx6/drivers/video/Kconfig
+--- linux-4.1.3/drivers/video/Kconfig 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/video/Kconfig 2015-07-27 23:13:08.626204974 +0200
+@@ -30,6 +30,8 @@
+ source "drivers/video/fbdev/Kconfig"
+ endmenu
+
++source "drivers/video/mxc/Kconfig"
++
+ source "drivers/video/backlight/Kconfig"
+
+ config VGASTATE
+diff -Nur linux-4.1.3/drivers/video/Makefile linux-xbian-imx6/drivers/video/Makefile
+--- linux-4.1.3/drivers/video/Makefile 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/drivers/video/Makefile 2015-07-27 23:13:08.626204974 +0200
+@@ -6,6 +6,7 @@
+ obj-y += backlight/
+
+ obj-y += fbdev/
++obj-y += mxc/
+
+ obj-$(CONFIG_VIDEOMODE_HELPERS) += display_timing.o videomode.o
+ ifeq ($(CONFIG_OF),y)
+diff -Nur linux-4.1.3/drivers/video/mxc/Kconfig linux-xbian-imx6/drivers/video/mxc/Kconfig
+--- linux-4.1.3/drivers/video/mxc/Kconfig 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/video/mxc/Kconfig 2015-07-27 23:13:08.749764128 +0200
+@@ -0,0 +1,55 @@
++config FB_MXC
++ tristate "MXC Framebuffer support"
++ depends on FB
++ select FB_CFB_FILLRECT
++ select FB_CFB_COPYAREA
++ select FB_CFB_IMAGEBLIT
++ select FB_MODE_HELPERS
++ default y
++ help
++ This is a framebuffer device for the MXC LCD Controller.
++ See <http://www.linux-fbdev.org/> for information on framebuffer
++ devices.
++
++ If you plan to use the LCD display with your MXC system, say
++ Y here.
++
++config FB_MXC_SYNC_PANEL
++ depends on FB_MXC
++ tristate "Synchronous Panel Framebuffer"
++
++config FB_MXC_LDB
++ tristate "MXC LDB"
++ depends on FB_MXC_SYNC_PANEL
++ depends on MXC_IPU_V3_FSL || IMX_IPUV3_CORE
++
++config FB_MXC_MIPI_DSI
++ tristate "MXC MIPI_DSI"
++ depends on FB_MXC_SYNC_PANEL
++ depends on MXC_IPU_V3_FSL || IMX_IPUV3_CORE
++
++config FB_MXC_TRULY_WVGA_SYNC_PANEL
++ tristate "TRULY WVGA Panel"
++ depends on FB_MXC_SYNC_PANEL
++ depends on FB_MXC_MIPI_DSI
++
++config FB_MXC_HDMI
++ depends on FB_MXC_SYNC_PANEL
++ depends on MXC_IPU_V3_FSL || IMX_IPUV3_CORE
++ depends on I2C
++ tristate "MXC HDMI driver support"
++ select MFD_MXC_HDMI
++ select HDMI
++ help
++ Driver for the on-chip MXC HDMI controller.
++
++config FB_MXC_DCIC
++ tristate "MXC DCIC"
++ depends on FB_MXC_SYNC_PANEL
++ depends on MXC_IPU_V3_FSL || FB_MXS
++ select VIDEOMODE_HELPERS
++
++config FB_MXC_EDID
++ depends on FB_MXC && I2C
++ tristate "MXC EDID support"
++ default y
+diff -Nur linux-4.1.3/drivers/video/mxc/ldb.c linux-xbian-imx6/drivers/video/mxc/ldb.c
+--- linux-4.1.3/drivers/video/mxc/ldb.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/video/mxc/ldb.c 2015-07-27 23:13:08.749764128 +0200
+@@ -0,0 +1,1036 @@
++/*
++ * Copyright (C) 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++/*!
++ * @file mxc_ldb.c
++ *
++ * @brief This file contains the LDB driver device interface and fops
++ * functions.
++ */
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/err.h>
++#include <linux/clk.h>
++#include <linux/console.h>
++#include <linux/io.h>
++#include <linux/ipu.h>
++#include <linux/mxcfb.h>
++#include <linux/regulator/consumer.h>
++#include <linux/spinlock.h>
++#include <linux/of_device.h>
++#include <linux/mod_devicetable.h>
++#include "mxc_dispdrv.h"
++
++#define DISPDRV_LDB "ldb"
++
++#define LDB_BGREF_RMODE_MASK 0x00008000
++#define LDB_BGREF_RMODE_INT 0x00008000
++#define LDB_BGREF_RMODE_EXT 0x0
++
++#define LDB_DI1_VS_POL_MASK 0x00000400
++#define LDB_DI1_VS_POL_ACT_LOW 0x00000400
++#define LDB_DI1_VS_POL_ACT_HIGH 0x0
++#define LDB_DI0_VS_POL_MASK 0x00000200
++#define LDB_DI0_VS_POL_ACT_LOW 0x00000200
++#define LDB_DI0_VS_POL_ACT_HIGH 0x0
++
++#define LDB_BIT_MAP_CH1_MASK 0x00000100
++#define LDB_BIT_MAP_CH1_JEIDA 0x00000100
++#define LDB_BIT_MAP_CH1_SPWG 0x0
++#define LDB_BIT_MAP_CH0_MASK 0x00000040
++#define LDB_BIT_MAP_CH0_JEIDA 0x00000040
++#define LDB_BIT_MAP_CH0_SPWG 0x0
++
++#define LDB_DATA_WIDTH_CH1_MASK 0x00000080
++#define LDB_DATA_WIDTH_CH1_24 0x00000080
++#define LDB_DATA_WIDTH_CH1_18 0x0
++#define LDB_DATA_WIDTH_CH0_MASK 0x00000020
++#define LDB_DATA_WIDTH_CH0_24 0x00000020
++#define LDB_DATA_WIDTH_CH0_18 0x0
++
++#define LDB_CH1_MODE_MASK 0x0000000C
++#define LDB_CH1_MODE_EN_TO_DI1 0x0000000C
++#define LDB_CH1_MODE_EN_TO_DI0 0x00000004
++#define LDB_CH1_MODE_DISABLE 0x0
++#define LDB_CH0_MODE_MASK 0x00000003
++#define LDB_CH0_MODE_EN_TO_DI1 0x00000003
++#define LDB_CH0_MODE_EN_TO_DI0 0x00000001
++#define LDB_CH0_MODE_DISABLE 0x0
++
++#define LDB_SPLIT_MODE_EN 0x00000010
++
++enum {
++ IMX6_LDB,
++};
++
++enum {
++ LDB_IMX6 = 1,
++};
++
++struct fsl_mxc_ldb_platform_data {
++ int devtype;
++ u32 ext_ref;
++#define LDB_SPL_DI0 1
++#define LDB_SPL_DI1 2
++#define LDB_DUL_DI0 3
++#define LDB_DUL_DI1 4
++#define LDB_SIN0 5
++#define LDB_SIN1 6
++#define LDB_SEP0 7
++#define LDB_SEP1 8
++ int mode;
++ int ipu_id;
++ int disp_id;
++
++ /*only work for separate mode*/
++ int sec_ipu_id;
++ int sec_disp_id;
++};
++
++struct ldb_data {
++ struct platform_device *pdev;
++ struct mxc_dispdrv_handle *disp_ldb;
++ uint32_t *reg;
++ uint32_t *control_reg;
++ uint32_t *gpr3_reg;
++ uint32_t control_reg_data;
++ struct regulator *lvds_bg_reg;
++ int mode;
++ bool inited;
++ struct ldb_setting {
++ struct clk *di_clk;
++ struct clk *ldb_di_clk;
++ struct clk *div_3_5_clk;
++ struct clk *div_7_clk;
++ struct clk *div_sel_clk;
++ bool active;
++ bool clk_en;
++ int ipu;
++ int di;
++ uint32_t ch_mask;
++ uint32_t ch_val;
++ } setting[2];
++ struct notifier_block nb;
++};
++
++static int g_ldb_mode;
++
++static struct fb_videomode ldb_modedb[] = {
++ {
++ "LDB-WXGA", 60, 1280, 800, 14065,
++ 40, 40,
++ 10, 3,
++ 80, 10,
++ 0,
++ FB_VMODE_NONINTERLACED,
++ FB_MODE_IS_DETAILED,},
++ {
++ "LDB-XGA", 60, 1024, 768, 15385,
++ 220, 40,
++ 21, 7,
++ 60, 10,
++ 0,
++ FB_VMODE_NONINTERLACED,
++ FB_MODE_IS_DETAILED,},
++ {
++ "LDB-1080P60", 60, 1920, 1080, 7692,
++ 100, 40,
++ 30, 3,
++ 10, 2,
++ 0,
++ FB_VMODE_NONINTERLACED,
++ FB_MODE_IS_DETAILED,},
++};
++static int ldb_modedb_sz = ARRAY_SIZE(ldb_modedb);
++
++static inline int is_imx6_ldb(struct fsl_mxc_ldb_platform_data *plat_data)
++{
++ return (plat_data->devtype == LDB_IMX6);
++}
++
++static int bits_per_pixel(int pixel_fmt)
++{
++ switch (pixel_fmt) {
++ case IPU_PIX_FMT_BGR24:
++ case IPU_PIX_FMT_RGB24:
++ return 24;
++ break;
++ case IPU_PIX_FMT_BGR666:
++ case IPU_PIX_FMT_RGB666:
++ case IPU_PIX_FMT_LVDS666:
++ return 18;
++ break;
++ default:
++ break;
++ }
++ return 0;
++}
++
++static int valid_mode(int pixel_fmt)
++{
++ return ((pixel_fmt == IPU_PIX_FMT_RGB24) ||
++ (pixel_fmt == IPU_PIX_FMT_BGR24) ||
++ (pixel_fmt == IPU_PIX_FMT_LVDS666) ||
++ (pixel_fmt == IPU_PIX_FMT_RGB666) ||
++ (pixel_fmt == IPU_PIX_FMT_BGR666));
++}
++
++static int parse_ldb_mode(char *mode)
++{
++ int ldb_mode;
++
++ if (!strcmp(mode, "spl0"))
++ ldb_mode = LDB_SPL_DI0;
++ else if (!strcmp(mode, "spl1"))
++ ldb_mode = LDB_SPL_DI1;
++ else if (!strcmp(mode, "dul0"))
++ ldb_mode = LDB_DUL_DI0;
++ else if (!strcmp(mode, "dul1"))
++ ldb_mode = LDB_DUL_DI1;
++ else if (!strcmp(mode, "sin0"))
++ ldb_mode = LDB_SIN0;
++ else if (!strcmp(mode, "sin1"))
++ ldb_mode = LDB_SIN1;
++ else if (!strcmp(mode, "sep0"))
++ ldb_mode = LDB_SEP0;
++ else if (!strcmp(mode, "sep1"))
++ ldb_mode = LDB_SEP1;
++ else
++ ldb_mode = -EINVAL;
++
++ return ldb_mode;
++}
++
++#ifndef MODULE
++/*
++ * "ldb=spl0/1" -- split mode on DI0/1
++ * "ldb=dul0/1" -- dual mode on DI0/1
++ * "ldb=sin0/1" -- single mode on LVDS0/1
++ * "ldb=sep0/1" -- separate mode beginning from LVDS0/1
++ *
++ * There are two LVDS channels (LVDS0 and LVDS1) that can carry video
++ * data; they can be used in split, dual, single, or separate mode.
++ *
++ * Split mode means display data from DI0 or DI1 is sent to both
++ * channels, LVDS0+LVDS1.
++ * Dual mode means display data from DI0 or DI1 is duplicated on LVDS0
++ * and LVDS1, i.e. LVDS0 and LVDS1 show the same content.
++ * Single mode means only DI0/DI1->LVDS0 or DI0/DI1->LVDS1 is used.
++ * Separate mode means DI0/DI1->LVDS0 and DI0/DI1->LVDS1 can work at
++ * the same time.
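++ *
++ * For example, booting with "ldb=sep0" selects separate mode with
++ * LVDS0 as the first output; the option string is parsed by
++ * parse_ldb_mode() above.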
++ */
++static int __init ldb_setup(char *options)
++{
++ g_ldb_mode = parse_ldb_mode(options);
++ return (g_ldb_mode < 0) ? 0 : 1;
++}
++__setup("ldb=", ldb_setup);
++#endif
++
++static int ldb_get_of_property(struct platform_device *pdev,
++ struct fsl_mxc_ldb_platform_data *plat_data)
++{
++ struct device_node *np = pdev->dev.of_node;
++ int err;
++ u32 ipu_id, disp_id;
++ u32 sec_ipu_id, sec_disp_id;
++ char *mode;
++ u32 ext_ref;
++
++ err = of_property_read_string(np, "mode", (const char **)&mode);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property mode fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "ext_ref", &ext_ref);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property ext_ref fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "ipu_id", &ipu_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property ipu_id fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "disp_id", &disp_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property disp_id fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "sec_ipu_id", &sec_ipu_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property sec_ipu_id fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "sec_disp_id", &sec_disp_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property sec_disp_id fail\n");
++ return err;
++ }
++
++ plat_data->mode = parse_ldb_mode(mode);
++ plat_data->ext_ref = ext_ref;
++ plat_data->ipu_id = ipu_id;
++ plat_data->disp_id = disp_id;
++ plat_data->sec_ipu_id = sec_ipu_id;
++ plat_data->sec_disp_id = sec_disp_id;
++
++ return err;
++}
++
++static int find_ldb_setting(struct ldb_data *ldb, struct fb_info *fbi)
++{
++ char *id_di[] = {
++ "DISP3 BG",
++ "DISP3 BG - DI1",
++ };
++ char id[16];
++ int i;
++
++ for (i = 0; i < 2; i++) {
++ if (ldb->setting[i].active) {
++ memset(id, 0, 16);
++ memcpy(id, id_di[ldb->setting[i].di],
++ strlen(id_di[ldb->setting[i].di]));
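++			/* id[4] is the '3' of "DISP3"; adding the IPU index
++			 * yields e.g. "DISP4 BG" for a framebuffer on the
++			 * second IPU */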
++ id[4] += ldb->setting[i].ipu;
++ if (!strcmp(id, fbi->fix.id))
++ return i;
++ }
++ }
++ return -EINVAL;
++}
++
++static int ldb_disp_setup(struct mxc_dispdrv_handle *disp, struct fb_info *fbi)
++{
++ uint32_t reg, val;
++ uint32_t pixel_clk, rounded_pixel_clk;
++ struct clk *ldb_clk_parent;
++ struct ldb_data *ldb = mxc_dispdrv_getdata(disp);
++ int setting_idx, di;
++ int ret;
++
++ setting_idx = find_ldb_setting(ldb, fbi);
++ if (setting_idx < 0)
++ return setting_idx;
++
++ di = ldb->setting[setting_idx].di;
++
++ /* restore channel mode setting */
++ val = readl(ldb->control_reg);
++ val |= ldb->setting[setting_idx].ch_val;
++ writel(val, ldb->control_reg);
++ dev_dbg(&ldb->pdev->dev, "LDB setup, control reg:0x%x\n",
++ readl(ldb->control_reg));
++
++ /* vsync setup */
++ reg = readl(ldb->control_reg);
++ if (fbi->var.sync & FB_SYNC_VERT_HIGH_ACT) {
++ if (di == 0)
++ reg = (reg & ~LDB_DI0_VS_POL_MASK)
++ | LDB_DI0_VS_POL_ACT_HIGH;
++ else
++ reg = (reg & ~LDB_DI1_VS_POL_MASK)
++ | LDB_DI1_VS_POL_ACT_HIGH;
++ } else {
++ if (di == 0)
++ reg = (reg & ~LDB_DI0_VS_POL_MASK)
++ | LDB_DI0_VS_POL_ACT_LOW;
++ else
++ reg = (reg & ~LDB_DI1_VS_POL_MASK)
++ | LDB_DI1_VS_POL_ACT_LOW;
++ }
++ writel(reg, ldb->control_reg);
++
++ /* clk setup */
++ if (ldb->setting[setting_idx].clk_en)
++ clk_disable_unprepare(ldb->setting[setting_idx].ldb_di_clk);
++ pixel_clk = (PICOS2KHZ(fbi->var.pixclock)) * 1000UL;
++ ldb_clk_parent = clk_get_parent(ldb->setting[setting_idx].ldb_di_clk);
++ if (IS_ERR(ldb_clk_parent)) {
++ dev_err(&ldb->pdev->dev, "get ldb di parent clk fail\n");
++ return PTR_ERR(ldb_clk_parent);
++ }
++ if ((ldb->mode == LDB_SPL_DI0) || (ldb->mode == LDB_SPL_DI1))
++ ret = clk_set_rate(ldb_clk_parent, pixel_clk * 7 / 2);
++ else
++ ret = clk_set_rate(ldb_clk_parent, pixel_clk * 7);
++ if (ret < 0) {
++ dev_err(&ldb->pdev->dev, "set ldb parent clk fail:%d\n", ret);
++ return ret;
++ }
++ rounded_pixel_clk = clk_round_rate(ldb->setting[setting_idx].ldb_di_clk,
++ pixel_clk);
++ dev_dbg(&ldb->pdev->dev, "pixel_clk:%d, rounded_pixel_clk:%d\n",
++ pixel_clk, rounded_pixel_clk);
++ ret = clk_set_rate(ldb->setting[setting_idx].ldb_di_clk,
++ rounded_pixel_clk);
++ if (ret < 0) {
++ dev_err(&ldb->pdev->dev, "set ldb di clk fail:%d\n", ret);
++ return ret;
++ }
++ ret = clk_prepare_enable(ldb->setting[setting_idx].ldb_di_clk);
++ if (ret < 0) {
++ dev_err(&ldb->pdev->dev, "enable ldb di clk fail:%d\n", ret);
++ return ret;
++ }
++
++ if (!ldb->setting[setting_idx].clk_en)
++ ldb->setting[setting_idx].clk_en = true;
++
++ return 0;
++}
++
++int ldb_fb_event(struct notifier_block *nb, unsigned long val, void *v)
++{
++ struct ldb_data *ldb = container_of(nb, struct ldb_data, nb);
++ struct fb_event *event = v;
++ struct fb_info *fbi = event->info;
++ int index;
++ uint32_t data;
++
++ index = find_ldb_setting(ldb, fbi);
++ if (index < 0)
++ return 0;
++
++ fbi->mode = (struct fb_videomode *)fb_match_mode(&fbi->var,
++ &fbi->modelist);
++
++ if (!fbi->mode) {
++ dev_warn(&ldb->pdev->dev,
++			 "LDB: cannot find mode for xres=%d, yres=%d\n",
++ fbi->var.xres, fbi->var.yres);
++ if (ldb->setting[index].clk_en) {
++ clk_disable(ldb->setting[index].ldb_di_clk);
++ ldb->setting[index].clk_en = false;
++ data = readl(ldb->control_reg);
++ data &= ~ldb->setting[index].ch_mask;
++ writel(data, ldb->control_reg);
++ }
++ return 0;
++ }
++
++ switch (val) {
++ case FB_EVENT_BLANK:
++ {
++ if (*((int *)event->data) == FB_BLANK_UNBLANK) {
++ if (!ldb->setting[index].clk_en) {
++ clk_enable(ldb->setting[index].ldb_di_clk);
++ ldb->setting[index].clk_en = true;
++ }
++ } else {
++ if (ldb->setting[index].clk_en) {
++ clk_disable(ldb->setting[index].ldb_di_clk);
++ ldb->setting[index].clk_en = false;
++ data = readl(ldb->control_reg);
++ data &= ~ldb->setting[index].ch_mask;
++ writel(data, ldb->control_reg);
++ dev_dbg(&ldb->pdev->dev,
++ "LDB blank, control reg:0x%x\n",
++ readl(ldb->control_reg));
++ }
++ }
++ break;
++ }
++ case FB_EVENT_SUSPEND:
++ if (ldb->setting[index].clk_en) {
++ clk_disable(ldb->setting[index].ldb_di_clk);
++ ldb->setting[index].clk_en = false;
++ }
++ break;
++ default:
++ break;
++ }
++ return 0;
++}
++
++#define LVDS_MUX_CTL_WIDTH 2
++#define LVDS_MUX_CTL_MASK 3
++#define LVDS0_MUX_CTL_OFFS 6
++#define LVDS1_MUX_CTL_OFFS 8
++#define LVDS0_MUX_CTL_MASK (LVDS_MUX_CTL_MASK << 6)
++#define LVDS1_MUX_CTL_MASK (LVDS_MUX_CTL_MASK << 8)
++#define ROUTE_IPU_DI(ipu, di) (((ipu << 1) | di) & LVDS_MUX_CTL_MASK)
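++/*
++ * Each LVDS channel has a 2-bit mux field in IOMUXC GPR3: bit 1
++ * selects the IPU and bit 0 the DI, so e.g. ROUTE_IPU_DI(1, 0) = 0x2
++ * routes IPU2-DI0 to that channel.
++ */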
++static int ldb_ipu_ldb_route(int ipu, int di, struct ldb_data *ldb)
++{
++ uint32_t reg;
++ int channel;
++ int shift;
++ int mode = ldb->mode;
++
++ reg = readl(ldb->gpr3_reg);
++ if (mode < LDB_SIN0) {
++ reg &= ~(LVDS0_MUX_CTL_MASK | LVDS1_MUX_CTL_MASK);
++ reg |= (ROUTE_IPU_DI(ipu, di) << LVDS0_MUX_CTL_OFFS) |
++ (ROUTE_IPU_DI(ipu, di) << LVDS1_MUX_CTL_OFFS);
++ dev_dbg(&ldb->pdev->dev,
++ "Dual/Split mode both channels route to IPU%d-DI%d\n",
++ ipu, di);
++ } else if ((mode == LDB_SIN0) || (mode == LDB_SIN1)) {
++ reg &= ~(LVDS0_MUX_CTL_MASK | LVDS1_MUX_CTL_MASK);
++ channel = mode - LDB_SIN0;
++ shift = LVDS0_MUX_CTL_OFFS + channel * LVDS_MUX_CTL_WIDTH;
++ reg |= ROUTE_IPU_DI(ipu, di) << shift;
++ dev_dbg(&ldb->pdev->dev,
++ "Single mode channel %d route to IPU%d-DI%d\n",
++ channel, ipu, di);
++ } else {
++ static bool first = true;
++
++ if (first) {
++ if (mode == LDB_SEP0) {
++ reg &= ~LVDS0_MUX_CTL_MASK;
++ channel = 0;
++ } else {
++ reg &= ~LVDS1_MUX_CTL_MASK;
++ channel = 1;
++ }
++ first = false;
++ } else {
++ if (mode == LDB_SEP0) {
++ reg &= ~LVDS1_MUX_CTL_MASK;
++ channel = 1;
++ } else {
++ reg &= ~LVDS0_MUX_CTL_MASK;
++ channel = 0;
++ }
++ }
++
++ shift = LVDS0_MUX_CTL_OFFS + channel * LVDS_MUX_CTL_WIDTH;
++ reg |= ROUTE_IPU_DI(ipu, di) << shift;
++
++ dev_dbg(&ldb->pdev->dev,
++ "Separate mode channel %d route to IPU%d-DI%d\n",
++ channel, ipu, di);
++ }
++ writel(reg, ldb->gpr3_reg);
++
++ return 0;
++}
++
++static int ldb_disp_init(struct mxc_dispdrv_handle *disp,
++ struct mxc_dispdrv_setting *setting)
++{
++ int ret = 0, i, lvds_channel = 0;
++ struct ldb_data *ldb = mxc_dispdrv_getdata(disp);
++ struct fsl_mxc_ldb_platform_data *plat_data = ldb->pdev->dev.platform_data;
++ struct resource *res;
++ uint32_t reg, setting_idx;
++ uint32_t ch_mask = 0, ch_val = 0;
++ uint32_t ipu_id, disp_id;
++ char di_clk[] = "ipu1_di0_sel";
++ char ldb_clk[] = "ldb_di0";
++ char div_3_5_clk[] = "di0_div_3_5";
++ char div_7_clk[] = "di0_div_7";
++ char div_sel_clk[] = "di0_div_sel";
++
++	/* if the input format is not valid, fall back to RGB666 */
++	if (!valid_mode(setting->if_fmt)) {
++		dev_warn(&ldb->pdev->dev, "Input pixel format not valid,"
++			 " using default RGB666\n");
++ setting->if_fmt = IPU_PIX_FMT_RGB666;
++ }
++
++ if (!ldb->inited) {
++ setting_idx = 0;
++ res = platform_get_resource(ldb->pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(&ldb->pdev->dev, "get iomem fail.\n");
++ return -ENOMEM;
++ }
++
++ ldb->reg = devm_ioremap(&ldb->pdev->dev, res->start,
++ resource_size(res));
++ ldb->control_reg = ldb->reg + 2;
++ ldb->gpr3_reg = ldb->reg + 3;
++
++ /* ipu selected by platform data setting */
++ setting->dev_id = plat_data->ipu_id;
++
++ reg = readl(ldb->control_reg);
++
++		/* reference resistor select */
++ reg &= ~LDB_BGREF_RMODE_MASK;
++ if (plat_data->ext_ref)
++ reg |= LDB_BGREF_RMODE_EXT;
++ else
++ reg |= LDB_BGREF_RMODE_INT;
++
++		/* TODO: only SPWG data mapping is used for both channels for now */
++ reg &= ~(LDB_BIT_MAP_CH0_MASK | LDB_BIT_MAP_CH1_MASK);
++ reg |= LDB_BIT_MAP_CH0_SPWG | LDB_BIT_MAP_CH1_SPWG;
++
++ /* channel mode setting */
++ reg &= ~(LDB_CH0_MODE_MASK | LDB_CH1_MODE_MASK);
++ reg &= ~(LDB_DATA_WIDTH_CH0_MASK | LDB_DATA_WIDTH_CH1_MASK);
++
++ if (bits_per_pixel(setting->if_fmt) == 24)
++ reg |= LDB_DATA_WIDTH_CH0_24 | LDB_DATA_WIDTH_CH1_24;
++ else
++ reg |= LDB_DATA_WIDTH_CH0_18 | LDB_DATA_WIDTH_CH1_18;
++
++ if (g_ldb_mode >= LDB_SPL_DI0)
++ ldb->mode = g_ldb_mode;
++ else
++ ldb->mode = plat_data->mode;
++
++ if ((ldb->mode == LDB_SIN0) || (ldb->mode == LDB_SIN1)) {
++ ret = ldb->mode - LDB_SIN0;
++ if (plat_data->disp_id != ret) {
++ dev_warn(&ldb->pdev->dev,
++ "change IPU DI%d to IPU DI%d for LDB "
++ "channel%d.\n",
++ plat_data->disp_id, ret, ret);
++ plat_data->disp_id = ret;
++ }
++ } else if (((ldb->mode == LDB_SEP0) || (ldb->mode == LDB_SEP1))
++ && is_imx6_ldb(plat_data)) {
++ if (plat_data->disp_id == plat_data->sec_disp_id) {
++ dev_err(&ldb->pdev->dev,
++					"For LVDS separate mode, "
++					"two DIs should be different!\n");
++ return -EINVAL;
++ }
++
++ if (((!plat_data->disp_id) && (ldb->mode == LDB_SEP1))
++ || ((plat_data->disp_id) &&
++ (ldb->mode == LDB_SEP0))) {
++ dev_dbg(&ldb->pdev->dev,
++					"LVDS separate mode: "
++					"swapping DI configuration\n");
++ ipu_id = plat_data->ipu_id;
++ disp_id = plat_data->disp_id;
++ plat_data->ipu_id = plat_data->sec_ipu_id;
++ plat_data->disp_id = plat_data->sec_disp_id;
++ plat_data->sec_ipu_id = ipu_id;
++ plat_data->sec_disp_id = disp_id;
++ }
++ }
++
++ if (ldb->mode == LDB_SPL_DI0) {
++ reg |= LDB_SPLIT_MODE_EN | LDB_CH0_MODE_EN_TO_DI0
++ | LDB_CH1_MODE_EN_TO_DI0;
++ setting->disp_id = 0;
++ } else if (ldb->mode == LDB_SPL_DI1) {
++ reg |= LDB_SPLIT_MODE_EN | LDB_CH0_MODE_EN_TO_DI1
++ | LDB_CH1_MODE_EN_TO_DI1;
++ setting->disp_id = 1;
++ } else if (ldb->mode == LDB_DUL_DI0) {
++ reg &= ~LDB_SPLIT_MODE_EN;
++ reg |= LDB_CH0_MODE_EN_TO_DI0 | LDB_CH1_MODE_EN_TO_DI0;
++ setting->disp_id = 0;
++ } else if (ldb->mode == LDB_DUL_DI1) {
++ reg &= ~LDB_SPLIT_MODE_EN;
++ reg |= LDB_CH0_MODE_EN_TO_DI1 | LDB_CH1_MODE_EN_TO_DI1;
++ setting->disp_id = 1;
++ } else if (ldb->mode == LDB_SIN0) {
++ reg &= ~LDB_SPLIT_MODE_EN;
++ setting->disp_id = plat_data->disp_id;
++ if (setting->disp_id == 0)
++ reg |= LDB_CH0_MODE_EN_TO_DI0;
++ else
++ reg |= LDB_CH0_MODE_EN_TO_DI1;
++ ch_mask = LDB_CH0_MODE_MASK;
++ ch_val = reg & LDB_CH0_MODE_MASK;
++ } else if (ldb->mode == LDB_SIN1) {
++ reg &= ~LDB_SPLIT_MODE_EN;
++ setting->disp_id = plat_data->disp_id;
++ if (setting->disp_id == 0)
++ reg |= LDB_CH1_MODE_EN_TO_DI0;
++ else
++ reg |= LDB_CH1_MODE_EN_TO_DI1;
++ ch_mask = LDB_CH1_MODE_MASK;
++ ch_val = reg & LDB_CH1_MODE_MASK;
++ } else { /* separate mode*/
++ setting->disp_id = plat_data->disp_id;
++
++ /* first output is LVDS0 or LVDS1 */
++ if (ldb->mode == LDB_SEP0)
++ lvds_channel = 0;
++ else
++ lvds_channel = 1;
++
++ reg &= ~LDB_SPLIT_MODE_EN;
++
++ if ((lvds_channel == 0) && (setting->disp_id == 0))
++ reg |= LDB_CH0_MODE_EN_TO_DI0;
++ else if ((lvds_channel == 0) && (setting->disp_id == 1))
++ reg |= LDB_CH0_MODE_EN_TO_DI1;
++ else if ((lvds_channel == 1) && (setting->disp_id == 0))
++ reg |= LDB_CH1_MODE_EN_TO_DI0;
++ else
++ reg |= LDB_CH1_MODE_EN_TO_DI1;
++ ch_mask = lvds_channel ? LDB_CH1_MODE_MASK :
++ LDB_CH0_MODE_MASK;
++ ch_val = reg & ch_mask;
++
++ if (bits_per_pixel(setting->if_fmt) == 24) {
++ if (lvds_channel == 0)
++ reg &= ~LDB_DATA_WIDTH_CH1_24;
++ else
++ reg &= ~LDB_DATA_WIDTH_CH0_24;
++ } else {
++ if (lvds_channel == 0)
++ reg &= ~LDB_DATA_WIDTH_CH1_18;
++ else
++ reg &= ~LDB_DATA_WIDTH_CH0_18;
++ }
++ }
++
++ writel(reg, ldb->control_reg);
++ if (ldb->mode < LDB_SIN0) {
++ ch_mask = LDB_CH0_MODE_MASK | LDB_CH1_MODE_MASK;
++ ch_val = reg & (LDB_CH0_MODE_MASK | LDB_CH1_MODE_MASK);
++ }
++ } else { /* second time for separate mode */
++ if ((ldb->mode == LDB_SPL_DI0) ||
++ (ldb->mode == LDB_SPL_DI1) ||
++ (ldb->mode == LDB_DUL_DI0) ||
++ (ldb->mode == LDB_DUL_DI1) ||
++ (ldb->mode == LDB_SIN0) ||
++ (ldb->mode == LDB_SIN1)) {
++			dev_err(&ldb->pdev->dev, "for the second LDB display, "
++					"the LDB mode should be separate mode\n");
++ return -EINVAL;
++ }
++
++ setting_idx = 1;
++ if (is_imx6_ldb(plat_data)) {
++ setting->dev_id = plat_data->sec_ipu_id;
++ setting->disp_id = plat_data->sec_disp_id;
++ } else {
++ setting->dev_id = plat_data->ipu_id;
++ setting->disp_id = !plat_data->disp_id;
++ }
++ if (setting->disp_id == ldb->setting[0].di) {
++			dev_err(&ldb->pdev->dev, "Err: for the second LDB display in"
++					" separate mode, the DI should be different!\n");
++ return -EINVAL;
++ }
++
++ /* second output is LVDS0 or LVDS1 */
++ if (ldb->mode == LDB_SEP0)
++ lvds_channel = 1;
++ else
++ lvds_channel = 0;
++
++ reg = readl(ldb->control_reg);
++ if ((lvds_channel == 0) && (setting->disp_id == 0))
++ reg |= LDB_CH0_MODE_EN_TO_DI0;
++ else if ((lvds_channel == 0) && (setting->disp_id == 1))
++ reg |= LDB_CH0_MODE_EN_TO_DI1;
++ else if ((lvds_channel == 1) && (setting->disp_id == 0))
++ reg |= LDB_CH1_MODE_EN_TO_DI0;
++ else
++ reg |= LDB_CH1_MODE_EN_TO_DI1;
++ ch_mask = lvds_channel ? LDB_CH1_MODE_MASK :
++ LDB_CH0_MODE_MASK;
++ ch_val = reg & ch_mask;
++
++ if (bits_per_pixel(setting->if_fmt) == 24) {
++ if (lvds_channel == 0)
++ reg |= LDB_DATA_WIDTH_CH0_24;
++ else
++ reg |= LDB_DATA_WIDTH_CH1_24;
++ } else {
++ if (lvds_channel == 0)
++ reg |= LDB_DATA_WIDTH_CH0_18;
++ else
++ reg |= LDB_DATA_WIDTH_CH1_18;
++ }
++ writel(reg, ldb->control_reg);
++ }
++
++ /* get clocks */
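++	/*
++	 * The clock names are built by patching the digit in place:
++	 * ldb_clk[6] is the '0' of "ldb_di0" and the div_*_clk[2]
++	 * entries the '0' of "di0_...", so adding the channel or DI
++	 * index selects e.g. "ldb_di1".
++	 */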
++ if (is_imx6_ldb(plat_data) &&
++ ((ldb->mode == LDB_SEP0) || (ldb->mode == LDB_SEP1))) {
++ ldb_clk[6] += lvds_channel;
++ div_3_5_clk[2] += lvds_channel;
++ div_7_clk[2] += lvds_channel;
++ div_sel_clk[2] += lvds_channel;
++ } else {
++ ldb_clk[6] += setting->disp_id;
++ div_3_5_clk[2] += setting->disp_id;
++ div_7_clk[2] += setting->disp_id;
++ div_sel_clk[2] += setting->disp_id;
++ }
++ ldb->setting[setting_idx].ldb_di_clk = clk_get(&ldb->pdev->dev,
++ ldb_clk);
++ if (IS_ERR(ldb->setting[setting_idx].ldb_di_clk)) {
++ dev_err(&ldb->pdev->dev, "get ldb clk failed\n");
++ return PTR_ERR(ldb->setting[setting_idx].ldb_di_clk);
++ }
++
++ ldb->setting[setting_idx].div_3_5_clk = clk_get(&ldb->pdev->dev,
++ div_3_5_clk);
++ if (IS_ERR(ldb->setting[setting_idx].div_3_5_clk)) {
++ dev_err(&ldb->pdev->dev, "get div 3.5 clk failed\n");
++ return PTR_ERR(ldb->setting[setting_idx].div_3_5_clk);
++ }
++ ldb->setting[setting_idx].div_7_clk = clk_get(&ldb->pdev->dev,
++ div_7_clk);
++ if (IS_ERR(ldb->setting[setting_idx].div_7_clk)) {
++ dev_err(&ldb->pdev->dev, "get div 7 clk failed\n");
++ return PTR_ERR(ldb->setting[setting_idx].div_7_clk);
++ }
++
++ ldb->setting[setting_idx].div_sel_clk = clk_get(&ldb->pdev->dev,
++ div_sel_clk);
++ if (IS_ERR(ldb->setting[setting_idx].div_sel_clk)) {
++ dev_err(&ldb->pdev->dev, "get div sel clk failed\n");
++ return PTR_ERR(ldb->setting[setting_idx].div_sel_clk);
++ }
++
++ di_clk[3] += setting->dev_id;
++ di_clk[7] += setting->disp_id;
++ ldb->setting[setting_idx].di_clk = clk_get(&ldb->pdev->dev,
++ di_clk);
++ if (IS_ERR(ldb->setting[setting_idx].di_clk)) {
++ dev_err(&ldb->pdev->dev, "get di clk failed\n");
++ return PTR_ERR(ldb->setting[setting_idx].di_clk);
++ }
++
++ ldb->setting[setting_idx].ch_mask = ch_mask;
++ ldb->setting[setting_idx].ch_val = ch_val;
++
++ if (is_imx6_ldb(plat_data))
++ ldb_ipu_ldb_route(setting->dev_id, setting->disp_id, ldb);
++
++	/* must use a specific video mode defined by this driver */
++ ret = fb_find_mode(&setting->fbi->var, setting->fbi, setting->dft_mode_str,
++ ldb_modedb, ldb_modedb_sz, NULL, setting->default_bpp);
++ if (ret != 1)
++ fb_videomode_to_var(&setting->fbi->var, &ldb_modedb[0]);
++
++ INIT_LIST_HEAD(&setting->fbi->modelist);
++ for (i = 0; i < ldb_modedb_sz; i++) {
++ struct fb_videomode m;
++ fb_var_to_videomode(&m, &setting->fbi->var);
++ if (fb_mode_is_equal(&m, &ldb_modedb[i])) {
++ fb_add_videomode(&ldb_modedb[i],
++ &setting->fbi->modelist);
++ break;
++ }
++ }
++
++ ldb->setting[setting_idx].ipu = setting->dev_id;
++ ldb->setting[setting_idx].di = setting->disp_id;
++
++ return ret;
++}
++
++static int ldb_post_disp_init(struct mxc_dispdrv_handle *disp,
++ int ipu_id, int disp_id)
++{
++ struct ldb_data *ldb = mxc_dispdrv_getdata(disp);
++ int setting_idx = ldb->inited ? 1 : 0;
++ int ret = 0;
++
++ if (!ldb->inited) {
++ ldb->nb.notifier_call = ldb_fb_event;
++ fb_register_client(&ldb->nb);
++ }
++
++ ret = clk_set_parent(ldb->setting[setting_idx].di_clk,
++ ldb->setting[setting_idx].ldb_di_clk);
++ if (ret) {
++		dev_err(&ldb->pdev->dev, "failed to set ldb_di clk as"
++				" the parent of ipu_di clk\n");
++ return ret;
++ }
++
++ if ((ldb->mode == LDB_SPL_DI0) || (ldb->mode == LDB_SPL_DI1)) {
++ ret = clk_set_parent(ldb->setting[setting_idx].div_sel_clk,
++ ldb->setting[setting_idx].div_3_5_clk);
++ if (ret) {
++			dev_err(&ldb->pdev->dev, "failed to set div 3.5 clk as"
++					" the parent of div sel clk\n");
++ return ret;
++ }
++ } else {
++ ret = clk_set_parent(ldb->setting[setting_idx].div_sel_clk,
++ ldb->setting[setting_idx].div_7_clk);
++ if (ret) {
++			dev_err(&ldb->pdev->dev, "failed to set div 7 clk as"
++					" the parent of div sel clk\n");
++ return ret;
++ }
++ }
++
++ /* save active ldb setting for fb notifier */
++ ldb->setting[setting_idx].active = true;
++
++ ldb->inited = true;
++ return ret;
++}
++
++static void ldb_disp_deinit(struct mxc_dispdrv_handle *disp)
++{
++ struct ldb_data *ldb = mxc_dispdrv_getdata(disp);
++ int i;
++
++ writel(0, ldb->control_reg);
++
++ for (i = 0; i < 2; i++) {
++ clk_disable(ldb->setting[i].ldb_di_clk);
++ clk_put(ldb->setting[i].ldb_di_clk);
++ clk_put(ldb->setting[i].div_3_5_clk);
++ clk_put(ldb->setting[i].div_7_clk);
++ clk_put(ldb->setting[i].div_sel_clk);
++ }
++
++ fb_unregister_client(&ldb->nb);
++}
++
++static struct mxc_dispdrv_driver ldb_drv = {
++ .name = DISPDRV_LDB,
++ .init = ldb_disp_init,
++ .post_init = ldb_post_disp_init,
++ .deinit = ldb_disp_deinit,
++ .setup = ldb_disp_setup,
++};
++
++static int ldb_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ struct ldb_data *ldb = dev_get_drvdata(&pdev->dev);
++ uint32_t data;
++
++ if (!ldb->inited)
++ return 0;
++ data = readl(ldb->control_reg);
++ ldb->control_reg_data = data;
++ data &= ~(LDB_CH0_MODE_MASK | LDB_CH1_MODE_MASK);
++ writel(data, ldb->control_reg);
++
++ return 0;
++}
++
++static int ldb_resume(struct platform_device *pdev)
++{
++ struct ldb_data *ldb = dev_get_drvdata(&pdev->dev);
++
++ if (!ldb->inited)
++ return 0;
++ writel(ldb->control_reg_data, ldb->control_reg);
++
++ return 0;
++}
++
++static struct platform_device_id imx_ldb_devtype[] = {
++ {
++ .name = "ldb-imx6",
++ .driver_data = LDB_IMX6,
++ }, {
++ /* sentinel */
++ }
++};
++
++static const struct of_device_id imx_ldb_dt_ids[] = {
++ { .compatible = "fsl,imx6q-ldb", .data = &imx_ldb_devtype[IMX6_LDB],},
++ { /* sentinel */ }
++};
++
++/*!
++ * This function is called by the driver framework to initialize the LDB
++ * device.
++ *
++ * @param pdev The platform device structure for the LDB passed in
++ * by the driver framework.
++ *
++ * @return Returns 0 on success or negative error code on error
++ */
++static int ldb_probe(struct platform_device *pdev)
++{
++ int ret = 0;
++ struct ldb_data *ldb;
++ struct fsl_mxc_ldb_platform_data *plat_data;
++ const struct of_device_id *of_id =
++ of_match_device(imx_ldb_dt_ids, &pdev->dev);
++
++ dev_dbg(&pdev->dev, "%s enter\n", __func__);
++ ldb = devm_kzalloc(&pdev->dev, sizeof(struct ldb_data), GFP_KERNEL);
++ if (!ldb)
++ return -ENOMEM;
++
++ plat_data = devm_kzalloc(&pdev->dev,
++ sizeof(struct fsl_mxc_ldb_platform_data),
++ GFP_KERNEL);
++ if (!plat_data)
++ return -ENOMEM;
++ pdev->dev.platform_data = plat_data;
++ if (of_id)
++ pdev->id_entry = of_id->data;
++ plat_data->devtype = pdev->id_entry->driver_data;
++
++ ret = ldb_get_of_property(pdev, plat_data);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "get ldb of property fail\n");
++ return ret;
++ }
++
++ ldb->pdev = pdev;
++ ldb->disp_ldb = mxc_dispdrv_register(&ldb_drv);
++ mxc_dispdrv_setdata(ldb->disp_ldb, ldb);
++
++ dev_set_drvdata(&pdev->dev, ldb);
++
++ dev_dbg(&pdev->dev, "%s exit\n", __func__);
++ return ret;
++}
++
++static int ldb_remove(struct platform_device *pdev)
++{
++ struct ldb_data *ldb = dev_get_drvdata(&pdev->dev);
++
++ if (!ldb->inited)
++ return 0;
++ mxc_dispdrv_puthandle(ldb->disp_ldb);
++ mxc_dispdrv_unregister(ldb->disp_ldb);
++ return 0;
++}
++
++static struct platform_driver mxcldb_driver = {
++ .driver = {
++ .name = "mxc_ldb",
++ .of_match_table = imx_ldb_dt_ids,
++ },
++ .probe = ldb_probe,
++ .remove = ldb_remove,
++ .suspend = ldb_suspend,
++ .resume = ldb_resume,
++};
++
++static int __init ldb_init(void)
++{
++ return platform_driver_register(&mxcldb_driver);
++}
++
++static void __exit ldb_uninit(void)
++{
++ platform_driver_unregister(&mxcldb_driver);
++}
++
++module_init(ldb_init);
++module_exit(ldb_uninit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("MXC LDB driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-4.1.3/drivers/video/mxc/Makefile linux-xbian-imx6/drivers/video/mxc/Makefile
+--- linux-4.1.3/drivers/video/mxc/Makefile 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/video/mxc/Makefile 2015-07-27 23:13:08.749764128 +0200
+@@ -0,0 +1,7 @@
++obj-$(CONFIG_FB_MXC_LDB) += ldb.o
++obj-$(CONFIG_FB_MXC_MIPI_DSI) += mipi_dsi.o
++obj-$(CONFIG_FB_MXC_TRULY_WVGA_SYNC_PANEL) += mxcfb_hx8369_wvga.o
++obj-$(CONFIG_FB_MXC_HDMI) += mxc_hdmi.o
++obj-$(CONFIG_FB_MXC_EDID) += mxc_edid.o
++obj-$(CONFIG_FB_MXC_SYNC_PANEL) += mxc_dispdrv.o mxc_lcdif.o mxc_ipuv3_fb.o
++obj-$(CONFIG_FB_MXC_DCIC) += mxc_dcic.o
+diff -Nur linux-4.1.3/drivers/video/mxc/mipi_dsi.c linux-xbian-imx6/drivers/video/mxc/mipi_dsi.c
+--- linux-4.1.3/drivers/video/mxc/mipi_dsi.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/video/mxc/mipi_dsi.c 2015-07-27 23:13:08.749764128 +0200
+@@ -0,0 +1,953 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/err.h>
++#include <linux/clk.h>
++#include <linux/console.h>
++#include <linux/io.h>
++#include <linux/bitops.h>
++#include <linux/ipu.h>
++#include <linux/mfd/syscon.h>
++#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
++#include <linux/mipi_dsi.h>
++#include <linux/module.h>
++#include <linux/mxcfb.h>
++#include <linux/backlight.h>
++#include <linux/of_device.h>
++#include <linux/regulator/consumer.h>
++#include <linux/reset.h>
++#include <linux/spinlock.h>
++#include <linux/delay.h>
++#include <video/mipi_display.h>
++
++#include "mxc_dispdrv.h"
++#include "mipi_dsi.h"
++
++#define DISPDRV_MIPI "mipi_dsi"
++#define ROUND_UP(x) ((x)+1)
++#define NS2PS_RATIO (1000)
++#define NUMBER_OF_CHUNKS (0x8)
++#define NULL_PKT_SIZE (0x8)
++#define PHY_BTA_MAXTIME (0xd00)
++#define PHY_LP2HS_MAXTIME (0x40)
++#define PHY_HS2LP_MAXTIME (0x40)
++#define PHY_STOP_WAIT_TIME (0x20)
++#define DSI_CLKMGR_CFG_CLK_DIV (0x107)
++#define DSI_GEN_PLD_DATA_BUF_ENTRY (0x10)
++#define MIPI_MUX_CTRL(v) (((v) & 0x3) << 4)
++#define MIPI_LCD_SLEEP_MODE_DELAY (120)
++#define MIPI_DSI_REG_RW_TIMEOUT (20)
++#define MIPI_DSI_PHY_TIMEOUT (10)
++
++static struct mipi_dsi_match_lcd mipi_dsi_lcd_db[] = {
++#ifdef CONFIG_FB_MXC_TRULY_WVGA_SYNC_PANEL
++ {
++ "TRULY-WVGA",
++ {mipid_hx8369_get_lcd_videomode, mipid_hx8369_lcd_setup}
++ },
++#endif
++ {
++ "", {NULL, NULL}
++ }
++};
++
++struct _mipi_dsi_phy_pll_clk {
++ u32 max_phy_clk;
++ u32 config;
++};
++
++/* configuration data for the DPHY PLL with a 27 MHz reference clock */
++static const struct _mipi_dsi_phy_pll_clk mipi_dsi_phy_pll_clk_table[] = {
++ {1000, 0x74}, /* 950-1000MHz */
++ {950, 0x54}, /* 900-950Mhz */
++ {900, 0x34}, /* 850-900Mhz */
++ {850, 0x14}, /* 800-850MHz */
++ {800, 0x32}, /* 750-800MHz */
++ {750, 0x12}, /* 700-750Mhz */
++ {700, 0x30}, /* 650-700Mhz */
++ {650, 0x10}, /* 600-650MHz */
++ {600, 0x2e}, /* 550-600MHz */
++ {550, 0x0e}, /* 500-550Mhz */
++ {500, 0x2c}, /* 450-500Mhz */
++ {450, 0x0c}, /* 400-450MHz */
++ {400, 0x4a}, /* 360-400MHz */
++ {360, 0x2a}, /* 330-360Mhz */
++ {330, 0x48}, /* 300-330Mhz */
++ {300, 0x28}, /* 270-300MHz */
++ {270, 0x08}, /* 250-270MHz */
++ {250, 0x46}, /* 240-250Mhz */
++ {240, 0x26}, /* 210-240Mhz */
++ {210, 0x06}, /* 200-210MHz */
++ {200, 0x44}, /* 180-200MHz */
++ {180, 0x24}, /* 160-180MHz */
++ {160, 0x04}, /* 150-160MHz */
++};
++
++static int valid_mode(int pixel_fmt)
++{
++ return ((pixel_fmt == IPU_PIX_FMT_RGB24) ||
++ (pixel_fmt == IPU_PIX_FMT_BGR24) ||
++ (pixel_fmt == IPU_PIX_FMT_RGB666) ||
++ (pixel_fmt == IPU_PIX_FMT_RGB565) ||
++ (pixel_fmt == IPU_PIX_FMT_BGR666) ||
++ (pixel_fmt == IPU_PIX_FMT_RGB332));
++}
++
++static inline void mipi_dsi_read_register(struct mipi_dsi_info *mipi_dsi,
++ u32 reg, u32 *val)
++{
++ *val = ioread32(mipi_dsi->mmio_base + reg);
++ dev_dbg(&mipi_dsi->pdev->dev, "read_reg:0x%02x, val:0x%08x.\n",
++ reg, *val);
++}
++
++static inline void mipi_dsi_write_register(struct mipi_dsi_info *mipi_dsi,
++ u32 reg, u32 val)
++{
++ iowrite32(val, mipi_dsi->mmio_base + reg);
++ dev_dbg(&mipi_dsi->pdev->dev, "\t\twrite_reg:0x%02x, val:0x%08x.\n",
++ reg, val);
++}
++
++int mipi_dsi_pkt_write(struct mipi_dsi_info *mipi_dsi,
++ u8 data_type, const u32 *buf, int len)
++{
++ u32 val;
++ u32 status = 0;
++ int write_len = len;
++ uint32_t timeout = 0;
++
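++	/*
++	 * Long packets stream their payload through the GEN_PLD_DATA FIFO
++	 * 32 bits at a time before the header is queued; short packets
++	 * carry the data directly in the GEN_HDR word. Every FIFO access
++	 * polls CMD_PKT_STATUS so a stuck link times out instead of
++	 * hanging.
++	 */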
++ if (len) {
++ /* generic long write command */
++ while (len / DSI_GEN_PLD_DATA_BUF_SIZE) {
++ mipi_dsi_write_register(mipi_dsi,
++ MIPI_DSI_GEN_PLD_DATA, *buf);
++ buf++;
++ len -= DSI_GEN_PLD_DATA_BUF_SIZE;
++ mipi_dsi_read_register(mipi_dsi,
++ MIPI_DSI_CMD_PKT_STATUS, &status);
++ while ((status & DSI_CMD_PKT_STATUS_GEN_PLD_W_FULL) ==
++ DSI_CMD_PKT_STATUS_GEN_PLD_W_FULL) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_REG_RW_TIMEOUT)
++ return -EIO;
++ mipi_dsi_read_register(mipi_dsi,
++ MIPI_DSI_CMD_PKT_STATUS, &status);
++ }
++ }
++ /* write the remainder bytes */
++ if (len > 0) {
++ while ((status & DSI_CMD_PKT_STATUS_GEN_PLD_W_FULL) ==
++ DSI_CMD_PKT_STATUS_GEN_PLD_W_FULL) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_REG_RW_TIMEOUT)
++ return -EIO;
++ mipi_dsi_read_register(mipi_dsi,
++ MIPI_DSI_CMD_PKT_STATUS, &status);
++ }
++ mipi_dsi_write_register(mipi_dsi,
++ MIPI_DSI_GEN_PLD_DATA, *buf);
++ }
++
++ val = data_type | ((write_len & DSI_GEN_HDR_DATA_MASK)
++ << DSI_GEN_HDR_DATA_SHIFT);
++ } else {
++ /* generic short write command */
++ val = data_type | ((*buf & DSI_GEN_HDR_DATA_MASK)
++ << DSI_GEN_HDR_DATA_SHIFT);
++ }
++
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS, &status);
++ while ((status & DSI_CMD_PKT_STATUS_GEN_CMD_FULL) ==
++ DSI_CMD_PKT_STATUS_GEN_CMD_FULL) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_REG_RW_TIMEOUT)
++ return -EIO;
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS,
++ &status);
++ }
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_GEN_HDR, val);
++
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS, &status);
++ while (!((status & DSI_CMD_PKT_STATUS_GEN_CMD_EMPTY) ==
++ DSI_CMD_PKT_STATUS_GEN_CMD_EMPTY) ||
++ !((status & DSI_CMD_PKT_STATUS_GEN_PLD_W_EMPTY) ==
++ DSI_CMD_PKT_STATUS_GEN_PLD_W_EMPTY)) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_REG_RW_TIMEOUT)
++ return -EIO;
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS,
++ &status);
++ }
++
++ return 0;
++}
++
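++/*
++ * Read a DSI packet: issue the read request through the generic header
++ * register, wait for the read command to go busy and then complete, and
++ * drain the receive payload FIFO into buf.
++ */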
++int mipi_dsi_pkt_read(struct mipi_dsi_info *mipi_dsi,
++ u8 data_type, u32 *buf, int len)
++{
++ u32 val;
++ int read_len = 0;
++ uint32_t timeout = 0;
++
++ if (!len) {
++		mipi_dbg("%s: invalid len = 0!\n", __func__);
++ return -EINVAL;
++ }
++
++ val = data_type | ((*buf & DSI_GEN_HDR_DATA_MASK)
++ << DSI_GEN_HDR_DATA_SHIFT);
++ memset(buf, 0, len);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_GEN_HDR, val);
++
++	/* wait for the cmd to be sent out */
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS, &val);
++ while ((val & DSI_CMD_PKT_STATUS_GEN_RD_CMD_BUSY) !=
++ DSI_CMD_PKT_STATUS_GEN_RD_CMD_BUSY) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_REG_RW_TIMEOUT)
++ return -EIO;
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS,
++ &val);
++ }
++	/* wait for the entire response to be stored in the FIFO */
++ while ((val & DSI_CMD_PKT_STATUS_GEN_RD_CMD_BUSY) ==
++ DSI_CMD_PKT_STATUS_GEN_RD_CMD_BUSY) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_REG_RW_TIMEOUT)
++ return -EIO;
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS,
++ &val);
++ }
++
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS, &val);
++ while (!(val & DSI_CMD_PKT_STATUS_GEN_PLD_R_EMPTY)) {
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_GEN_PLD_DATA, buf);
++ read_len += DSI_GEN_PLD_DATA_BUF_SIZE;
++ buf++;
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_PKT_STATUS,
++ &val);
++ if (read_len == (DSI_GEN_PLD_DATA_BUF_ENTRY *
++ DSI_GEN_PLD_DATA_BUF_SIZE))
++ break;
++ }
++
++ if ((len <= read_len) &&
++ ((len + DSI_GEN_PLD_DATA_BUF_SIZE) >= read_len))
++ return 0;
++ else {
++ dev_err(&mipi_dsi->pdev->dev,
++ "actually read_len:%d != len:%d.\n", read_len, len);
++ return -ERANGE;
++ }
++}
++
++int mipi_dsi_dcs_cmd(struct mipi_dsi_info *mipi_dsi,
++ u8 cmd, const u32 *param, int num)
++{
++ int err = 0;
++ u32 buf[DSI_CMD_BUF_MAXSIZE];
++
++ switch (cmd) {
++ case MIPI_DCS_EXIT_SLEEP_MODE:
++ case MIPI_DCS_ENTER_SLEEP_MODE:
++ case MIPI_DCS_SET_DISPLAY_ON:
++ case MIPI_DCS_SET_DISPLAY_OFF:
++ buf[0] = cmd;
++ err = mipi_dsi_pkt_write(mipi_dsi,
++ MIPI_DSI_DCS_SHORT_WRITE, buf, 0);
++ break;
++
++ default:
++ dev_err(&mipi_dsi->pdev->dev,
++ "MIPI DSI DCS Command:0x%x Not supported!\n", cmd);
++ break;
++ }
++
++ return err;
++}
++
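++/*
++ * Program the D-PHY PLL via the test interface (TST_CTRL0/1), then wait
++ * for the PHY to report lock and a stopped clock lane.
++ */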
++static void mipi_dsi_dphy_init(struct mipi_dsi_info *mipi_dsi,
++ u32 cmd, u32 data)
++{
++ u32 val;
++ u32 timeout = 0;
++
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_IF_CTRL,
++ DSI_PHY_IF_CTRL_RESET);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP, DSI_PWRUP_POWERUP);
++
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL0, 0);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL1,
++ (0x10000 | cmd));
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL0, 2);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL0, 0);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL1, (0 | data));
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL0, 2);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TST_CTRL0, 0);
++ val = DSI_PHY_RSTZ_EN_CLK | DSI_PHY_RSTZ_DISABLE_RST |
++ DSI_PHY_RSTZ_DISABLE_SHUTDOWN;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_RSTZ, val);
++
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_PHY_STATUS, &val);
++ while ((val & DSI_PHY_STATUS_LOCK) != DSI_PHY_STATUS_LOCK) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_PHY_TIMEOUT) {
++ dev_err(&mipi_dsi->pdev->dev,
++ "Error: phy lock timeout!\n");
++ break;
++ }
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_PHY_STATUS, &val);
++ }
++ timeout = 0;
++ while ((val & DSI_PHY_STATUS_STOPSTATE_CLK_LANE) !=
++ DSI_PHY_STATUS_STOPSTATE_CLK_LANE) {
++ msleep(1);
++ timeout++;
++ if (timeout == MIPI_DSI_PHY_TIMEOUT) {
++ dev_err(&mipi_dsi->pdev->dev,
++ "Error: phy lock lane timeout!\n");
++ break;
++ }
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_PHY_STATUS, &val);
++ }
++}
++
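++/*
++ * Full controller setup on the init path: DPI polarities and color
++ * coding, packet handler, video packet size, horizontal/vertical timing
++ * derived from the fb_videomode, PHY timeouts and lane count, and
++ * finally the D-PHY PLL.  On the non-init path only the D-PHY PLL is
++ * reprogrammed.
++ */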
++static void mipi_dsi_enable_controller(struct mipi_dsi_info *mipi_dsi,
++ bool init)
++{
++ u32 val;
++ u32 lane_byte_clk_period;
++ struct fb_videomode *mode = mipi_dsi->mode;
++ struct mipi_lcd_config *lcd_config = mipi_dsi->lcd_config;
++
++ if (init) {
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP,
++ DSI_PWRUP_RESET);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_RSTZ,
++ DSI_PHY_RSTZ_RST);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_CLKMGR_CFG,
++ DSI_CLKMGR_CFG_CLK_DIV);
++
++		/* start from 0 so the polarity bits below OR in cleanly */
++		val = 0;
++		if (!(mode->sync & FB_SYNC_VERT_HIGH_ACT))
++			val |= DSI_DPI_CFG_VSYNC_ACT_LOW;
++ if (!(mode->sync & FB_SYNC_HOR_HIGH_ACT))
++ val |= DSI_DPI_CFG_HSYNC_ACT_LOW;
++ if ((mode->sync & FB_SYNC_OE_LOW_ACT))
++ val |= DSI_DPI_CFG_DATAEN_ACT_LOW;
++ if (MIPI_RGB666_LOOSELY == lcd_config->dpi_fmt)
++ val |= DSI_DPI_CFG_EN18LOOSELY;
++ val |= (lcd_config->dpi_fmt & DSI_DPI_CFG_COLORCODE_MASK)
++ << DSI_DPI_CFG_COLORCODE_SHIFT;
++ val |= (lcd_config->virtual_ch & DSI_DPI_CFG_VID_MASK)
++ << DSI_DPI_CFG_VID_SHIFT;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_DPI_CFG, val);
++
++ val = DSI_PCKHDL_CFG_EN_BTA |
++ DSI_PCKHDL_CFG_EN_ECC_RX |
++ DSI_PCKHDL_CFG_EN_CRC_RX;
++
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PCKHDL_CFG, val);
++
++ val = (mode->xres & DSI_VID_PKT_CFG_VID_PKT_SZ_MASK)
++ << DSI_VID_PKT_CFG_VID_PKT_SZ_SHIFT;
++ val |= (NUMBER_OF_CHUNKS & DSI_VID_PKT_CFG_NUM_CHUNKS_MASK)
++ << DSI_VID_PKT_CFG_NUM_CHUNKS_SHIFT;
++ val |= (NULL_PKT_SIZE & DSI_VID_PKT_CFG_NULL_PKT_SZ_MASK)
++ << DSI_VID_PKT_CFG_NULL_PKT_SZ_SHIFT;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_VID_PKT_CFG, val);
++
++ /* enable LP mode when TX DCS cmd and enable DSI command mode */
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_CMD_MODE_CFG,
++ MIPI_DSI_CMD_MODE_CFG_EN_LOWPOWER);
++
++ /* mipi lane byte clk period in ns unit */
++ lane_byte_clk_period = NS2PS_RATIO /
++ (lcd_config->max_phy_clk / BITS_PER_BYTE);
++ val = ROUND_UP(mode->hsync_len * mode->pixclock /
++ NS2PS_RATIO / lane_byte_clk_period)
++ << DSI_TME_LINE_CFG_HSA_TIME_SHIFT;
++ val |= ROUND_UP(mode->left_margin * mode->pixclock /
++ NS2PS_RATIO / lane_byte_clk_period)
++ << DSI_TME_LINE_CFG_HBP_TIME_SHIFT;
++ val |= ROUND_UP((mode->left_margin + mode->right_margin +
++ mode->hsync_len + mode->xres) * mode->pixclock
++ / NS2PS_RATIO / lane_byte_clk_period)
++ << DSI_TME_LINE_CFG_HLINE_TIME_SHIFT;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_TMR_LINE_CFG, val);
++
++ val = ((mode->vsync_len & DSI_VTIMING_CFG_VSA_LINES_MASK)
++ << DSI_VTIMING_CFG_VSA_LINES_SHIFT);
++ val |= ((mode->upper_margin & DSI_VTIMING_CFG_VBP_LINES_MASK)
++ << DSI_VTIMING_CFG_VBP_LINES_SHIFT);
++ val |= ((mode->lower_margin & DSI_VTIMING_CFG_VFP_LINES_MASK)
++ << DSI_VTIMING_CFG_VFP_LINES_SHIFT);
++ val |= ((mode->yres & DSI_VTIMING_CFG_V_ACT_LINES_MASK)
++ << DSI_VTIMING_CFG_V_ACT_LINES_SHIFT);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_VTIMING_CFG, val);
++
++ val = ((PHY_BTA_MAXTIME & DSI_PHY_TMR_CFG_BTA_TIME_MASK)
++ << DSI_PHY_TMR_CFG_BTA_TIME_SHIFT);
++ val |= ((PHY_LP2HS_MAXTIME & DSI_PHY_TMR_CFG_LP2HS_TIME_MASK)
++ << DSI_PHY_TMR_CFG_LP2HS_TIME_SHIFT);
++ val |= ((PHY_HS2LP_MAXTIME & DSI_PHY_TMR_CFG_HS2LP_TIME_MASK)
++ << DSI_PHY_TMR_CFG_HS2LP_TIME_SHIFT);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_TMR_CFG, val);
++
++ val = (((lcd_config->data_lane_num - 1) &
++ DSI_PHY_IF_CFG_N_LANES_MASK)
++ << DSI_PHY_IF_CFG_N_LANES_SHIFT);
++ val |= ((PHY_STOP_WAIT_TIME & DSI_PHY_IF_CFG_WAIT_TIME_MASK)
++ << DSI_PHY_IF_CFG_WAIT_TIME_SHIFT);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_IF_CFG, val);
++
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_ERROR_ST0, &val);
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_ERROR_ST1, &val);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_ERROR_MSK0, 0);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_ERROR_MSK1, 0);
++
++ mipi_dsi_dphy_init(mipi_dsi, DSI_PHY_CLK_INIT_COMMAND,
++ mipi_dsi->dphy_pll_config);
++ } else {
++ mipi_dsi_dphy_init(mipi_dsi, DSI_PHY_CLK_INIT_COMMAND,
++ mipi_dsi->dphy_pll_config);
++ }
++}
++
++static void mipi_dsi_disable_controller(struct mipi_dsi_info *mipi_dsi)
++{
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_IF_CTRL,
++ DSI_PHY_IF_CTRL_RESET);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP, DSI_PWRUP_RESET);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_RSTZ, DSI_PHY_RSTZ_RST);
++}
++
++static irqreturn_t mipi_dsi_irq_handler(int irq, void *data)
++{
++ u32 mask0;
++ u32 mask1;
++ u32 status0;
++ u32 status1;
++ struct mipi_dsi_info *mipi_dsi;
++
++ mipi_dsi = (struct mipi_dsi_info *)data;
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_ERROR_ST0, &status0);
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_ERROR_ST1, &status1);
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_ERROR_MSK0, &mask0);
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_ERROR_MSK1, &mask1);
++
++ if ((status0 & (~mask0)) || (status1 & (~mask1))) {
++ dev_err(&mipi_dsi->pdev->dev,
++ "mipi_dsi IRQ status0:0x%x, status1:0x%x!\n",
++ status0, status1);
++ }
++
++ return IRQ_HANDLED;
++}
++
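++/*
++ * Switch the host between command mode (low-power transfers for DCS
++ * setup) and video mode (burst mode, with the HS clock requested from
++ * the PHY).
++ */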
++static inline void mipi_dsi_set_mode(struct mipi_dsi_info *mipi_dsi,
++ bool cmd_mode)
++{
++ u32 val;
++
++ if (cmd_mode) {
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP,
++ DSI_PWRUP_RESET);
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_MODE_CFG, &val);
++ val |= MIPI_DSI_CMD_MODE_CFG_EN_CMD_MODE;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_CMD_MODE_CFG, val);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_VID_MODE_CFG, 0);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP,
++ DSI_PWRUP_POWERUP);
++ } else {
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP,
++ DSI_PWRUP_RESET);
++		/* Disable command mode when transferring video data */
++ mipi_dsi_read_register(mipi_dsi, MIPI_DSI_CMD_MODE_CFG, &val);
++ val &= ~MIPI_DSI_CMD_MODE_CFG_EN_CMD_MODE;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_CMD_MODE_CFG, val);
++ val = DSI_VID_MODE_CFG_EN | DSI_VID_MODE_CFG_EN_BURSTMODE |
++ DSI_VID_MODE_CFG_EN_LP_MODE;
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_VID_MODE_CFG, val);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PWR_UP,
++ DSI_PWRUP_POWERUP);
++ mipi_dsi_write_register(mipi_dsi, MIPI_DSI_PHY_IF_CTRL,
++ DSI_PHY_IF_CTRL_TX_REQ_CLK_HS);
++ }
++}
++
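++/*
++ * Power-up sequence: stream video timing for two frames, switch to
++ * command mode for the DCS exit-sleep command, then return to video
++ * mode once the panel has had time to wake up.
++ */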
++static int mipi_dsi_power_on(struct mxc_dispdrv_handle *disp)
++{
++ int err;
++ struct mipi_dsi_info *mipi_dsi = mxc_dispdrv_getdata(disp);
++
++ if (!mipi_dsi->dsi_power_on) {
++ clk_prepare_enable(mipi_dsi->dphy_clk);
++ clk_prepare_enable(mipi_dsi->cfg_clk);
++ mipi_dsi_enable_controller(mipi_dsi, false);
++ mipi_dsi_set_mode(mipi_dsi, false);
++		/* host sends pclk/hsync/vsync for two frames before sleep-out */
++ msleep((1000/mipi_dsi->mode->refresh + 1) << 1);
++ mipi_dsi_set_mode(mipi_dsi, true);
++ err = mipi_dsi_dcs_cmd(mipi_dsi, MIPI_DCS_EXIT_SLEEP_MODE,
++ NULL, 0);
++ if (err) {
++ dev_err(&mipi_dsi->pdev->dev,
++				"MIPI DSI DCS Command sleep-out error!\n");
++ }
++ msleep(MIPI_LCD_SLEEP_MODE_DELAY);
++ mipi_dsi_set_mode(mipi_dsi, false);
++ mipi_dsi->dsi_power_on = 1;
++ }
++
++ return 0;
++}
++
++void mipi_dsi_power_off(struct mxc_dispdrv_handle *disp)
++{
++ int err;
++ struct mipi_dsi_info *mipi_dsi = mxc_dispdrv_getdata(disp);
++
++ if (mipi_dsi->dsi_power_on) {
++ mipi_dsi_set_mode(mipi_dsi, true);
++ err = mipi_dsi_dcs_cmd(mipi_dsi, MIPI_DCS_ENTER_SLEEP_MODE,
++ NULL, 0);
++ if (err) {
++ dev_err(&mipi_dsi->pdev->dev,
++				"MIPI DSI DCS Command enter-sleep error!\n");
++ }
++ /* To allow time for the supply voltages
++ * and clock circuits to stabilize.
++ */
++ msleep(5);
++ /* video stream timing on */
++ mipi_dsi_set_mode(mipi_dsi, false);
++ msleep(MIPI_LCD_SLEEP_MODE_DELAY);
++
++ mipi_dsi_set_mode(mipi_dsi, true);
++ mipi_dsi_disable_controller(mipi_dsi);
++ mipi_dsi->dsi_power_on = 0;
++ clk_disable_unprepare(mipi_dsi->dphy_clk);
++ clk_disable_unprepare(mipi_dsi->cfg_clk);
++ }
++}
++
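++/*
++ * Match the panel name against the built-in LCD table, pick a video
++ * mode (command line string first, then the panel modedb) and look up
++ * the D-PHY PLL setting for the panel's maximum PHY clock.
++ */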
++static int mipi_dsi_lcd_init(struct mipi_dsi_info *mipi_dsi,
++ struct mxc_dispdrv_setting *setting)
++{
++ int err;
++ int size;
++ int i;
++ struct fb_videomode *mipi_lcd_modedb;
++ struct fb_videomode mode;
++ struct device *dev = &mipi_dsi->pdev->dev;
++
++ for (i = 0; i < ARRAY_SIZE(mipi_dsi_lcd_db); i++) {
++ if (!strcmp(mipi_dsi->lcd_panel,
++ mipi_dsi_lcd_db[i].lcd_panel)) {
++ mipi_dsi->lcd_callback =
++ &mipi_dsi_lcd_db[i].lcd_callback;
++ break;
++ }
++ }
++ if (i == ARRAY_SIZE(mipi_dsi_lcd_db)) {
++ dev_err(dev, "failed to find supported lcd panel.\n");
++ return -EINVAL;
++ }
++ /* get the videomode in the order: cmdline->platform data->driver */
++ mipi_dsi->lcd_callback->get_mipi_lcd_videomode(&mipi_lcd_modedb, &size,
++ &mipi_dsi->lcd_config);
++ err = fb_find_mode(&setting->fbi->var, setting->fbi,
++ setting->dft_mode_str,
++ mipi_lcd_modedb, size, NULL,
++ setting->default_bpp);
++ if (err != 1)
++ fb_videomode_to_var(&setting->fbi->var, mipi_lcd_modedb);
++
++ INIT_LIST_HEAD(&setting->fbi->modelist);
++ for (i = 0; i < size; i++) {
++ fb_var_to_videomode(&mode, &setting->fbi->var);
++ if (fb_mode_is_equal(&mode, mipi_lcd_modedb + i)) {
++ err = fb_add_videomode(mipi_lcd_modedb + i,
++ &setting->fbi->modelist);
++ /* Note: only support fb mode from driver */
++ mipi_dsi->mode = mipi_lcd_modedb + i;
++ break;
++ }
++ }
++ if ((err < 0) || (size == i)) {
++ dev_err(dev, "failed to add videomode.\n");
++		return (err < 0) ? err : -EINVAL;
++ }
++
++ for (i = 0; i < ARRAY_SIZE(mipi_dsi_phy_pll_clk_table); i++) {
++ if (mipi_dsi_phy_pll_clk_table[i].max_phy_clk <
++ mipi_dsi->lcd_config->max_phy_clk)
++ break;
++ }
++ if ((i == ARRAY_SIZE(mipi_dsi_phy_pll_clk_table)) ||
++ (mipi_dsi->lcd_config->max_phy_clk >
++ mipi_dsi_phy_pll_clk_table[0].max_phy_clk)) {
++		dev_err(dev, "failed to find data in "
++			"mipi_dsi_phy_pll_clk_table.\n");
++ return -EINVAL;
++ }
++ mipi_dsi->dphy_pll_config = mipi_dsi_phy_pll_clk_table[--i].config;
++ dev_dbg(dev, "dphy_pll_config:0x%x.\n", mipi_dsi->dphy_pll_config);
++
++ return 0;
++}
++
++int mipi_dsi_enable(struct mxc_dispdrv_handle *disp)
++{
++ int err;
++ struct mipi_dsi_info *mipi_dsi = mxc_dispdrv_getdata(disp);
++
++ if (!mipi_dsi->lcd_inited) {
++ err = clk_prepare_enable(mipi_dsi->dphy_clk);
++ err |= clk_prepare_enable(mipi_dsi->cfg_clk);
++ if (err)
++ dev_err(&mipi_dsi->pdev->dev,
++ "clk enable error:%d!\n", err);
++ mipi_dsi_enable_controller(mipi_dsi, true);
++ err = mipi_dsi->lcd_callback->mipi_lcd_setup(
++ mipi_dsi);
++ if (err < 0) {
++ dev_err(&mipi_dsi->pdev->dev,
++				"failed to init mipi lcd.\n");
++ clk_disable_unprepare(mipi_dsi->dphy_clk);
++ clk_disable_unprepare(mipi_dsi->cfg_clk);
++ return err;
++ }
++ mipi_dsi_set_mode(mipi_dsi, false);
++ mipi_dsi->dsi_power_on = 1;
++ mipi_dsi->lcd_inited = 1;
++ }
++ mipi_dsi_power_on(mipi_dsi->disp_mipi);
++
++ return 0;
++}
++
++static int mipi_dsi_disp_init(struct mxc_dispdrv_handle *disp,
++ struct mxc_dispdrv_setting *setting)
++{
++ struct mipi_dsi_info *mipi_dsi = mxc_dispdrv_getdata(disp);
++ struct device *dev = &mipi_dsi->pdev->dev;
++ int ret = 0;
++
++ if (!valid_mode(setting->if_fmt)) {
++		dev_warn(dev, "Input pixel format not valid, "
++			"using default RGB24\n");
++ setting->if_fmt = IPU_PIX_FMT_RGB24;
++ }
++
++ setting->dev_id = mipi_dsi->dev_id;
++ setting->disp_id = mipi_dsi->disp_id;
++
++ ret = mipi_dsi_lcd_init(mipi_dsi, setting);
++ if (ret) {
++ dev_err(dev, "failed to init mipi dsi lcd\n");
++ return ret;
++ }
++
++ dev_dbg(dev, "MIPI DSI dispdrv inited!\n");
++ return ret;
++}
++
++static void mipi_dsi_disp_deinit(struct mxc_dispdrv_handle *disp)
++{
++ struct mipi_dsi_info *mipi_dsi;
++
++ mipi_dsi = mxc_dispdrv_getdata(disp);
++
++ mipi_dsi_power_off(mipi_dsi->disp_mipi);
++ if (mipi_dsi->bl)
++ backlight_device_unregister(mipi_dsi->bl);
++}
++
++static struct mxc_dispdrv_driver mipi_dsi_drv = {
++ .name = DISPDRV_MIPI,
++ .init = mipi_dsi_disp_init,
++ .deinit = mipi_dsi_disp_deinit,
++ .enable = mipi_dsi_enable,
++ .disable = mipi_dsi_power_off,
++};
++
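++/* GPR3 MIPI mux field: bit 5 selects the source IPU, bit 4 the DI */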
++static int imx6q_mipi_dsi_get_mux(int dev_id, int disp_id)
++{
++ if (dev_id > 1 || disp_id > 1)
++ return -EINVAL;
++
++ return (dev_id << 5) | (disp_id << 4);
++}
++
++static struct mipi_dsi_bus_mux imx6q_mipi_dsi_mux[] = {
++ {
++ .reg = IOMUXC_GPR3,
++ .mask = IMX6Q_GPR3_MIPI_MUX_CTL_MASK,
++ .get_mux = imx6q_mipi_dsi_get_mux,
++ },
++};
++
++static int imx6dl_mipi_dsi_get_mux(int dev_id, int disp_id)
++{
++ if (dev_id > 1 || disp_id > 1)
++ return -EINVAL;
++
++ /* MIPI DSI source is LCDIF */
++ if (dev_id)
++ disp_id = 0;
++
++ return (dev_id << 5) | (disp_id << 4);
++}
++
++static struct mipi_dsi_bus_mux imx6dl_mipi_dsi_mux[] = {
++ {
++ .reg = IOMUXC_GPR3,
++ .mask = IMX6Q_GPR3_MIPI_MUX_CTL_MASK,
++ .get_mux = imx6dl_mipi_dsi_get_mux,
++ },
++};
++
++static const struct of_device_id imx_mipi_dsi_dt_ids[] = {
++ { .compatible = "fsl,imx6q-mipi-dsi", .data = imx6q_mipi_dsi_mux, },
++ { .compatible = "fsl,imx6dl-mipi-dsi", .data = imx6dl_mipi_dsi_mux, },
++ { }
++};
++MODULE_DEVICE_TABLE(of, imx_mipi_dsi_dt_ids);
++
++/**
++ * This function is called by the driver framework to initialize the MIPI DSI
++ * device.
++ *
++ * @param pdev The device structure for the MIPI DSI passed in by the
++ * driver framework.
++ *
++ * @return Returns 0 on success or negative error code on error
++ */
++static int mipi_dsi_probe(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ const struct of_device_id *of_id =
++ of_match_device(of_match_ptr(imx_mipi_dsi_dt_ids),
++ &pdev->dev);
++ struct mipi_dsi_info *mipi_dsi;
++ struct resource *res;
++ u32 dev_id, disp_id;
++ const char *lcd_panel;
++	int mux;
++ int ret = 0;
++
++ mipi_dsi = devm_kzalloc(&pdev->dev, sizeof(*mipi_dsi), GFP_KERNEL);
++ if (!mipi_dsi)
++ return -ENOMEM;
++
++ ret = of_property_read_string(np, "lcd_panel", &lcd_panel);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to read of property lcd_panel\n");
++ return ret;
++ }
++
++ ret = of_property_read_u32(np, "dev_id", &dev_id);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to read of property dev_id\n");
++ return ret;
++ }
++ ret = of_property_read_u32(np, "disp_id", &disp_id);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to read of property disp_id\n");
++ return ret;
++ }
++ mipi_dsi->dev_id = dev_id;
++ mipi_dsi->disp_id = disp_id;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(&pdev->dev, "failed to get platform resource 0\n");
++ return -ENODEV;
++ }
++
++ if (!devm_request_mem_region(&pdev->dev, res->start,
++ resource_size(res), pdev->name))
++ return -EBUSY;
++
++ mipi_dsi->mmio_base = devm_ioremap(&pdev->dev, res->start,
++ resource_size(res));
++ if (!mipi_dsi->mmio_base)
++ return -EBUSY;
++
++ mipi_dsi->irq = platform_get_irq(pdev, 0);
++ if (mipi_dsi->irq < 0) {
++		dev_err(&pdev->dev, "failed to get device irq\n");
++ return -ENODEV;
++ }
++
++ ret = devm_request_irq(&pdev->dev, mipi_dsi->irq,
++ mipi_dsi_irq_handler,
++ 0, "mipi_dsi", mipi_dsi);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to request irq\n");
++ return ret;
++ }
++
++ mipi_dsi->dphy_clk = devm_clk_get(&pdev->dev, "mipi_pllref_clk");
++ if (IS_ERR(mipi_dsi->dphy_clk)) {
++ dev_err(&pdev->dev, "failed to get dphy pll_ref_clk\n");
++ return PTR_ERR(mipi_dsi->dphy_clk);
++ }
++
++ mipi_dsi->cfg_clk = devm_clk_get(&pdev->dev, "mipi_cfg_clk");
++ if (IS_ERR(mipi_dsi->cfg_clk)) {
++ dev_err(&pdev->dev, "failed to get cfg_clk\n");
++ return PTR_ERR(mipi_dsi->cfg_clk);
++ }
++
++ mipi_dsi->disp_power_on = devm_regulator_get(&pdev->dev,
++ "disp-power-on");
++ if (!IS_ERR(mipi_dsi->disp_power_on)) {
++ ret = regulator_enable(mipi_dsi->disp_power_on);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to enable display "
++ "power regulator, err=%d\n", ret);
++ return ret;
++ }
++ } else {
++ mipi_dsi->disp_power_on = NULL;
++ }
++
++ ret = device_reset(&pdev->dev);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to reset: %d\n", ret);
++ goto dev_reset_fail;
++ }
++
++ if (of_id)
++ mipi_dsi->bus_mux = of_id->data;
++
++ mipi_dsi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
++ if (IS_ERR(mipi_dsi->regmap)) {
++ dev_err(&pdev->dev, "failed to get parent regmap\n");
++ ret = PTR_ERR(mipi_dsi->regmap);
++ goto get_parent_regmap_fail;
++ }
++
++ mux = mipi_dsi->bus_mux->get_mux(dev_id, disp_id);
++ if (mux >= 0)
++ regmap_update_bits(mipi_dsi->regmap, mipi_dsi->bus_mux->reg,
++ mipi_dsi->bus_mux->mask, mux);
++ else
++ dev_warn(&pdev->dev, "invalid dev_id or disp_id muxing\n");
++
++ mipi_dsi->lcd_panel = kstrdup(lcd_panel, GFP_KERNEL);
++ if (!mipi_dsi->lcd_panel) {
++ dev_err(&pdev->dev, "failed to allocate lcd panel name\n");
++ ret = -ENOMEM;
++ goto kstrdup_fail;
++ }
++
++ mipi_dsi->pdev = pdev;
++ mipi_dsi->disp_mipi = mxc_dispdrv_register(&mipi_dsi_drv);
++ if (IS_ERR(mipi_dsi->disp_mipi)) {
++ dev_err(&pdev->dev, "mxc_dispdrv_register error\n");
++ ret = PTR_ERR(mipi_dsi->disp_mipi);
++ goto dispdrv_reg_fail;
++ }
++
++ mxc_dispdrv_setdata(mipi_dsi->disp_mipi, mipi_dsi);
++ dev_set_drvdata(&pdev->dev, mipi_dsi);
++
++ dev_info(&pdev->dev, "i.MX MIPI DSI driver probed\n");
++ return ret;
++
++dispdrv_reg_fail:
++ kfree(mipi_dsi->lcd_panel);
++kstrdup_fail:
++get_parent_regmap_fail:
++dev_reset_fail:
++ if (mipi_dsi->disp_power_on)
++ regulator_disable(mipi_dsi->disp_power_on);
++ return ret;
++}
++
++static void mipi_dsi_shutdown(struct platform_device *pdev)
++{
++ struct mipi_dsi_info *mipi_dsi = dev_get_drvdata(&pdev->dev);
++
++ mipi_dsi_power_off(mipi_dsi->disp_mipi);
++}
++
++static int mipi_dsi_remove(struct platform_device *pdev)
++{
++ struct mipi_dsi_info *mipi_dsi = dev_get_drvdata(&pdev->dev);
++
++ mxc_dispdrv_puthandle(mipi_dsi->disp_mipi);
++ mxc_dispdrv_unregister(mipi_dsi->disp_mipi);
++
++ if (mipi_dsi->disp_power_on)
++ regulator_disable(mipi_dsi->disp_power_on);
++
++ kfree(mipi_dsi->lcd_panel);
++ dev_set_drvdata(&pdev->dev, NULL);
++
++ return 0;
++}
++
++static struct platform_driver mipi_dsi_driver = {
++ .driver = {
++ .of_match_table = imx_mipi_dsi_dt_ids,
++ .name = "mxc_mipi_dsi",
++ },
++ .probe = mipi_dsi_probe,
++ .remove = mipi_dsi_remove,
++ .shutdown = mipi_dsi_shutdown,
++};
++
++static int __init mipi_dsi_init(void)
++{
++ int err;
++
++ err = platform_driver_register(&mipi_dsi_driver);
++ if (err) {
++ pr_err("mipi_dsi_driver register failed\n");
++		return err;
++ }
++ pr_info("MIPI DSI driver module loaded\n");
++ return 0;
++}
++
++static void __exit mipi_dsi_cleanup(void)
++{
++ platform_driver_unregister(&mipi_dsi_driver);
++}
++
++module_init(mipi_dsi_init);
++module_exit(mipi_dsi_cleanup);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("i.MX MIPI DSI driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-4.1.3/drivers/video/mxc/mipi_dsi.h linux-xbian-imx6/drivers/video/mxc/mipi_dsi.h
+--- linux-4.1.3/drivers/video/mxc/mipi_dsi.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/video/mxc/mipi_dsi.h 2015-07-27 23:13:08.749764128 +0200
+@@ -0,0 +1,112 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __MIPI_DSI_H__
++#define __MIPI_DSI_H__
++
++#include <linux/regmap.h>
++
++#ifdef DEBUG
++#define mipi_dbg(fmt, ...) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
++#else
++#define mipi_dbg(fmt, ...)
++#endif
++
++#define DSI_CMD_BUF_MAXSIZE (32)
++
++/* DPI interface pixel color coding map */
++enum mipi_dsi_dpi_fmt {
++ MIPI_RGB565_PACKED = 0,
++ MIPI_RGB565_LOOSELY,
++ MIPI_RGB565_CONFIG3,
++ MIPI_RGB666_PACKED,
++ MIPI_RGB666_LOOSELY,
++ MIPI_RGB888,
++};
++
++struct mipi_lcd_config {
++ u32 virtual_ch;
++ u32 data_lane_num;
++ /* device max DPHY clock in MHz unit */
++ u32 max_phy_clk;
++ enum mipi_dsi_dpi_fmt dpi_fmt;
++};
++
++struct mipi_dsi_info;
++struct mipi_dsi_lcd_callback {
++ /* callback for lcd panel operation */
++ void (*get_mipi_lcd_videomode)(struct fb_videomode **, int *,
++ struct mipi_lcd_config **);
++ int (*mipi_lcd_setup)(struct mipi_dsi_info *);
++
++};
++
++struct mipi_dsi_match_lcd {
++ char *lcd_panel;
++ struct mipi_dsi_lcd_callback lcd_callback;
++};
++
++struct mipi_dsi_bus_mux {
++ int reg;
++ int mask;
++ int (*get_mux) (int dev_id, int disp_id);
++};
++
++/* driver private data */
++struct mipi_dsi_info {
++ struct platform_device *pdev;
++ void __iomem *mmio_base;
++ struct regmap *regmap;
++ const struct mipi_dsi_bus_mux *bus_mux;
++ int dsi_power_on;
++ int lcd_inited;
++ u32 dphy_pll_config;
++ int dev_id;
++ int disp_id;
++ char *lcd_panel;
++ int irq;
++ struct clk *dphy_clk;
++ struct clk *cfg_clk;
++ struct mxc_dispdrv_handle *disp_mipi;
++ struct fb_videomode *mode;
++ struct regulator *disp_power_on;
++ struct mipi_lcd_config *lcd_config;
++ /* board related power control */
++ struct backlight_device *bl;
++ /* callback for lcd panel operation */
++ struct mipi_dsi_lcd_callback *lcd_callback;
++};
++
++int mipi_dsi_pkt_write(struct mipi_dsi_info *mipi,
++ u8 data_type, const u32 *buf, int len);
++int mipi_dsi_pkt_read(struct mipi_dsi_info *mipi,
++ u8 data_type, u32 *buf, int len);
++int mipi_dsi_dcs_cmd(struct mipi_dsi_info *mipi,
++ u8 cmd, const u32 *param, int num);
++
++#ifdef CONFIG_FB_MXC_TRULY_WVGA_SYNC_PANEL
++void mipid_hx8369_get_lcd_videomode(struct fb_videomode **mode, int *size,
++ struct mipi_lcd_config **data);
++int mipid_hx8369_lcd_setup(struct mipi_dsi_info *);
++#endif
++
++#ifndef CONFIG_FB_MXC_TRULY_WVGA_SYNC_PANEL
++#error "Please configure a MIPI LCD panel; none is selected!"
++#endif
++
++#endif
+diff -Nur linux-4.1.3/drivers/video/mxc/mxc_dcic.c linux-xbian-imx6/drivers/video/mxc/mxc_dcic.c
+--- linux-4.1.3/drivers/video/mxc/mxc_dcic.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/video/mxc/mxc_dcic.c 2015-07-27 23:13:08.749764128 +0200
+@@ -0,0 +1,697 @@
++/*
++ * Copyright (C) 2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++#include <linux/clk.h>
++#include <linux/cdev.h>
++#include <linux/delay.h>
++#include <linux/err.h>
++#include <linux/fs.h>
++#include <linux/fb.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/ioctl.h>
++#include <linux/interrupt.h>
++#include <linux/mfd/syscon.h>
++#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
++#include <linux/module.h>
++#include <linux/mxc_dcic.h>
++#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include <linux/regmap.h>
++#include <linux/types.h>
++#include <linux/uaccess.h>
++#include <video/videomode.h>
++#include <video/of_videomode.h>
++
++#define DRIVER_NAME "mxc_dcic"
++
++#define DCIC_IPU1_DI0 "dcic-ipu1-di0"
++#define DCIC_IPU1_DI1 "dcic-ipu1-di1"
++#define DCIC_IPU2_DI0 "dcic-ipu2-di0"
++#define DCIC_IPU2_DI1 "dcic-ipu2-di1"
++#define DCIC_LCDIF "dcic-lcdif"
++#define DCIC_LCDIF1 "dcic-lcdif1"
++#define DCIC_LCDIF2 "dcic-lcdif2"
++#define DCIC_LVDS "dcic-lvds"
++#define DCIC_LVDS0 "dcic-lvds0"
++#define DCIC_LVDS1 "dcic-lvds1"
++#define DCIC_HDMI "dcic-hdmi"
++
++#define DCIC0_DEV_NAME "mxc_dcic0"
++#define DCIC1_DEV_NAME "mxc_dcic1"
++
++#define FB_SYNC_OE_LOW_ACT 0x80000000
++#define FB_SYNC_CLK_LAT_FALL 0x40000000
++
++static wait_queue_head_t mxc_dcic_wait;
++static int mxc_dcic_vsync;
++static uint16_t mxc_dcic_irq;
++static unsigned long mxc_dcic_counter;
++static uint16_t mxc_dcic_clients;
++
++static const struct dcic_mux imx6q_dcic0_mux[] = {
++ {
++ .dcic = DCIC_IPU1_DI0,
++ .val = IMX6Q_GPR10_DCIC1_MUX_CTL_IPU1_DI0,
++ }, {
++ .dcic = DCIC_LVDS0,
++ .val = IMX6Q_GPR10_DCIC1_MUX_CTL_LVDS0,
++ }, {
++ .dcic = DCIC_LVDS1,
++ .val = IMX6Q_GPR10_DCIC1_MUX_CTL_LVDS1,
++ }, {
++ .dcic = DCIC_HDMI,
++ .val = IMX6Q_GPR10_DCIC1_MUX_CTL_HDMI,
++ }
++};
++
++static const struct dcic_mux imx6q_dcic1_mux[] = {
++ {
++ .dcic = DCIC_IPU1_DI1,
++ .val = IMX6Q_GPR10_DCIC2_MUX_CTL_IPU1_DI1,
++ }, {
++ .dcic = DCIC_LVDS0,
++ .val = IMX6Q_GPR10_DCIC2_MUX_CTL_LVDS0,
++ }, {
++ .dcic = DCIC_LVDS1,
++ .val = IMX6Q_GPR10_DCIC2_MUX_CTL_LVDS1,
++ }, {
++ .dcic = DCIC_HDMI,
++ .val = IMX6Q_GPR10_DCIC2_MUX_CTL_MIPI,
++ }
++};
++
++static const struct bus_mux imx6q_dcic_buses[] = {
++ {
++ .name = DCIC0_DEV_NAME,
++ .reg = IOMUXC_GPR10,
++ .shift = 0,
++ .mask = IMX6Q_GPR10_DCIC1_MUX_CTL_MASK,
++ .dcic_mux_num = ARRAY_SIZE(imx6q_dcic0_mux),
++ .dcics = imx6q_dcic0_mux,
++ }, {
++ .name = DCIC1_DEV_NAME,
++ .reg = IOMUXC_GPR10,
++ .shift = 2,
++ .mask = IMX6Q_GPR10_DCIC2_MUX_CTL_MASK,
++ .dcic_mux_num = ARRAY_SIZE(imx6q_dcic1_mux),
++ .dcics = imx6q_dcic1_mux,
++ }
++};
++
++static const struct dcic_info imx6q_dcic_info = {
++ .bus_mux_num = ARRAY_SIZE(imx6q_dcic_buses),
++ .buses = imx6q_dcic_buses,
++};
++
++static const struct dcic_mux imx6sx_dcic0_mux[] = {
++ {
++ .dcic = DCIC_LCDIF1,
++ .val = IMX6SX_GPR5_DISP_MUX_DCIC1_LCDIF1,
++ }, {
++ .dcic = DCIC_LVDS,
++ .val = IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS,
++ }
++};
++
++static const struct dcic_mux imx6sx_dcic1_mux[] = {
++ {
++ .dcic = DCIC_LCDIF2,
++ .val = IMX6SX_GPR5_DISP_MUX_DCIC2_LCDIF2,
++ }, {
++ .dcic = DCIC_LVDS,
++ .val = IMX6SX_GPR5_DISP_MUX_DCIC2_LVDS,
++ }
++};
++
++static const struct bus_mux imx6sx_dcic_buses[] = {
++ {
++ .name = DCIC0_DEV_NAME,
++ .reg = IOMUXC_GPR5,
++ .shift = 1,
++ .mask = IMX6SX_GPR5_DISP_MUX_DCIC1_MASK,
++ .dcic_mux_num = ARRAY_SIZE(imx6sx_dcic0_mux),
++ .dcics = imx6sx_dcic0_mux,
++ }, {
++ .name = DCIC1_DEV_NAME,
++ .reg = IOMUXC_GPR5,
++ .shift = 2,
++ .mask = IMX6SX_GPR5_DISP_MUX_DCIC2_MASK,
++ .dcic_mux_num = ARRAY_SIZE(imx6sx_dcic1_mux),
++ .dcics = imx6sx_dcic1_mux,
++ }
++};
++
++static const struct dcic_info imx6sx_dcic_info = {
++ .bus_mux_num = ARRAY_SIZE(imx6sx_dcic_buses),
++ .buses = imx6sx_dcic_buses,
++};
++
++static const struct of_device_id dcic_dt_ids[] = {
++ { .compatible = "fsl,imx6q-dcic", .data = &imx6q_dcic_info, },
++ { .compatible = "fsl,imx6sx-dcic", .data = &imx6sx_dcic_info, },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, dcic_dt_ids);
++
++static int of_get_dcic_val(struct device_node *np, struct dcic_data *dcic)
++{
++ const char *mux;
++ int ret;
++ u32 i, dcic_id;
++
++ ret = of_property_read_string(np, "dcic_mux", &mux);
++ if (ret < 0) {
++ dev_err(dcic->dev, "Can not get dcic_mux\n");
++ return ret;
++ }
++ ret = of_property_read_u32(np, "dcic_id", &dcic_id);
++ if (ret < 0) {
++ dev_err(dcic->dev, "Can not get dcic_id\n");
++ return ret;
++ }
++
++ dcic->bus_n = dcic_id;
++
++ for (i = 0; i < dcic->buses[dcic_id].dcic_mux_num; i++)
++ if (!strcmp(mux, dcic->buses[dcic_id].dcics[i].dcic)) {
++ dcic->mux_n = i;
++ return dcic->buses[dcic_id].dcics[i].val;
++ }
++
++ return -EINVAL;
++}
++
++static void dcic_enable(struct dcic_data *dcic)
++{
++ u32 val;
++
++ val = readl(&dcic->regs->dcicc);
++ val |= DCICC_IC_ENABLE;
++ writel(val, &dcic->regs->dcicc);
++}
++
++void dcic_disable(struct dcic_data *dcic)
++{
++ u32 val;
++
++ val = readl(&dcic->regs->dcicc);
++ val &= ~DCICC_IC_MASK;
++ val |= DCICC_IC_DISABLE;
++ writel(val, &dcic->regs->dcicc);
++}
++
++static void roi_enable(struct dcic_data *dcic, struct roi_params *roi_param)
++{
++ u32 val;
++ u32 roi_n = roi_param->roi_n;
++
++ val = readl(&dcic->regs->ROI[roi_n].dcicrc);
++ val |= DCICRC_ROI_ENABLE;
++ if (roi_param->freeze)
++ val |= DCICRC_ROI_FROZEN;
++ writel(val, &dcic->regs->ROI[roi_n].dcicrc);
++}
++
++static void roi_disable(struct dcic_data *dcic, u32 roi_n)
++{
++ u32 val;
++
++ val = readl(&dcic->regs->ROI[roi_n].dcicrc);
++ val &= ~DCICRC_ROI_ENABLE;
++ writel(val, &dcic->regs->ROI[roi_n].dcicrc);
++}
++
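++/*
++ * Program one region-of-interest block: window corners, the expected
++ * CRC reference signature, then enable (and optionally freeze) the ROI.
++ */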
++static bool roi_configure(struct dcic_data *dcic, struct roi_params *roi_param)
++{
++ struct roi_regs *roi_reg;
++ u32 val;
++
++ if (roi_param->roi_n < 0 || roi_param->roi_n >= 16) {
++ pr_err("Error, Wrong ROI number %d\n", roi_param->roi_n);
++ return false;
++ }
++
++ if (roi_param->end_x <= roi_param->start_x ||
++ roi_param->end_y <= roi_param->start_y) {
++		pr_err("Error, wrong ROI: end must be greater than start\n");
++ return false;
++ }
++
++ roi_reg = (struct roi_regs *) &dcic->regs->ROI[roi_param->roi_n];
++
++ /* init roi block size */
++ val = roi_param->start_y << 16 | roi_param->start_x;
++ writel(val, &roi_reg->dcicrc);
++
++ val = roi_param->end_y << 16 | roi_param->end_x;
++ writel(val, &roi_reg->dcicrs);
++
++ writel(roi_param->ref_sig, &roi_reg->dcicrrs);
++
++ roi_enable(dcic, roi_param);
++ return true;
++}
++
++static void dcic_int_enable(struct dcic_data *dcic)
++{
++ u32 val;
++
++ /* Clean pending interrupt before enable int */
++ writel(DCICS_FI_STAT_PENDING, &dcic->regs->dcics);
++ writel(0xffffffff, &dcic->regs->dcics);
++
++ /* Enable function interrupt */
++ val = readl(&dcic->regs->dcicic);
++ val &= ~DCICIC_FUN_INT_MASK;
++ val |= DCICIC_FUN_INT_ENABLE;
++ writel(val, &dcic->regs->dcicic);
++}
++
++static void dcic_int_disable(struct dcic_data *dcic)
++{
++ u32 val;
++
++ /* Disable both function and error interrupt */
++ val = readl(&dcic->regs->dcicic);
++ val = DCICIC_ERROR_INT_DISABLE | DCICIC_FUN_INT_DISABLE;
++ writel(val, &dcic->regs->dcicic);
++}
++
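++/*
++ * CRC interrupt: in vsync mode just count the frame and wake up any
++ * reader; otherwise latch the ROI result, mask further interrupts and
++ * complete the pending ioctl.
++ */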
++static irqreturn_t dcic_irq_handler(int irq, void *data)
++{
++ u32 i;
++
++ struct dcic_data *dcic = data;
++ u32 dcics = readl(&dcic->regs->dcics);
++
++ dcic->result = dcics & 0xffff;
++
++ if (!mxc_dcic_vsync)
++ dcic_int_disable(dcic);
++ else {
++ mxc_dcic_irq = -1;
++ mxc_dcic_counter++;
++ }
++
++ /* clean dcic interrupt state */
++ writel(DCICS_FI_STAT_PENDING, &dcic->regs->dcics);
++ writel(dcics, &dcic->regs->dcics);
++
++ if (mxc_dcic_vsync) {
++ wake_up(&mxc_dcic_wait);
++ return IRQ_HANDLED;
++ }
++
++ for (i = 0; i < 16; i++) {
++ pr_debug("ROI=%d,crcRS=0x%x, crcCS=0x%x\n", i,
++ readl(&dcic->regs->ROI[i].dcicrrs),
++ readl(&dcic->regs->ROI[i].dcicrcs));
++ }
++ complete(&dcic->roi_crc_comp);
++
++ return IRQ_HANDLED;
++}
++
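++/* Map fb sync flags onto the DCIC polarity configuration register. */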
++static int dcic_configure(struct dcic_data *dcic, unsigned int sync)
++{
++ u32 val;
++ val = 0;
++
++ /* vsync, hsync, DE, clk_pol */
++ if (!(sync & FB_SYNC_HOR_HIGH_ACT))
++ val |= DCICC_HSYNC_POL_ACTIVE_LOW;
++ if (!(sync & FB_SYNC_VERT_HIGH_ACT))
++ val |= DCICC_VSYNC_POL_ACTIVE_LOW;
++ if (sync & FB_SYNC_OE_LOW_ACT)
++ val |= DCICC_DE_ACTIVE_LOW;
++ if (sync & FB_SYNC_CLK_LAT_FALL)
++ val |= DCICC_CLK_POL_INVERTED;
++
++ writel(val, &dcic->regs->dcicc);
++ return 0;
++}
++
++static int dcic_open(struct inode *inode, struct file *file)
++{
++ struct dcic_data *dcic = container_of(inode->i_cdev, struct dcic_data, cdev);
++ struct dcic_private *dcic_client;
++ int i = 0;
++
++ dcic_client = devm_kzalloc(dcic->dev,
++ sizeof(struct dcic_private),
++ GFP_KERNEL);
++ if (!dcic_client) {
++ dev_err(dcic->dev, "Cannot allocate device data\n");
++ return -ENOMEM;
++ }
++
++ mutex_lock(&dcic->lock);
++
++ if (mxc_dcic_clients == 0xffff) {
++ mutex_unlock(&dcic->lock);
++ return -EBUSY;
++ }
++
++ clk_prepare_enable(dcic->disp_axi_clk);
++ clk_prepare_enable(dcic->dcic_clk);
++
++ dcic_client->dcic = dcic;
++ file->private_data = dcic_client;
++
++	/* find and claim the lowest free client bit */
++	while (mxc_dcic_clients & BIT(i))
++		i++;
++	mxc_dcic_clients |= BIT(i);
++	dcic_client->client_id |= BIT(i);
++
++ mutex_unlock(&dcic->lock);
++ return 0;
++}
++
++static int dcic_release(struct inode *inode, struct file *file)
++{
++ struct dcic_private *dcic_client = file->private_data;
++ struct dcic_data *dcic = dcic_client->dcic;
++ u32 i;
++
++ mutex_lock(&dcic->lock);
++
++ mxc_dcic_clients &= ~dcic_client->client_id;
++ if (!mxc_dcic_clients) {
++ for (i = 0; i < 16; i++)
++ roi_disable(dcic, i);
++
++ clk_disable_unprepare(dcic->dcic_clk);
++ clk_disable_unprepare(dcic->disp_axi_clk);
++ }
++
++ mutex_unlock(&dcic->lock);
++ devm_kfree(dcic->dev, dcic_client);
++ return 0;
++}
++
++static int dcic_init(struct device_node *np, struct dcic_data *dcic)
++{
++	u32 bus;
++	int val;
++
++	val = of_get_dcic_val(np, dcic);
++	if (val < 0) {
++		pr_err("Error: invalid dcic mux configuration\n");
++		return val;
++	}
++
++ bus = dcic->bus_n;
++
++	regmap_update_bits(dcic->regmap, dcic->buses[bus].reg,
++ dcic->buses[bus].mask, val);
++
++ return 0;
++}
++
++static long dcic_ioctl(struct file *file,
++ unsigned int cmd, unsigned long arg)
++{
++ int __user *argp = (void __user *)arg;
++ struct dcic_private *dcic_client = file->private_data;
++ struct dcic_data *dcic = dcic_client->dcic;
++ struct roi_params roi_param;
++ unsigned int sync;
++ int ret = 0;
++ int i, t = 0;
++
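++	/* vsync start/stop only takes effect for the sole open client */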
++ if (cmd == DCIC_IOC_STOP_VSYNC || cmd == DCIC_IOC_START_VSYNC) {
++ for (i = 0; i < 16; i++)
++ if (mxc_dcic_clients & BIT(i) && ++t > 1)
++ return 0;
++ }
++
++ switch (cmd) {
++ case DCIC_IOC_CONFIG_DCIC:
++ if (!copy_from_user(&sync, argp, sizeof(unsigned int)))
++ dcic_configure(dcic, sync);
++ break;
++ case DCIC_IOC_CONFIG_ROI:
++ if (copy_from_user(&roi_param, argp, sizeof(roi_param)))
++ return -EFAULT;
++ else
++ if (!roi_configure(dcic, &roi_param))
++ return -EINVAL;
++ break;
++ case DCIC_IOC_GET_RESULT:
++ init_completion(&dcic->roi_crc_comp);
++
++ dcic_enable(dcic);
++
++ dcic->result = 0;
++ msleep(25);
++
++ dcic_int_enable(dcic);
++
++ ret = wait_for_completion_interruptible_timeout(
++ &dcic->roi_crc_comp, 1 * HZ);
++ if (ret == 0) {
++ dev_err(dcic->dev,
++ "dcic wait for roi crc cal timeout\n");
++ ret = -ETIME;
++ } else if (ret > 0) {
++ if (copy_to_user(argp, &dcic->result, sizeof(dcic->result)))
++ return -EFAULT;
++ ret = 0;
++ }
++ dcic_disable(dcic);
++ break;
++ case DCIC_IOC_START_VSYNC:
++ mxc_dcic_vsync = 1;
++ mxc_dcic_irq = 0;
++ mxc_dcic_counter = 0;
++
++		/* configure minimum roi block */
++ roi_param.roi_n = 0;
++ roi_param.end_x = 1;
++ roi_param.start_x = 0;
++ roi_param.end_y = 1;
++ roi_param.start_y = 0;
++ roi_configure(dcic, &roi_param);
++
++ dcic_enable(dcic);
++ dcic_int_enable(dcic);
++ break;
++ case DCIC_IOC_STOP_VSYNC:
++ mxc_dcic_vsync = 0;
++ mxc_dcic_irq = 0;
++ init_completion(&dcic->roi_crc_comp);
++ wait_for_completion_interruptible_timeout(&dcic->roi_crc_comp, 1 * HZ);
++ dcic_disable(dcic);
++ break;
++ default:
++		pr_err("%s, Unsupported cmd %d\n", __func__, cmd);
++ break;
++ }
++ return ret;
++}
++
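++/*
++ * Blocking read: returns the vsync frame counter once this client's
++ * interrupt bit has been raised; honours O_NONBLOCK.
++ */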
++static ssize_t dcic_read(struct file *file, char __user *buf, size_t count,
++ loff_t *ppos)
++{
++ struct dcic_private *dcic_client = file->private_data;
++ int ret = 0;
++
++ do {
++ if (mxc_dcic_irq & dcic_client->client_id) {
++ count = min(sizeof(unsigned long), count);
++ ret = copy_to_user(buf, &mxc_dcic_counter, count) ? -EFAULT : count;
++ mxc_dcic_irq &= ~dcic_client->client_id;
++ break;
++ }
++		if (file->f_flags & O_NONBLOCK)
++			ret = -EAGAIN;
++		else if (wait_event_interruptible(mxc_dcic_wait,
++				mxc_dcic_irq & dcic_client->client_id))
++			ret = -ERESTARTSYS;
++	} while (!ret);
++
++ return ret;
++}
++
++static const struct file_operations mxc_dcic_fops = {
++ .owner = THIS_MODULE,
++ .open = dcic_open,
++ .release = dcic_release,
++ .unlocked_ioctl = dcic_ioctl,
++ .read = dcic_read,
++};
++
++static int dcic_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ const struct of_device_id *of_id =
++ of_match_device(dcic_dt_ids, dev);
++ const struct dcic_info *dcic_info =
++ (const struct dcic_info *)of_id->data;
++ struct device_node *np = dev->of_node;
++ struct dcic_data *dcic;
++ struct resource *res;
++ const char *name;
++ dev_t devt;
++ int ret = 0;
++ int irq;
++
++ dcic = devm_kzalloc(&pdev->dev,
++ sizeof(struct dcic_data),
++ GFP_KERNEL);
++ if (!dcic) {
++ dev_err(&pdev->dev, "Cannot allocate device data\n");
++ ret = -ENOMEM;
++ goto ealloc;
++ }
++
++ platform_set_drvdata(pdev, dcic);
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(&pdev->dev, "No dcic base address found.\n");
++ ret = -ENODEV;
++ goto ealloc;
++ }
++
++ dcic->regs = (struct dcic_regs *) devm_ioremap(&pdev->dev, res->start, resource_size(res));
++ if (!dcic->regs) {
++ dev_err(&pdev->dev, "ioremap failed with dcic base\n");
++ ret = -ENOMEM;
++ goto ealloc;
++ }
++
++ dcic->dev = dev;
++ dcic->buses = dcic_info->buses;
++
++ dcic->regmap = syscon_regmap_lookup_by_phandle(np, "gpr");
++ if (IS_ERR(dcic->regmap)) {
++ dev_err(dev, "failed to get parent regmap\n");
++ ret = PTR_ERR(dcic->regmap);
++ goto ealloc;
++ }
++
++ /* clock */
++ dcic->disp_axi_clk = devm_clk_get(&pdev->dev, "disp-axi");
++ if (IS_ERR(dcic->disp_axi_clk)) {
++ dev_err(&pdev->dev, "get disp-axi clock failed\n");
++ ret = PTR_ERR(dcic->disp_axi_clk);
++ goto ealloc;
++ }
++
++ dcic->dcic_clk = devm_clk_get(&pdev->dev, "dcic");
++ if (IS_ERR(dcic->dcic_clk)) {
++ dev_err(&pdev->dev, "get dcic clk failed\n");
++ ret = PTR_ERR(dcic->dcic_clk);
++ goto ealloc;
++ }
++
++ mutex_init(&dcic->lock);
++ ret = dcic_init(np, dcic);
++ if (ret < 0) {
++ pr_err("Failed init dcic\n");
++ goto ealloc;
++ }
++
++ /* register device */
++ name = dcic->buses[dcic->bus_n].name;
++ dcic->major = register_chrdev(0, name, &mxc_dcic_fops);
++ if (dcic->major < 0) {
++ pr_err("DCIC: unable to get a major for dcic\n");
++ ret = -EBUSY;
++ goto ealloc;
++ }
++
++ dcic->class = class_create(THIS_MODULE, name);
++ if (IS_ERR(dcic->class)) {
++ ret = PTR_ERR(dcic->class);
++ goto err_out_chrdev;
++ }
++
++ /* create char device */
++ devt = MKDEV(dcic->major, 0);
++ dcic->devt = devt;
++
++ cdev_init(&dcic->cdev, &mxc_dcic_fops);
++ dcic->cdev.owner = THIS_MODULE;
++ ret = cdev_add(&dcic->cdev, devt, 1);
++ if (ret)
++ goto err_out_class;
++
++ device_create(dcic->class, NULL, devt,
++ NULL, name);
++
++ /* IRQ */
++ irq = platform_get_irq(pdev, 0);
++
++ ret = devm_request_irq(&pdev->dev, irq, dcic_irq_handler, 0,
++ dev_name(&pdev->dev), dcic);
++ if (ret) {
++ dev_err(&pdev->dev, "request_irq (%d) failed with error %d\n",
++ irq, ret);
++ goto err_out_cdev;
++ }
++
++ init_waitqueue_head(&mxc_dcic_wait);
++ mxc_dcic_vsync = 0;
++ mxc_dcic_irq = 0;
++
++ return 0;
++
++err_out_cdev:
++ cdev_del(&dcic->cdev);
++err_out_class:
++ device_destroy(dcic->class, devt);
++ class_destroy(dcic->class);
++err_out_chrdev:
++ unregister_chrdev(dcic->major, name);
++ealloc:
++ return ret;
++}
++
++static int dcic_remove(struct platform_device *pdev)
++{
++ struct dcic_data *dcic = platform_get_drvdata(pdev);
++ const char *name;
++
++ name = dcic->buses[dcic->bus_n].name;
++
++ device_destroy(dcic->class, dcic->devt);
++ cdev_del(&dcic->cdev);
++ class_destroy(dcic->class);
++ unregister_chrdev(dcic->major, name);
++ mutex_destroy(&dcic->lock);
++
++ return 0;
++}
++
++static struct platform_driver dcic_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .of_match_table = dcic_dt_ids,
++ },
++ .probe = dcic_probe,
++ .remove = dcic_remove,
++};
++
++module_platform_driver(dcic_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("MXC DCIC driver");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:" DRIVER_NAME);
+diff -Nur linux-4.1.3/drivers/video/mxc/mxc_dispdrv.c linux-xbian-imx6/drivers/video/mxc/mxc_dispdrv.c
+--- linux-4.1.3/drivers/video/mxc/mxc_dispdrv.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/video/mxc/mxc_dispdrv.c 2015-07-27 23:13:08.749764128 +0200
+@@ -0,0 +1,150 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @file mxc_dispdrv.c
++ * @brief mxc display driver framework.
++ *
++ * A display device driver calls mxc_dispdrv_register(drv) in its dev_probe()
++ * function. All dev_probe() work moves into mxc_dispdrv_driver->init();
++ * init() should initialize the device and feed back the display settings.
++ * Necessary deferred operations can be done in mxc_dispdrv_driver->post_init(),
++ * after dev_id and disp_id pass the usage check.
++ * All dev_remove() work moves into mxc_dispdrv_driver->deinit();
++ * all dev_suspend() work into the fb_notifier for SUSPEND, if there is one;
++ * all dev_resume() work into the fb_notifier for RESUME, if there is one.
++ *
++ * The ipuv3 fb driver calls mxc_dispdrv_gethandle(name, setting) before a fb
++ * is added, passing the fbi through setting; after mxc_dispdrv_gethandle()
++ * returns, the fb driver has the basic settings for the fbi and the ipuv3
++ * hardware (ipu_id and disp_id).
++ *
++ * @ingroup Framebuffer
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++#include <linux/slab.h>
++#include <linux/err.h>
++#include <linux/string.h>
++#include "mxc_dispdrv.h"
++
++static LIST_HEAD(dispdrv_list);
++static DEFINE_MUTEX(dispdrv_lock);
++
++struct mxc_dispdrv_entry {
++ /* Note: drv always the first element */
++ struct mxc_dispdrv_driver *drv;
++ bool active;
++ void *priv;
++ struct list_head list;
++};
++
++struct mxc_dispdrv_handle *mxc_dispdrv_register(struct mxc_dispdrv_driver *drv)
++{
++ struct mxc_dispdrv_entry *new;
++
++ mutex_lock(&dispdrv_lock);
++
++ new = kzalloc(sizeof(struct mxc_dispdrv_entry), GFP_KERNEL);
++ if (!new) {
++ mutex_unlock(&dispdrv_lock);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ new->drv = drv;
++ list_add_tail(&new->list, &dispdrv_list);
++
++ mutex_unlock(&dispdrv_lock);
++
++ return (struct mxc_dispdrv_handle *)new;
++}
++EXPORT_SYMBOL_GPL(mxc_dispdrv_register);
++
++int mxc_dispdrv_unregister(struct mxc_dispdrv_handle *handle)
++{
++ struct mxc_dispdrv_entry *entry = (struct mxc_dispdrv_entry *)handle;
++
++ if (entry) {
++ mutex_lock(&dispdrv_lock);
++ list_del(&entry->list);
++ mutex_unlock(&dispdrv_lock);
++ kfree(entry);
++ return 0;
++ } else
++ return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(mxc_dispdrv_unregister);
++
++struct mxc_dispdrv_handle *mxc_dispdrv_gethandle(char *name,
++ struct mxc_dispdrv_setting *setting)
++{
++ int ret, found = 0;
++ struct mxc_dispdrv_entry *entry;
++
++ mutex_lock(&dispdrv_lock);
++ list_for_each_entry(entry, &dispdrv_list, list) {
++ if (!strcmp(entry->drv->name, name) && (entry->drv->init)) {
++ ret = entry->drv->init((struct mxc_dispdrv_handle *)
++ entry, setting);
++ if (ret >= 0) {
++ entry->active = true;
++ found = 1;
++ break;
++ }
++ }
++ }
++ mutex_unlock(&dispdrv_lock);
++
++	return found ? (struct mxc_dispdrv_handle *)entry : ERR_PTR(-ENODEV);
++}
++EXPORT_SYMBOL_GPL(mxc_dispdrv_gethandle);
++
++void mxc_dispdrv_puthandle(struct mxc_dispdrv_handle *handle)
++{
++ struct mxc_dispdrv_entry *entry = (struct mxc_dispdrv_entry *)handle;
++
++ mutex_lock(&dispdrv_lock);
++ if (entry && entry->active && entry->drv->deinit) {
++ entry->drv->deinit(handle);
++ entry->active = false;
++ }
++ mutex_unlock(&dispdrv_lock);
++
++}
++EXPORT_SYMBOL_GPL(mxc_dispdrv_puthandle);
++
++int mxc_dispdrv_setdata(struct mxc_dispdrv_handle *handle, void *data)
++{
++ struct mxc_dispdrv_entry *entry = (struct mxc_dispdrv_entry *)handle;
++
++ if (entry) {
++ entry->priv = data;
++ return 0;
++ } else
++ return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(mxc_dispdrv_setdata);
++
++void *mxc_dispdrv_getdata(struct mxc_dispdrv_handle *handle)
++{
++ struct mxc_dispdrv_entry *entry = (struct mxc_dispdrv_entry *)handle;
++
++ if (entry) {
++ return entry->priv;
++ } else
++ return ERR_PTR(-EINVAL);
++}
++EXPORT_SYMBOL_GPL(mxc_dispdrv_getdata);
+diff -Nur linux-4.1.3/drivers/video/mxc/mxc_dispdrv.h linux-xbian-imx6/drivers/video/mxc/mxc_dispdrv.h
+--- linux-4.1.3/drivers/video/mxc/mxc_dispdrv.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/video/mxc/mxc_dispdrv.h 2015-07-27 23:13:08.749764128 +0200
+@@ -0,0 +1,54 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++#ifndef __MXC_DISPDRV_H__
++#define __MXC_DISPDRV_H__
++#include <linux/fb.h>
++
++struct mxc_dispdrv_handle {
++ struct mxc_dispdrv_driver *drv;
++};
++
++struct mxc_dispdrv_setting {
++ /*input-feedback parameter*/
++ struct fb_info *fbi;
++ int if_fmt;
++ int default_bpp;
++ char *dft_mode_str;
++
++ /*feedback parameter*/
++ int dev_id;
++ int disp_id;
++};
++
++struct mxc_dispdrv_driver {
++ const char *name;
++ int (*init) (struct mxc_dispdrv_handle *, struct mxc_dispdrv_setting *);
++ /* deferred operations after dev_id and disp_id pass usage check */
++ int (*post_init) (struct mxc_dispdrv_handle *, int dev_id, int disp_id);
++ void (*deinit) (struct mxc_dispdrv_handle *);
++ /* display driver enable function for extension */
++ int (*enable) (struct mxc_dispdrv_handle *);
++ /* display driver disable function, called at early part of fb_blank */
++ void (*disable) (struct mxc_dispdrv_handle *);
++ /* display driver setup function, called at early part of fb_set_par */
++ int (*setup) (struct mxc_dispdrv_handle *, struct fb_info *fbi);
++};
++
++struct mxc_dispdrv_handle *mxc_dispdrv_register(struct mxc_dispdrv_driver *drv);
++int mxc_dispdrv_unregister(struct mxc_dispdrv_handle *handle);
++struct mxc_dispdrv_handle *mxc_dispdrv_gethandle(char *name,
++ struct mxc_dispdrv_setting *setting);
++void mxc_dispdrv_puthandle(struct mxc_dispdrv_handle *handle);
++int mxc_dispdrv_setdata(struct mxc_dispdrv_handle *handle, void *data);
++void *mxc_dispdrv_getdata(struct mxc_dispdrv_handle *handle);
++#endif
+diff -Nur linux-4.1.3/drivers/video/mxc/mxc_edid.c linux-xbian-imx6/drivers/video/mxc/mxc_edid.c
+--- linux-4.1.3/drivers/video/mxc/mxc_edid.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/video/mxc/mxc_edid.c 2015-07-27 23:13:08.753749907 +0200
+@@ -0,0 +1,828 @@
++/*
++ * Copyright 2009-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @defgroup Framebuffer Framebuffer Driver for SDC and ADC.
++ */
++
++/*!
++ * @file mxc_edid.c
++ *
++ * @brief MXC EDID driver
++ *
++ * @ingroup Framebuffer
++ */
++
++/*!
++ * Include files
++ */
++#include <linux/i2c.h>
++#include <linux/fb.h>
++#include <video/mxc_edid.h>
++#include "../fbdev/edid.h"
++
++#undef DEBUG /* define this for verbose EDID parsing output */
++#ifdef DEBUG
++#define DPRINTK(fmt, args...) printk(fmt, ## args)
++#else
++#define DPRINTK(fmt, args...)
++#endif
++
++const struct fb_videomode mxc_cea_mode[64] = {
++ /* #1: 640x480p@59.94/60Hz 4:3 */
++ [1] = {
++ NULL, 60, 640, 480, 39722, 48, 16, 33, 10, 96, 2, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #2: 720x480p@59.94/60Hz 4:3 */
++ [2] = {
++ NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #3: 720x480p@59.94/60Hz 16:9 */
++ [3] = {
++ NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #4: 1280x720p@59.94/60Hz 16:9 */
++ [4] = {
++ NULL, 60, 1280, 720, 13468, 220, 110, 20, 5, 40, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0
++ },
++ /* #5: 1920x1080i@59.94/60Hz 16:9 */
++ [5] = {
++ NULL, 60, 1920, 1080, 13468, 88, 148, 4, 31, 44, 10,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_INTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #6: 720(1440)x480iH@59.94/60Hz 4:3 */
++ [6] = {
++ NULL, 60, 1440, 480, 37037, 38, 114, 8, 31, 124, 6, 0,
++ FB_VMODE_INTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #7: 720(1440)x480iH@59.94/60Hz 16:9 */
++ [7] = {
++ NULL, 60, 1440, 480, 37037, 38, 114, 8, 31, 124, 6, 0,
++ FB_VMODE_INTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #8: 720(1440)x240pH@59.94/60Hz 4:3 */
++ [8] = {
++ NULL, 60, 1440, 240, 37108, 114, 38, 15, 4, 124, 3, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #9: 720(1440)x240pH@59.94/60Hz 16:9 */
++ [9] = {
++ NULL, 60, 1440, 240, 37108, 114, 38, 15, 4, 124, 3, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #14: 1440x480p@59.94/60Hz 4:3 */
++ [14] = {
++ NULL, 60, 1440, 480, 18500, 120, 32, 30, 9, 124, 6, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #15: 1440x480p@59.94/60Hz 16:9 */
++ [15] = {
++ NULL, 60, 1440, 480, 18500, 120, 32, 30, 9, 124, 6, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #16: 1920x1080p@60Hz 16:9 */
++ [16] = {
++ NULL, 60, 1920, 1080, 6734, 148, 88, 36, 4, 44, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #17: 720x576pH@50Hz 4:3 */
++ [17] = {
++ NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #18: 720x576pH@50Hz 16:9 */
++ [18] = {
++ NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #19: 1280x720p@50Hz */
++ [19] = {
++ NULL, 50, 1280, 720, 13468, 220, 440, 20, 5, 40, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #20: 1920x1080i@50Hz */
++ [20] = {
++ NULL, 50, 1920, 1080, 13468, 148, 528, 4, 31, 44, 10,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_INTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #21: 720(1440)x576i@50Hz */
++ [21] = {
++ NULL, 50, 1440, 576, 37037, 24, 138, 4, 39, 126, 6, 0,
++ FB_VMODE_INTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #22: 720(1440)x576i@50Hz */
++ [22] = {
++ NULL, 50, 1440, 576, 37037, 24, 138, 4, 39, 126, 6, 0,
++ FB_VMODE_INTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #23: 720(1440)x288pH@50Hz 4:3 */
++ [23] = {
++ NULL, 50, 1440, 288, 37037, 138, 24, 19, 2, 126, 3, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #24: 720(1440)x288pH@50Hz 16:9 */
++ [24] = {
++ NULL, 50, 1440, 288, 37037, 138, 24, 19, 2, 126, 3, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #29: 720(1440)x576pH@50Hz 4:3 */
++ [29] = {
++ NULL, 50, 1440, 576, 18518, 136, 24, 39, 5, 128, 5, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, 0,
++ },
++ /* #30: 720(1440)x576pH@50Hz 16:9 */
++ [30] = {
++ NULL, 50, 1440, 576, 18518, 136, 24, 39, 5, 128, 5, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #31: 1920x1080p@50Hz */
++ [31] = {
++ NULL, 50, 1920, 1080, 6734, 148, 528, 36, 4, 44, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #32: 1920x1080p@23.98/24Hz */
++ [32] = {
++ NULL, 24, 1920, 1080, 13468, 148, 638, 36, 4, 44, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #33: 1920x1080p@25Hz */
++ [33] = {
++ NULL, 25, 1920, 1080, 13468, 148, 528, 36, 4, 44, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #34: 1920x1080p@30Hz */
++ [34] = {
++ NULL, 30, 1920, 1080, 13468, 148, 88, 36, 4, 44, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0,
++ },
++ /* #41: 1280x720p@100Hz 16:9 */
++ [41] = {
++ NULL, 100, 1280, 720, 6734, 220, 440, 20, 5, 40, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0
++ },
++ /* #47: 1280x720p@119.88/120Hz 16:9 */
++ [47] = {
++ NULL, 120, 1280, 720, 6734, 220, 110, 20, 5, 40, 5,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_16_9, 0
++ },
++};
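++
++/*
++ * Reading the table above: the positional initializers follow struct
++ * fb_videomode declaration order (name, refresh, xres, yres, pixclock,
++ * left_margin, right_margin, upper_margin, lower_margin, hsync_len,
++ * vsync_len, sync, vmode, flag), with pixclock in picoseconds as per
++ * KHZ2PICOS(); e.g. entry #16 (1080p60) uses 6734ps, i.e. ~148.5MHz.
++ */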
++
++/*
++ * We have a special version of fb_mode_is_equal that ignores
++ * pixclock, since many CEA modes support two pixel frequencies,
++ * e.g. 640x480 @ 60Hz or 59.94Hz.
++ */
++int mxc_edid_fb_mode_is_equal(bool use_aspect,
++ const struct fb_videomode *mode1,
++ const struct fb_videomode *mode2,
++ u32 mode_mask)
++{
++ u32 mask;
++
++ if (use_aspect)
++ mask = ~0;
++ else
++ mask = ~FB_VMODE_ASPECT_MASK;
++
++ return (mode1->xres == mode2->xres &&
++ mode1->yres == mode2->yres &&
++ mode1->hsync_len == mode2->hsync_len &&
++ mode1->vsync_len == mode2->vsync_len &&
++ mode1->left_margin == mode2->left_margin &&
++ mode1->right_margin == mode2->right_margin &&
++ mode1->upper_margin == mode2->upper_margin &&
++ mode1->lower_margin == mode2->lower_margin &&
++ mode1->sync == mode2->sync &&
++ /* refresh check: 59.94Hz and 60Hz share the same entry
++ * in mxc_cea_mode */
++ abs(mode1->refresh - mode2->refresh) <= 1 &&
++ (mode1->vmode & mask & mode_mask) ==
++ (mode2->vmode & mask & mode_mask));
++}
++EXPORT_SYMBOL(mxc_edid_fb_mode_is_equal);
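++
++/*
++ * Minimal usage sketch (hypothetical caller): match a parsed timing
++ * against the CEA table while ignoring the aspect-ratio flags, the
++ * same pattern mxc_edid_var_to_vic() below uses:
++ *
++ *   struct fb_videomode m;
++ *   fb_var_to_videomode(&m, var);
++ *   if (mxc_edid_fb_mode_is_equal(false, &m, &mxc_cea_mode[16],
++ *                                 FB_VMODE_MASK))
++ *       pr_debug("VIC 16 (1080p60), aspect bits ignored\n");
++ */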
++
++static void get_detailed_timing(unsigned char *block,
++ struct fb_videomode *mode)
++{
++ mode->xres = H_ACTIVE;
++ mode->yres = V_ACTIVE;
++ mode->pixclock = PIXEL_CLOCK;
++ mode->pixclock /= 1000;
++ mode->pixclock = KHZ2PICOS(mode->pixclock);
++ mode->right_margin = H_SYNC_OFFSET;
++ mode->left_margin = (H_ACTIVE + H_BLANKING) -
++ (H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH);
++ mode->upper_margin = V_BLANKING - V_SYNC_OFFSET -
++ V_SYNC_WIDTH;
++ mode->lower_margin = V_SYNC_OFFSET;
++ mode->hsync_len = H_SYNC_WIDTH;
++ mode->vsync_len = V_SYNC_WIDTH;
++ if (HSYNC_POSITIVE)
++ mode->sync |= FB_SYNC_HOR_HIGH_ACT;
++ if (VSYNC_POSITIVE)
++ mode->sync |= FB_SYNC_VERT_HIGH_ACT;
++ mode->refresh = PIXEL_CLOCK/((H_ACTIVE + H_BLANKING) *
++ (V_ACTIVE + V_BLANKING));
++ if (INTERLACED) {
++ mode->yres *= 2;
++ mode->upper_margin *= 2;
++ mode->lower_margin *= 2;
++ mode->vsync_len *= 2;
++ mode->vmode |= FB_VMODE_INTERLACED;
++ }
++ mode->flag = FB_MODE_IS_DETAILED;
++
++ if ((H_SIZE / 16) == (V_SIZE / 9))
++ mode->vmode |= FB_VMODE_ASPECT_16_9;
++ else if ((H_SIZE / 4) == (V_SIZE / 3))
++ mode->vmode |= FB_VMODE_ASPECT_4_3;
++ else if ((mode->xres / 16) == (mode->yres / 9))
++ mode->vmode |= FB_VMODE_ASPECT_16_9;
++ else if ((mode->xres / 4) == (mode->yres / 3))
++ mode->vmode |= FB_VMODE_ASPECT_4_3;
++
++ if (mode->vmode & FB_VMODE_ASPECT_16_9)
++ DPRINTK("Aspect ratio: 16:9\n");
++ if (mode->vmode & FB_VMODE_ASPECT_4_3)
++ DPRINTK("Aspect ratio: 4:3\n");
++ DPRINTK(" %d MHz ", PIXEL_CLOCK/1000000);
++ DPRINTK("%d %d %d %d ", H_ACTIVE, H_ACTIVE + H_SYNC_OFFSET,
++ H_ACTIVE + H_SYNC_OFFSET + H_SYNC_WIDTH, H_ACTIVE + H_BLANKING);
++ DPRINTK("%d %d %d %d ", V_ACTIVE, V_ACTIVE + V_SYNC_OFFSET,
++ V_ACTIVE + V_SYNC_OFFSET + V_SYNC_WIDTH, V_ACTIVE + V_BLANKING);
++ DPRINTK("%sHSync %sVSync\n\n", (HSYNC_POSITIVE) ? "+" : "-",
++ (VSYNC_POSITIVE) ? "+" : "-");
++}
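++
++/*
++ * The H_ACTIVE/V_ACTIVE/... macros above (from the fbdev edid.h
++ * header) decode a standard 18-byte EDID detailed timing descriptor:
++ * bytes 0-1 hold the pixel clock in units of 10kHz (little endian),
++ * bytes 2-4 the horizontal active/blanking counts, bytes 5-7 the
++ * vertical ones, bytes 8-11 the sync offsets/widths, and bit 7 of
++ * byte 17 marks an interlaced timing.
++ */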
++
++int mxc_edid_parse_ext_blk(unsigned char *edid,
++ struct mxc_edid_cfg *cfg,
++ struct fb_monspecs *specs)
++{
++ char detail_timing_desc_offset;
++ struct fb_videomode *mode, *m;
++ unsigned char index = 0x0;
++ unsigned char *block;
++ int i, num = 0, revision;
++
++ if (edid[index++] != 0x2) /* only CEA extension blocks are supported for now */
++ return -1;
++ revision = edid[index++];
++ DPRINTK("cea extent revision %d\n", revision);
++ mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL);
++ if (mode == NULL)
++ return -1;
++
++ detail_timing_desc_offset = edid[index++];
++
++ if (revision >= 2) {
++ cfg->cea_underscan = (edid[index] >> 7) & 0x1;
++ cfg->cea_basicaudio = (edid[index] >> 6) & 0x1;
++ cfg->cea_ycbcr444 = (edid[index] >> 5) & 0x1;
++ cfg->cea_ycbcr422 = (edid[index] >> 4) & 0x1;
++
++ DPRINTK("CEA underscan %d\n", cfg->cea_underscan);
++ DPRINTK("CEA basicaudio %d\n", cfg->cea_basicaudio);
++ DPRINTK("CEA ycbcr444 %d\n", cfg->cea_ycbcr444);
++ DPRINTK("CEA ycbcr422 %d\n", cfg->cea_ycbcr422);
++ }
++
++ if (revision >= 3) {
++ /* short desc */
++ DPRINTK("CEA Short desc timmings\n");
++ index++;
++ while (index < detail_timing_desc_offset) {
++ unsigned char tagcode, blklen;
++
++ tagcode = (edid[index] >> 5) & 0x7;
++ blklen = (edid[index]) & 0x1f;
++
++ DPRINTK("Tagcode %x Len %d\n", tagcode, blklen);
++
++ switch (tagcode) {
++ case 0x2: /*Video data block*/
++ {
++ int cea_idx;
++ i = 0;
++ while (i < blklen) {
++ index++;
++ cea_idx = edid[index] & 0x7f;
++ if (cea_idx < ARRAY_SIZE(mxc_cea_mode) &&
++ (mxc_cea_mode[cea_idx].xres)) {
++ DPRINTK("Support CEA Format #%d\n", cea_idx);
++ mode[num] = mxc_cea_mode[cea_idx];
++ mode[num].flag |= FB_MODE_IS_STANDARD;
++ num++;
++ }
++ i++;
++ }
++ break;
++ }
++ case 0x3: /*Vendor specific data*/
++ {
++ unsigned char IEEE_reg_iden[3];
++ unsigned char deep_color;
++ unsigned char latency_present;
++ unsigned char I_latency_present;
++ unsigned char hdmi_video_present;
++ unsigned char hdmi_3d_present;
++ unsigned char hdmi_3d_multi_present;
++ unsigned char hdmi_vic_len;
++ unsigned char hdmi_3d_len;
++ unsigned char index_inc = 0;
++ unsigned char vsd_end;
++
++ vsd_end = index + blklen;
++
++ IEEE_reg_iden[0] = edid[index+1];
++ IEEE_reg_iden[1] = edid[index+2];
++ IEEE_reg_iden[2] = edid[index+3];
++ cfg->physical_address[0] = (edid[index+4] & 0xf0) >> 4;
++ cfg->physical_address[1] = (edid[index+4] & 0x0f);
++ cfg->physical_address[2] = (edid[index+5] & 0xf0) >> 4;
++ cfg->physical_address[3] = (edid[index+5] & 0x0f);
++
++ if ((IEEE_reg_iden[0] == 0x03) &&
++ (IEEE_reg_iden[1] == 0x0c) &&
++ (IEEE_reg_iden[2] == 0x00))
++ cfg->hdmi_cap = 1;
++
++ if (blklen > 5) {
++ deep_color = edid[index+6];
++ if (deep_color & 0x80)
++ cfg->vsd_support_ai = true;
++ if (deep_color & 0x40)
++ cfg->vsd_dc_48bit = true;
++ if (deep_color & 0x20)
++ cfg->vsd_dc_36bit = true;
++ if (deep_color & 0x10)
++ cfg->vsd_dc_30bit = true;
++ if (deep_color & 0x08)
++ cfg->vsd_dc_y444 = true;
++ if (deep_color & 0x01)
++ cfg->vsd_dvi_dual = true;
++ }
++
++ DPRINTK("VSD hdmi capability %d\n", cfg->hdmi_cap);
++ DPRINTK("VSD support ai %d\n", cfg->vsd_support_ai);
++ DPRINTK("VSD support deep color 48bit %d\n", cfg->vsd_dc_48bit);
++ DPRINTK("VSD support deep color 36bit %d\n", cfg->vsd_dc_36bit);
++ DPRINTK("VSD support deep color 30bit %d\n", cfg->vsd_dc_30bit);
++ DPRINTK("VSD support deep color y444 %d\n", cfg->vsd_dc_y444);
++ DPRINTK("VSD support dvi dual %d\n", cfg->vsd_dvi_dual);
++
++ if (blklen > 6)
++ cfg->vsd_max_tmdsclk_rate = edid[index+7] * 5;
++ DPRINTK("VSD MAX TMDS CLOCK RATE %d\n", cfg->vsd_max_tmdsclk_rate);
++
++ if (blklen > 7) {
++ latency_present = edid[index+8] >> 7;
++ I_latency_present = (edid[index+8] & 0x40) >> 6;
++ hdmi_video_present = (edid[index+8] & 0x20) >> 5;
++ cfg->vsd_cnc3 = (edid[index+8] & 0x8) >> 3;
++ cfg->vsd_cnc2 = (edid[index+8] & 0x4) >> 2;
++ cfg->vsd_cnc1 = (edid[index+8] & 0x2) >> 1;
++ cfg->vsd_cnc0 = edid[index+8] & 0x1;
++
++ DPRINTK("VSD cnc0 %d\n", cfg->vsd_cnc0);
++ DPRINTK("VSD cnc1 %d\n", cfg->vsd_cnc1);
++ DPRINTK("VSD cnc2 %d\n", cfg->vsd_cnc2);
++ DPRINTK("VSD cnc3 %d\n", cfg->vsd_cnc3);
++ DPRINTK("latency_present %d\n", latency_present);
++ DPRINTK("I_latency_present %d\n", I_latency_present);
++ DPRINTK("hdmi_video_present %d\n", hdmi_video_present);
++
++ } else {
++ index += blklen;
++ break;
++ }
++
++ index += 9;
++
++ /*latency present */
++ if (latency_present) {
++ cfg->vsd_video_latency = edid[index++];
++ cfg->vsd_audio_latency = edid[index++];
++
++ if (I_latency_present) {
++ cfg->vsd_I_video_latency = edid[index++];
++ cfg->vsd_I_audio_latency = edid[index++];
++ } else {
++ cfg->vsd_I_video_latency = cfg->vsd_video_latency;
++ cfg->vsd_I_audio_latency = cfg->vsd_audio_latency;
++ }
++
++ DPRINTK("VSD latency video_latency %d\n", cfg->vsd_video_latency);
++ DPRINTK("VSD latency audio_latency %d\n", cfg->vsd_audio_latency);
++ DPRINTK("VSD latency I_video_latency %d\n", cfg->vsd_I_video_latency);
++ DPRINTK("VSD latency I_audio_latency %d\n", cfg->vsd_I_audio_latency);
++ }
++
++ if (hdmi_video_present) {
++ hdmi_3d_present = edid[index] >> 7;
++ hdmi_3d_multi_present = (edid[index] & 0x60) >> 5;
++ index++;
++ hdmi_vic_len = (edid[index] & 0xe0) >> 5;
++ hdmi_3d_len = edid[index] & 0x1f;
++ index++;
++
++ DPRINTK("hdmi_3d_present %d\n", hdmi_3d_present);
++ DPRINTK("hdmi_3d_multi_present %d\n", hdmi_3d_multi_present);
++ DPRINTK("hdmi_vic_len %d\n", hdmi_vic_len);
++ DPRINTK("hdmi_3d_len %d\n", hdmi_3d_len);
++
++ if (hdmi_vic_len > 0) {
++ for (i = 0; i < hdmi_vic_len; i++) {
++ cfg->hdmi_vic[i] = edid[index++];
++ DPRINTK("HDMI_vic=%d\n", cfg->hdmi_vic[i]);
++ }
++ }
++
++ if (hdmi_3d_len > 0) {
++ if (hdmi_3d_present) {
++ cfg->hdmi_3d_present = hdmi_3d_present;
++ if (hdmi_3d_multi_present == 0x1) {
++ cfg->hdmi_3d_struct_all = (edid[index] << 8) | edid[index+1];
++ index_inc = 2;
++ } else if (hdmi_3d_multi_present == 0x2) {
++ cfg->hdmi_3d_struct_all = (edid[index] << 8) | edid[index+1];
++ cfg->hdmi_3d_mask_all = (edid[index+2] << 8) | edid[index+3];
++ index_inc = 4;
++ } else
++ index_inc = 0;
++ cfg->hdmi_3d_len = hdmi_3d_len - index_inc;
++ cfg->hdmi_3d_multi_present = hdmi_3d_multi_present;
++ }
++
++ DPRINTK("HDMI 3d struct all =0x%x\n", cfg->hdmi_3d_struct_all);
++ DPRINTK("HDMI 3d mask all =0x%x\n", cfg->hdmi_3d_mask_all);
++
++ /* Read 2D vic 3D_struct */
++ if ((hdmi_3d_len - index_inc) > 0) {
++ DPRINTK("Support 3D video format\n");
++ i = 0;
++ while ((hdmi_3d_len - index_inc) > 0) {
++
++ cfg->hdmi_3d_format[i].vic_order_2d = edid[index+index_inc] >> 4;
++ cfg->hdmi_3d_format[i].struct_3d = edid[index+index_inc] & 0x0f;
++ index_inc++;
++
++ if (cfg->hdmi_3d_format[i].struct_3d == 8) {
++ cfg->hdmi_3d_format[i].detail_3d = edid[index+index_inc] >> 4;
++ index_inc++;
++ } else if (cfg->hdmi_3d_format[i].struct_3d > 8) {
++ cfg->hdmi_3d_format[i].detail_3d = 0;
++ index_inc++;
++ }
++
++ DPRINTK("vic_order_2d=%d, 3d_struct=%d, 3d_detail=0x%x\n",
++ cfg->hdmi_3d_format[i].vic_order_2d,
++ cfg->hdmi_3d_format[i].struct_3d,
++ cfg->hdmi_3d_format[i].detail_3d);
++ i++;
++ }
++ }
++ index += index_inc;
++ }
++ }
++
++ index = vsd_end;
++
++ break;
++ }
++ case 0x1: /*Audio data block*/
++ {
++ u8 audio_format, max_ch, byte1, byte2, byte3;
++
++ i = 0;
++ cfg->max_channels = 0;
++ cfg->sample_rates = 0;
++ cfg->sample_sizes = 0;
++
++ while (i < blklen) {
++ byte1 = edid[index + 1];
++ byte2 = edid[index + 2];
++ byte3 = edid[index + 3];
++ index += 3;
++ i += 3;
++
++ audio_format = byte1 >> 3;
++ max_ch = (byte1 & 0x07) + 1;
++
++ DPRINTK("Audio Format Descriptor : %2d\n", audio_format);
++ DPRINTK("Max Number of Channels : %2d\n", max_ch);
++ DPRINTK("Sample Rates : %02x\n", byte2);
++
++ /* ALSA can't specify specific compressed
++ * formats, so we only care about PCM for now. */
++ if (audio_format == AUDIO_CODING_TYPE_LPCM) {
++ if (max_ch > cfg->max_channels)
++ cfg->max_channels = max_ch;
++
++ cfg->sample_rates |= byte2;
++ cfg->sample_sizes |= byte3 & 0x7;
++ DPRINTK("Sample Sizes : %02x\n",
++ byte3 & 0x7);
++ }
++ }
++ break;
++ }
++ case 0x4: /*Speaker allocation block*/
++ {
++ i = 0;
++ while (i < blklen) {
++ cfg->speaker_alloc = edid[index + 1];
++ index += 3;
++ i += 3;
++ DPRINTK("Speaker Alloc : %02x\n", cfg->speaker_alloc);
++ }
++ break;
++ }
++ case 0x7: /*User extended block*/
++ default:
++ /* skip */
++ DPRINTK("Not handle block, tagcode = 0x%x\n", tagcode);
++ index += blklen;
++ break;
++ }
++
++ index++;
++ }
++ }
++
++ /* long desc */
++ DPRINTK("CEA long desc timmings\n");
++ index = detail_timing_desc_offset;
++ block = edid + index;
++ while (index < (EDID_LENGTH - DETAILED_TIMING_DESCRIPTION_SIZE)) {
++ if (!(block[0] == 0x00 && block[1] == 0x00)) {
++ get_detailed_timing(block, &mode[num]);
++ num++;
++ }
++ block += DETAILED_TIMING_DESCRIPTION_SIZE;
++ index += DETAILED_TIMING_DESCRIPTION_SIZE;
++ }
++
++ if (!num) {
++ kfree(mode);
++ return 0;
++ }
++
++ m = kmalloc((num + specs->modedb_len) *
++ sizeof(struct fb_videomode), GFP_KERNEL);
++ if (!m) {
++ kfree(mode); /* don't leak the temporary mode array */
++ return 0;
++ }
++
++ if (specs->modedb_len) {
++ memmove(m, specs->modedb,
++ specs->modedb_len * sizeof(struct fb_videomode));
++ kfree(specs->modedb);
++ }
++ memmove(m+specs->modedb_len, mode,
++ num * sizeof(struct fb_videomode));
++ kfree(mode);
++
++ specs->modedb_len += num;
++ specs->modedb = m;
++
++ return 0;
++}
++EXPORT_SYMBOL(mxc_edid_parse_ext_blk);
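++
++/*
++ * For reference, the CEA-861 extension block walked above is laid out
++ * as: byte 0 tag (0x02), byte 1 revision, byte 2 offset 'd' of the
++ * first detailed timing descriptor, byte 3 flags (underscan, basic
++ * audio, YCbCr444, YCbCr422), bytes 4..d-1 data blocks headed by a
++ * 3-bit tag plus 5-bit length, and bytes d..126 the 18-byte detailed
++ * timing descriptors.
++ */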
++
++static int mxc_edid_readblk(struct i2c_adapter *adp,
++ unsigned short addr, unsigned char *edid)
++{
++ int ret = 0, extblknum = 0;
++ unsigned char regaddr = 0x0;
++ struct i2c_msg msg[2] = {
++ {
++ .addr = addr,
++ .flags = 0,
++ .len = 1,
++ .buf = &regaddr,
++ }, {
++ .addr = addr,
++ .flags = I2C_M_RD,
++ .len = EDID_LENGTH,
++ .buf = edid,
++ },
++ };
++
++ ret = i2c_transfer(adp, msg, ARRAY_SIZE(msg));
++ if (ret != ARRAY_SIZE(msg)) {
++ DPRINTK("unable to read EDID block\n");
++ return -EIO;
++ }
++
++ if (edid[1] == 0x00)
++ return -ENOENT;
++
++ extblknum = edid[0x7E];
++
++ if (extblknum) {
++ regaddr = 128;
++ msg[1].buf = edid + EDID_LENGTH;
++
++ ret = i2c_transfer(adp, msg, ARRAY_SIZE(msg));
++ if (ret != ARRAY_SIZE(msg)) {
++ DPRINTK("unable to read EDID ext block\n");
++ return -EIO;
++ }
++ }
++
++ return extblknum;
++}
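++
++/*
++ * Byte 0x7E of the base EDID block holds the extension count; blocks 0
++ * and 1 share E-DDC segment 0, so re-running the same transfer with
++ * the register address set to 128 fetches the first extension.
++ */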
++
++static int mxc_edid_readsegblk(struct i2c_adapter *adp, unsigned short addr,
++ unsigned char *edid, int seg_num)
++{
++ int ret = 0;
++ unsigned char segment = 0x1, regaddr = 0;
++ struct i2c_msg msg[3] = {
++ {
++ .addr = 0x30,
++ .flags = 0,
++ .len = 1,
++ .buf = &segment,
++ }, {
++ .addr = addr,
++ .flags = 0,
++ .len = 1,
++ .buf = &regaddr,
++ }, {
++ .addr = addr,
++ .flags = I2C_M_RD,
++ .len = EDID_LENGTH,
++ .buf = edid,
++ },
++ };
++
++ ret = i2c_transfer(adp, msg, ARRAY_SIZE(msg));
++ if (ret != ARRAY_SIZE(msg)) {
++ DPRINTK("unable to read EDID block\n");
++ return -EIO;
++ }
++
++ if (seg_num == 2) {
++ regaddr = 128;
++ msg[2].buf = edid + EDID_LENGTH;
++
++ ret = i2c_transfer(adp, msg, ARRAY_SIZE(msg));
++ if (ret != ARRAY_SIZE(msg)) {
++ DPRINTK("unable to read EDID block\n");
++ return -EIO;
++ }
++ }
++
++ return ret;
++}
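++
++/*
++ * EDID blocks beyond the first 256 bytes are reached through the E-DDC
++ * segment pointer: the extra write to I2C address 0x30 selects segment
++ * 1 (blocks 2 and 3) before the usual offset write and data read run.
++ */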
++
++int mxc_edid_var_to_vic(struct fb_var_screeninfo *var)
++{
++ int i;
++ struct fb_videomode m;
++
++ for (i = 0; i < ARRAY_SIZE(mxc_cea_mode); i++) {
++ fb_var_to_videomode(&m, var);
++ if (mxc_edid_fb_mode_is_equal(false, &m, &mxc_cea_mode[i], FB_VMODE_MASK))
++ break;
++ }
++
++ if (i == ARRAY_SIZE(mxc_cea_mode))
++ return 0;
++
++ return i;
++}
++EXPORT_SYMBOL(mxc_edid_var_to_vic);
++
++int mxc_edid_mode_to_vic(const struct fb_videomode *mode, u32 mode_mask)
++{
++ int i;
++ bool use_aspect = (mode->vmode & FB_VMODE_ASPECT_MASK);
++ u32 use_mask = mode_mask ? mode_mask : FB_VMODE_MASK ^ (FB_VMODE_3D_MASK | FB_VMODE_FRACTIONAL);
++
++ for (i = 0; i < ARRAY_SIZE(mxc_cea_mode); i++) {
++ if (mxc_edid_fb_mode_is_equal(use_aspect, mode, &mxc_cea_mode[i], use_mask))
++ break;
++ }
++
++ if (i == ARRAY_SIZE(mxc_cea_mode))
++ return 0;
++
++ return i;
++}
++EXPORT_SYMBOL(mxc_edid_mode_to_vic);
++
++/* the caller must ensure the edid buffer holds 512 bytes */
++int mxc_edid_read(struct i2c_adapter *adp, unsigned short addr,
++ unsigned char *edid, struct mxc_edid_cfg *cfg, struct fb_info *fbi)
++{
++ int ret = 0, extblknum;
++ if (!adp || !edid || !cfg || !fbi)
++ return -EINVAL;
++
++ memset(edid, 0, EDID_LENGTH*4);
++ memset(cfg, 0, sizeof(struct mxc_edid_cfg));
++
++ extblknum = mxc_edid_readblk(adp, addr, edid);
++ if (extblknum < 0)
++ return extblknum;
++
++ /* edid first block parsing */
++ memset(&fbi->monspecs, 0, sizeof(fbi->monspecs));
++ fb_edid_to_monspecs(edid, &fbi->monspecs);
++
++ if (extblknum) {
++ int i;
++
++ /* need read segment block? */
++ if (extblknum > 1) {
++ ret = mxc_edid_readsegblk(adp, addr,
++ edid + EDID_LENGTH*2, extblknum - 1);
++ if (ret < 0)
++ return ret;
++ }
++
++ for (i = 1; i <= extblknum; i++)
++ /* edid ext block parsing */
++ mxc_edid_parse_ext_blk(edid + i*EDID_LENGTH,
++ cfg, &fbi->monspecs);
++ }
++
++ return 0;
++}
++EXPORT_SYMBOL(mxc_edid_read);
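++
++/*
++ * Resulting layout of the 512-byte buffer: edid[0..127] base block,
++ * edid[128..255] first CEA extension, edid[256..511] blocks 2 and 3
++ * fetched via segment 1 when more than one extension is reported.
++ */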
++
++const struct fb_videomode *mxc_fb_find_nearest_mode(const struct fb_videomode *mode,
++ struct list_head *head)
++{
++ struct list_head *pos;
++ struct fb_modelist *modelist;
++ struct fb_videomode *cmode, *best = NULL;
++ u32 diff = -1, diff_refresh = -1;
++
++ list_for_each(pos, head) {
++ u32 d;
++
++ modelist = list_entry(pos, struct fb_modelist, list);
++ cmode = &modelist->mode;
++
++ if (!(mode->vmode & FB_VMODE_3D_MASK) &&
++ (cmode->vmode & FB_VMODE_3D_MASK))
++ continue;
++ if ((mode->vmode & FB_VMODE_3D_MASK) &&
++ ((mode->vmode & FB_VMODE_3D_MASK) != (cmode->vmode & FB_VMODE_3D_MASK)))
++ continue;
++
++ if ((mode->vmode & FB_VMODE_MASK_SIMPLE) != (cmode->vmode & FB_VMODE_MASK_SIMPLE))
++ continue;
++
++ if ((mode->vmode & FB_VMODE_ASPECT_MASK) &&
++ ((mode->vmode & FB_VMODE_ASPECT_MASK) != (cmode->vmode & FB_VMODE_ASPECT_MASK)))
++ continue;
++
++ if ((mode->vmode & FB_VMODE_FRACTIONAL) != (cmode->vmode & FB_VMODE_FRACTIONAL))
++ continue;
++
++ d = abs(cmode->xres - mode->xres) +
++ abs(cmode->yres - mode->yres);
++ if (diff > d) {
++ diff = d;
++ diff_refresh = abs(cmode->refresh - mode->refresh);
++ best = cmode;
++ } else if (diff == d) {
++ d = abs(cmode->refresh - mode->refresh);
++ if (diff_refresh > d) {
++ diff_refresh = d;
++ best = cmode;
++ }
++ }
++ }
++
++ return best;
++}
++EXPORT_SYMBOL(mxc_fb_find_nearest_mode);
+diff -Nur linux-4.1.3/drivers/video/mxc/mxcfb_hx8369_wvga.c linux-xbian-imx6/drivers/video/mxc/mxcfb_hx8369_wvga.c
+--- linux-4.1.3/drivers/video/mxc/mxcfb_hx8369_wvga.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/video/mxc/mxcfb_hx8369_wvga.c 2015-07-27 23:13:08.753749907 +0200
+@@ -0,0 +1,449 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/platform_device.h>
++#include <linux/err.h>
++#include <linux/clk.h>
++#include <linux/console.h>
++#include <linux/io.h>
++#include <linux/bitops.h>
++#include <linux/spinlock.h>
++#include <linux/mipi_dsi.h>
++#include <linux/mxcfb.h>
++#include <linux/backlight.h>
++#include <video/mipi_display.h>
++
++#include "mipi_dsi.h"
++
++#define MIPI_DSI_MAX_RET_PACK_SIZE (0x4)
++
++#define HX8369BL_MAX_BRIGHT (255)
++#define HX8369BL_DEF_BRIGHT (255)
++
++#define HX8369_MAX_DPHY_CLK (800)
++#define HX8369_ONE_DATA_LANE (0x1)
++#define HX8369_TWO_DATA_LANE (0x2)
++
++#define HX8369_CMD_SETEXTC (0xB9)
++#define HX8369_CMD_SETEXTC_LEN (0x4)
++#define HX8369_CMD_SETEXTC_PARAM_1 (0x6983ff)
++
++#define HX8369_CMD_GETHXID (0xF4)
++#define HX8369_CMD_GETHXID_LEN (0x4)
++#define HX8369_ID (0x69)
++#define HX8369_ID_MASK (0xFF)
++
++#define HX8369_CMD_SETDISP (0xB2)
++#define HX8369_CMD_SETDISP_LEN (16)
++#define HX8369_CMD_SETDISP_1_HALT (0x00)
++#define HX8369_CMD_SETDISP_2_RES_MODE (0x23)
++#define HX8369_CMD_SETDISP_3_BP (0x03)
++#define HX8369_CMD_SETDISP_4_FP (0x03)
++#define HX8369_CMD_SETDISP_5_SAP (0x70)
++#define HX8369_CMD_SETDISP_6_GENON (0x00)
++#define HX8369_CMD_SETDISP_7_GENOFF (0xff)
++#define HX8369_CMD_SETDISP_8_RTN (0x00)
++#define HX8369_CMD_SETDISP_9_TEI (0x00)
++#define HX8369_CMD_SETDISP_10_TEP_UP (0x00)
++#define HX8369_CMD_SETDISP_11_TEP_LOW (0x00)
++#define HX8369_CMD_SETDISP_12_BP_PE (0x03)
++#define HX8369_CMD_SETDISP_13_FP_PE (0x03)
++#define HX8369_CMD_SETDISP_14_RTN_PE (0x00)
++#define HX8369_CMD_SETDISP_15_GON (0x01)
++
++#define HX8369_CMD_SETCYC (0xB4)
++#define HX8369_CMD_SETCYC_LEN (6)
++#define HX8369_CMD_SETCYC_PARAM_1 (0x5f1d00)
++#define HX8369_CMD_SETCYC_PARAM_2 (0x060e)
++
++#define HX8369_CMD_SETGIP (0xD5)
++#define HX8369_CMD_SETGIP_LEN (27)
++#define HX8369_CMD_SETGIP_PARAM_1 (0x030400)
++#define HX8369_CMD_SETGIP_PARAM_2 (0x1c050100)
++#define HX8369_CMD_SETGIP_PARAM_3 (0x00030170)
++#define HX8369_CMD_SETGIP_PARAM_4 (0x51064000)
++#define HX8369_CMD_SETGIP_PARAM_5 (0x41000007)
++#define HX8369_CMD_SETGIP_PARAM_6 (0x07075006)
++#define HX8369_CMD_SETGIP_PARAM_7 (0x040f)
++
++#define HX8369_CMD_SETPOWER (0xB1)
++#define HX8369_CMD_SETPOWER_LEN (20)
++#define HX8369_CMD_SETPOWER_PARAM_1 (0x340001)
++#define HX8369_CMD_SETPOWER_PARAM_2 (0x0f0f0006)
++#define HX8369_CMD_SETPOWER_PARAM_3 (0x3f3f322a)
++#define HX8369_CMD_SETPOWER_PARAM_4 (0xe6013a07)
++#define HX8369_CMD_SETPOWER_PARAM_5 (0xe6e6e6e6)
++
++#define HX8369_CMD_SETVCOM (0xB6)
++#define HX8369_CMD_SETVCOM_LEN (3)
++#define HX8369_CMD_SETVCOM_PARAM_1 (0x5656)
++
++#define HX8369_CMD_SETPANEL (0xCC)
++#define HX8369_CMD_SETPANEL_PARAM_1 (0x02)
++
++#define HX8369_CMD_SETGAMMA (0xE0)
++#define HX8369_CMD_SETGAMMA_LEN (35)
++#define HX8369_CMD_SETGAMMA_PARAM_1 (0x221d00)
++#define HX8369_CMD_SETGAMMA_PARAM_2 (0x2e3f3d38)
++#define HX8369_CMD_SETGAMMA_PARAM_3 (0x0f0d064a)
++#define HX8369_CMD_SETGAMMA_PARAM_4 (0x16131513)
++#define HX8369_CMD_SETGAMMA_PARAM_5 (0x1d001910)
++#define HX8369_CMD_SETGAMMA_PARAM_6 (0x3f3d3822)
++#define HX8369_CMD_SETGAMMA_PARAM_7 (0x0d064a2e)
++#define HX8369_CMD_SETGAMMA_PARAM_8 (0x1315130f)
++#define HX8369_CMD_SETGAMMA_PARAM_9 (0x191016)
++
++#define HX8369_CMD_SETMIPI (0xBA)
++#define HX8369_CMD_SETMIPI_LEN (14)
++#define HX8369_CMD_SETMIPI_PARAM_1 (0xc6a000)
++#define HX8369_CMD_SETMIPI_PARAM_2 (0x10000a00)
++#define HX8369_CMD_SETMIPI_ONELANE (0x10 << 24)
++#define HX8369_CMD_SETMIPI_TWOLANE (0x11 << 24)
++#define HX8369_CMD_SETMIPI_PARAM_3 (0x00026f30)
++#define HX8369_CMD_SETMIPI_PARAM_4 (0x4018)
++
++#define HX8369_CMD_SETPIXEL_FMT (0x3A)
++#define HX8369_CMD_SETPIXEL_FMT_24BPP (0x77)
++#define HX8369_CMD_SETPIXEL_FMT_18BPP (0x66)
++#define HX8369_CMD_SETPIXEL_FMT_16BPP (0x55)
++
++#define HX8369_CMD_SETCLUMN_ADDR (0x2A)
++#define HX8369_CMD_SETCLUMN_ADDR_LEN (5)
++#define HX8369_CMD_SETCLUMN_ADDR_PARAM_1 (0xdf0000)
++#define HX8369_CMD_SETCLUMN_ADDR_PARAM_2 (0x01)
++
++#define HX8369_CMD_SETPAGE_ADDR (0x2B)
++#define HX8369_CMD_SETPAGE_ADDR_LEN (5)
++#define HX8369_CMD_SETPAGE_ADDR_PARAM_1 (0x1f0000)
++#define HX8369_CMD_SETPAGE_ADDR_PARAM_2 (0x03)
++
++#define HX8369_CMD_WRT_DISP_BRIGHT (0x51)
++#define HX8369_CMD_WRT_DISP_BRIGHT_PARAM_1 (0xFF)
++
++#define HX8369_CMD_WRT_CABC_MIN_BRIGHT (0x5E)
++#define HX8369_CMD_WRT_CABC_MIN_BRIGHT_PARAM_1 (0x20)
++
++#define HX8369_CMD_WRT_CABC_CTRL (0x55)
++#define HX8369_CMD_WRT_CABC_CTRL_PARAM_1 (0x1)
++
++#define HX8369_CMD_WRT_CTRL_DISP (0x53)
++#define HX8369_CMD_WRT_CTRL_DISP_PARAM_1 (0x24)
++
++#define CHECK_RETCODE(ret) \
++do { \
++ if (ret < 0) { \
++ dev_err(&mipi_dsi->pdev->dev, \
++ "%s ERR: ret:%d, line:%d.\n", \
++ __func__, ret, __LINE__); \
++ return ret; \
++ } \
++} while (0)
++
++static int hx8369bl_brightness;
++static int mipid_init_backlight(struct mipi_dsi_info *mipi_dsi);
++
++static struct fb_videomode truly_lcd_modedb[] = {
++ {
++ "TRULY-WVGA", 64, 480, 800, 37880,
++ 8, 8,
++ 6, 6,
++ 8, 6,
++ FB_SYNC_OE_LOW_ACT,
++ FB_VMODE_NONINTERLACED,
++ 0,
++ },
++};
++
++static struct mipi_lcd_config lcd_config = {
++ .virtual_ch = 0x0,
++ .data_lane_num = HX8369_TWO_DATA_LANE,
++ .max_phy_clk = HX8369_MAX_DPHY_CLK,
++ .dpi_fmt = MIPI_RGB888,
++};
++void mipid_hx8369_get_lcd_videomode(struct fb_videomode **mode, int *size,
++ struct mipi_lcd_config **data)
++{
++ *mode = &truly_lcd_modedb[0];
++ *size = ARRAY_SIZE(truly_lcd_modedb);
++ *data = &lcd_config;
++}
++
++int mipid_hx8369_lcd_setup(struct mipi_dsi_info *mipi_dsi)
++{
++ u32 buf[DSI_CMD_BUF_MAXSIZE];
++ int err;
++
++ dev_dbg(&mipi_dsi->pdev->dev, "MIPI DSI LCD setup.\n");
++ buf[0] = HX8369_CMD_SETEXTC | (HX8369_CMD_SETEXTC_PARAM_1 << 8);
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE,
++ buf, HX8369_CMD_SETEXTC_LEN);
++ CHECK_RETCODE(err);
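++ /* Note on the buf[] packing used throughout this file: each u32
++ * appears to be consumed least-significant byte first, so the
++ * command byte in bits 7:0 goes on the wire ahead of its
++ * parameters (here 0xb9 followed by 0xff 0x83 0x69).
++ */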
++ buf[0] = MIPI_DSI_MAX_RET_PACK_SIZE;
++ err = mipi_dsi_pkt_write(mipi_dsi,
++ MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
++ buf, 0);
++ CHECK_RETCODE(err);
++ buf[0] = HX8369_CMD_GETHXID;
++ err = mipi_dsi_pkt_read(mipi_dsi,
++ MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM,
++ buf, HX8369_CMD_GETHXID_LEN);
++ if (!err && ((buf[0] & HX8369_ID_MASK) == HX8369_ID)) {
++ dev_info(&mipi_dsi->pdev->dev,
++ "MIPI DSI LCD ID:0x%x.\n", buf[0]);
++ } else {
++ dev_err(&mipi_dsi->pdev->dev,
++ "mipi_dsi_pkt_read err:%d, data:0x%x.\n",
++ err, buf[0]);
++ dev_info(&mipi_dsi->pdev->dev,
++ "MIPI DSI LCD not detected!\n");
++ return err;
++ }
++
++ /* Set the LCD resolution to 480RGBx800, DPI interface,
++ * display operation mode: RGB data bypasses GRAM.
++ */
++ buf[0] = HX8369_CMD_SETDISP | (HX8369_CMD_SETDISP_1_HALT << 8) |
++ (HX8369_CMD_SETDISP_2_RES_MODE << 16) |
++ (HX8369_CMD_SETDISP_3_BP << 24);
++ buf[1] = HX8369_CMD_SETDISP_4_FP | (HX8369_CMD_SETDISP_5_SAP << 8) |
++ (HX8369_CMD_SETDISP_6_GENON << 16) |
++ (HX8369_CMD_SETDISP_7_GENOFF << 24);
++ buf[2] = HX8369_CMD_SETDISP_8_RTN | (HX8369_CMD_SETDISP_9_TEI << 8) |
++ (HX8369_CMD_SETDISP_10_TEP_UP << 16) |
++ (HX8369_CMD_SETDISP_11_TEP_LOW << 24);
++ buf[3] = HX8369_CMD_SETDISP_12_BP_PE |
++ (HX8369_CMD_SETDISP_13_FP_PE << 8) |
++ (HX8369_CMD_SETDISP_14_RTN_PE << 16) |
++ (HX8369_CMD_SETDISP_15_GON << 24);
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE,
++ buf, HX8369_CMD_SETDISP_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set display waveform cycle */
++ buf[0] = HX8369_CMD_SETCYC | (HX8369_CMD_SETCYC_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETCYC_PARAM_2;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE,
++ buf, HX8369_CMD_SETCYC_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set GIP timing output control */
++ buf[0] = HX8369_CMD_SETGIP | (HX8369_CMD_SETGIP_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETGIP_PARAM_2;
++ buf[2] = HX8369_CMD_SETGIP_PARAM_3;
++ buf[3] = HX8369_CMD_SETGIP_PARAM_4;
++ buf[4] = HX8369_CMD_SETGIP_PARAM_5;
++ buf[5] = HX8369_CMD_SETGIP_PARAM_6;
++ buf[6] = HX8369_CMD_SETGIP_PARAM_7;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE, buf,
++ HX8369_CMD_SETGIP_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set power: standby, DC etc. */
++ buf[0] = HX8369_CMD_SETPOWER | (HX8369_CMD_SETPOWER_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETPOWER_PARAM_2;
++ buf[2] = HX8369_CMD_SETPOWER_PARAM_3;
++ buf[3] = HX8369_CMD_SETPOWER_PARAM_4;
++ buf[4] = HX8369_CMD_SETPOWER_PARAM_5;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE, buf,
++ HX8369_CMD_SETPOWER_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set VCOM voltage. */
++ buf[0] = HX8369_CMD_SETVCOM | (HX8369_CMD_SETVCOM_PARAM_1 << 8);
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE, buf,
++ HX8369_CMD_SETVCOM_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set Panel: BGR/RGB or Inversion. */
++ buf[0] = HX8369_CMD_SETPANEL | (HX8369_CMD_SETPANEL_PARAM_1 << 8);
++ err = mipi_dsi_pkt_write(mipi_dsi,
++ MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM, buf, 0);
++ CHECK_RETCODE(err);
++
++ /* Set gamma curve related setting */
++ buf[0] = HX8369_CMD_SETGAMMA | (HX8369_CMD_SETGAMMA_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETGAMMA_PARAM_2;
++ buf[2] = HX8369_CMD_SETGAMMA_PARAM_3;
++ buf[3] = HX8369_CMD_SETGAMMA_PARAM_4;
++ buf[4] = HX8369_CMD_SETGAMMA_PARAM_5;
++ buf[5] = HX8369_CMD_SETGAMMA_PARAM_6;
++ buf[6] = HX8369_CMD_SETGAMMA_PARAM_7;
++ buf[7] = HX8369_CMD_SETGAMMA_PARAM_8;
++ buf[8] = HX8369_CMD_SETGAMMA_PARAM_9;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE, buf,
++ HX8369_CMD_SETGAMMA_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set MIPI: DPHYCMD & DSICMD, data lane number */
++ buf[0] = HX8369_CMD_SETMIPI | (HX8369_CMD_SETMIPI_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETMIPI_PARAM_2;
++ buf[2] = HX8369_CMD_SETMIPI_PARAM_3;
++ if (lcd_config.data_lane_num == HX8369_ONE_DATA_LANE)
++ buf[2] |= HX8369_CMD_SETMIPI_ONELANE;
++ else
++ buf[2] |= HX8369_CMD_SETMIPI_TWOLANE;
++ buf[3] = HX8369_CMD_SETMIPI_PARAM_4;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE, buf,
++ HX8369_CMD_SETMIPI_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set pixel format:24bpp */
++ buf[0] = HX8369_CMD_SETPIXEL_FMT;
++ switch (lcd_config.dpi_fmt) {
++ case MIPI_RGB565_PACKED:
++ case MIPI_RGB565_LOOSELY:
++ case MIPI_RGB565_CONFIG3:
++ buf[0] |= (HX8369_CMD_SETPIXEL_FMT_16BPP << 8);
++ break;
++
++ case MIPI_RGB666_LOOSELY:
++ case MIPI_RGB666_PACKED:
++ buf[0] |= (HX8369_CMD_SETPIXEL_FMT_18BPP << 8);
++ break;
++
++ case MIPI_RGB888:
++ buf[0] |= (HX8369_CMD_SETPIXEL_FMT_24BPP << 8);
++ break;
++
++ default:
++ buf[0] |= (HX8369_CMD_SETPIXEL_FMT_24BPP << 8);
++ break;
++ }
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM,
++ buf, 0);
++ CHECK_RETCODE(err);
++
++ /* Set column address: 0~479 */
++ buf[0] = HX8369_CMD_SETCLUMN_ADDR |
++ (HX8369_CMD_SETCLUMN_ADDR_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETCLUMN_ADDR_PARAM_2;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE,
++ buf, HX8369_CMD_SETCLUMN_ADDR_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set page address: 0~799 */
++ buf[0] = HX8369_CMD_SETPAGE_ADDR |
++ (HX8369_CMD_SETPAGE_ADDR_PARAM_1 << 8);
++ buf[1] = HX8369_CMD_SETPAGE_ADDR_PARAM_2;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_LONG_WRITE,
++ buf, HX8369_CMD_SETPAGE_ADDR_LEN);
++ CHECK_RETCODE(err);
++
++ /* Set display brightness related */
++ buf[0] = HX8369_CMD_WRT_DISP_BRIGHT |
++ (HX8369_CMD_WRT_DISP_BRIGHT_PARAM_1 << 8);
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM,
++ buf, 0);
++ CHECK_RETCODE(err);
++
++ buf[0] = HX8369_CMD_WRT_CABC_CTRL |
++ (HX8369_CMD_WRT_CABC_CTRL_PARAM_1 << 8);
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM,
++ buf, 0);
++ CHECK_RETCODE(err);
++
++ buf[0] = HX8369_CMD_WRT_CTRL_DISP |
++ (HX8369_CMD_WRT_CTRL_DISP_PARAM_1 << 8);
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM,
++ buf, 0);
++ CHECK_RETCODE(err);
++
++ /* exit sleep mode and set display on */
++ buf[0] = MIPI_DCS_EXIT_SLEEP_MODE;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM,
++ buf, 0);
++ CHECK_RETCODE(err);
++ /* To allow time for the supply voltages
++ * and clock circuits to stabilize.
++ */
++ msleep(5);
++ buf[0] = MIPI_DCS_SET_DISPLAY_ON;
++ err = mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM,
++ buf, 0);
++ CHECK_RETCODE(err);
++
++ err = mipid_init_backlight(mipi_dsi);
++ return err;
++}
++
++static int mipid_bl_update_status(struct backlight_device *bl)
++{
++ u32 buf;
++ int brightness = bl->props.brightness;
++ struct mipi_dsi_info *mipi_dsi = bl_get_data(bl);
++
++ if (bl->props.power != FB_BLANK_UNBLANK ||
++ bl->props.fb_blank != FB_BLANK_UNBLANK)
++ brightness = 0;
++
++ buf = HX8369_CMD_WRT_DISP_BRIGHT |
++ ((brightness & HX8369BL_MAX_BRIGHT) << 8);
++ mipi_dsi_pkt_write(mipi_dsi, MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM,
++ &buf, 0);
++
++ hx8369bl_brightness = brightness & HX8369BL_MAX_BRIGHT;
++
++ dev_dbg(&bl->dev, "mipid backlight brightness:%d.\n", brightness);
++ return 0;
++}
++
++static int mipid_bl_get_brightness(struct backlight_device *bl)
++{
++ return hx8369bl_brightness;
++}
++
++static int mipi_bl_check_fb(struct backlight_device *bl, struct fb_info *fbi)
++{
++ return 0;
++}
++
++static const struct backlight_ops mipid_lcd_bl_ops = {
++ .update_status = mipid_bl_update_status,
++ .get_brightness = mipid_bl_get_brightness,
++ .check_fb = mipi_bl_check_fb,
++};
++
++static int mipid_init_backlight(struct mipi_dsi_info *mipi_dsi)
++{
++ struct backlight_properties props;
++ struct backlight_device *bl;
++
++ if (mipi_dsi->bl) {
++ pr_debug("mipid backlight already init!\n");
++ return 0;
++ }
++ memset(&props, 0, sizeof(struct backlight_properties));
++ props.max_brightness = HX8369BL_MAX_BRIGHT;
++ props.type = BACKLIGHT_RAW;
++ bl = backlight_device_register("mipid-bl", &mipi_dsi->pdev->dev,
++ mipi_dsi, &mipid_lcd_bl_ops, &props);
++ if (IS_ERR(bl)) {
++ pr_err("error %ld on backlight register\n", PTR_ERR(bl));
++ return PTR_ERR(bl);
++ }
++ mipi_dsi->bl = bl;
++ bl->props.power = FB_BLANK_UNBLANK;
++ bl->props.fb_blank = FB_BLANK_UNBLANK;
++ bl->props.brightness = HX8369BL_DEF_BRIGHT;
++
++ mipid_bl_update_status(bl);
++ return 0;
++}
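++
++/*
++ * Once registered, the panel brightness is exposed through the usual
++ * backlight sysfs node (/sys/class/backlight/mipid-bl/brightness);
++ * writes funnel into mipid_bl_update_status(), which masks the value
++ * to HX8369BL_MAX_BRIGHT and sends it with HX8369_CMD_WRT_DISP_BRIGHT.
++ */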
+diff -Nur linux-4.1.3/drivers/video/mxc/mxc_hdmi.c linux-xbian-imx6/drivers/video/mxc/mxc_hdmi.c
+--- linux-4.1.3/drivers/video/mxc/mxc_hdmi.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/video/mxc/mxc_hdmi.c 2015-07-27 23:13:08.753749907 +0200
+@@ -0,0 +1,3404 @@
++/*
++ * Copyright (C) 2011-2014 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++/*
++ * SH-Mobile High-Definition Multimedia Interface (HDMI) driver
++ * for SLISHDMI13T and SLIPHDMIT IP cores
++ *
++ * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/device.h>
++#include <linux/platform_device.h>
++#include <linux/input.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/io.h>
++#include <linux/fb.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <linux/err.h>
++#include <linux/clk.h>
++#include <linux/uaccess.h>
++#include <linux/cpufreq.h>
++#include <linux/firmware.h>
++#include <linux/kthread.h>
++#include <linux/regulator/driver.h>
++#include <linux/fsl_devices.h>
++#include <linux/ipu.h>
++#include <linux/regmap.h>
++#include <linux/pinctrl/consumer.h>
++#include <linux/of_device.h>
++
++#include <linux/console.h>
++#include <linux/types.h>
++
++#include "../fbdev/edid.h"
++#include <video/mxc_edid.h>
++#include <video/mxc_hdmi.h>
++#include "mxc_dispdrv.h"
++
++#include <linux/mfd/mxc-hdmi-core.h>
++
++#include <linux/hdmi.h>
++
++#define DISPDRV_HDMI "hdmi"
++#define HDMI_EDID_LEN 512
++
++/* status codes for reading edid */
++#define HDMI_EDID_SUCCESS 0
++#define HDMI_EDID_FAIL -1
++#define HDMI_EDID_SAME -2
++#define HDMI_EDID_NO_MODES -3
++
++#define NUM_CEA_VIDEO_MODES 64
++#define DEFAULT_VIDEO_MODE 16 /* 1080P */
++
++#define RGB 0
++#define YCBCR444 1
++#define YCBCR422_16BITS 2
++#define YCBCR422_8BITS 3
++#define XVYCC444 4
++
++#define fb_mode_is_equal(a, b) mxc_edid_fb_mode_is_equal(true, a, b, ~0)
++
++/*
++ * We follow a flowchart which is in the "Synopsys DesignWare Cores
++ * HDMI Transmitter Controller User Guide, 1.30a", section 3.1
++ * (dwc_hdmi_tx_user.pdf)
++ *
++ * Below are notes that say "HDMI Initialization Step X"
++ * These correspond to the flowchart.
++ */
++
++/*
++ * We are required to configure VGA mode before reading edid
++ * in HDMI Initialization Step B
++ */
++static const struct fb_videomode vga_mode = {
++ /* 640x480 @ 60 Hz, 31.5 kHz hsync */
++ NULL, 60, 640, 480, 39721, 48, 16, 33, 10, 96, 2, 0,
++ FB_VMODE_NONINTERLACED | FB_VMODE_ASPECT_4_3, FB_MODE_IS_VESA,
++};
++
++enum hdmi_datamap {
++ RGB444_8B = 0x01,
++ RGB444_10B = 0x03,
++ RGB444_12B = 0x05,
++ RGB444_16B = 0x07,
++ YCbCr444_8B = 0x09,
++ YCbCr444_10B = 0x0B,
++ YCbCr444_12B = 0x0D,
++ YCbCr444_16B = 0x0F,
++ YCbCr422_8B = 0x16,
++ YCbCr422_10B = 0x14,
++ YCbCr422_12B = 0x12,
++};
++
++enum hdmi_mxc_colorimetry {
++ eITU601,
++ eITU709,
++};
++
++struct hdmi_vmode {
++ bool mDVI;
++ bool mHSyncPolarity;
++ bool mVSyncPolarity;
++ bool mInterlaced;
++ bool mDataEnablePolarity;
++
++ unsigned long mPixelClock;
++ unsigned int mPixelRepetitionInput;
++ unsigned int mPixelRepetitionOutput;
++};
++
++struct hdmi_data_info {
++ unsigned int enc_in_format;
++ unsigned int enc_out_format;
++ unsigned int enc_color_depth;
++ unsigned int colorimetry;
++ unsigned int pix_repet_factor;
++ unsigned int hdcp_enable;
++ unsigned int rgb_out_enable;
++ unsigned int rgb_quant_range;
++ unsigned int enable_3d;
++ unsigned int enable_fract;
++ struct hdmi_vmode video_mode;
++};
++
++struct hdmi_phy_reg_config {
++ /* HDMI PHY register config for pass HCT */
++ u16 reg_vlev;
++ u16 reg_cksymtx;
++};
++
++struct mxc_hdmi {
++ struct platform_device *pdev;
++ struct platform_device *core_pdev;
++ struct mxc_dispdrv_handle *disp_mxc_hdmi;
++ struct fb_info *fbi;
++ struct clk *hdmi_isfr_clk;
++ struct clk *hdmi_iahb_clk;
++ struct timer_list jitter_timer;
++ struct work_struct hotplug_work;
++ struct delayed_work hdcp_hdp_work;
++
++ struct notifier_block nb;
++
++ struct hdmi_data_info hdmi_data;
++ int vic;
++ int edid_status;
++ struct mxc_edid_cfg edid_cfg;
++ u8 edid[HDMI_EDID_LEN];
++ bool fb_reg;
++ bool cable_plugin;
++ u8 blank;
++ bool dft_mode_set;
++ char *dft_mode_str;
++ int default_bpp;
++ u8 latest_intr_stat;
++ u8 plug_event;
++ u8 plug_mask;
++ bool irq_enabled;
++ spinlock_t irq_lock;
++ bool phy_enabled;
++ struct fb_videomode default_mode;
++ struct fb_var_screeninfo previous_non_vga_mode;
++ bool requesting_vga_for_initialization;
++
++ int *gpr_base;
++ int *gpr_hdmi_base;
++ int *gpr_sdma_base;
++ int cpu_type;
++ int cpu_version;
++ struct hdmi_phy_reg_config phy_config;
++
++ struct pinctrl *pinctrl;
++};
++
++static int hdmi_major;
++static struct class *hdmi_class;
++
++struct i2c_client *hdmi_i2c;
++struct mxc_hdmi *g_hdmi;
++
++static bool hdmi_inited;
++static bool hdcp_init;
++
++extern const struct fb_videomode mxc_cea_mode[64];
++extern void mxc_hdmi_cec_handle(u32 cec_stat);
++
++static void mxc_hdmi_setup(struct mxc_hdmi *hdmi, unsigned long event);
++static void hdmi_enable_overflow_interrupts(void);
++static void hdmi_disable_overflow_interrupts(void);
++static void mxc_hdmi_edid_rebuild_modelist(struct mxc_hdmi *hdmi);
++static void mxc_hdmi_default_edid_cfg(struct mxc_hdmi *hdmi);
++static void mxc_hdmi_default_modelist(struct mxc_hdmi *hdmi);
++static void mxc_hdmi_set_mode(struct mxc_hdmi *hdmi);
++
++static char *rgb_quant_range = "default";
++module_param(rgb_quant_range, charp, S_IRUGO);
++MODULE_PARM_DESC(rgb_quant_range, "RGB Quant Range (default, limited, full)");
++
++static char *enable_3d = "1";
++module_param(enable_3d, charp, S_IRUGO);
++MODULE_PARM_DESC(enable_3d, "3D modes enabled (0/1)");
++
++static char *enable_fract = "1";
++module_param(enable_fract, charp, S_IRUGO);
++MODULE_PARM_DESC(enable_fract, "Fractional modes enabled (0/1)");
++
++static struct platform_device_id imx_hdmi_devtype[] = {
++ {
++ .name = "hdmi-imx6DL",
++ .driver_data = IMX6DL_HDMI,
++ }, {
++ .name = "hdmi-imx6Q",
++ .driver_data = IMX6Q_HDMI,
++ }, {
++ /* sentinel */
++ }
++};
++MODULE_DEVICE_TABLE(platform, imx_hdmi_devtype);
++
++static const struct of_device_id imx_hdmi_dt_ids[] = {
++ { .compatible = "fsl,imx6dl-hdmi-video", .data = &imx_hdmi_devtype[IMX6DL_HDMI], },
++ { .compatible = "fsl,imx6q-hdmi-video", .data = &imx_hdmi_devtype[IMX6Q_HDMI], },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, imx_hdmi_dt_ids);
++
++static inline int cpu_is_imx6dl(struct mxc_hdmi *hdmi)
++{
++ return hdmi->cpu_type == IMX6DL_HDMI;
++}
++
++static inline void get_refresh_str(struct fb_videomode *m, char *refresh)
++{
++ snprintf(refresh, 10, "%u.%uHz", m->refresh - (int)(m->vmode & FB_VMODE_FRACTIONAL ? 1 : 0),
++ m->refresh * (int)(m->vmode & FB_VMODE_FRACTIONAL ? 999 : 1000) % 1000);
++}
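++
++/*
++ * Worked example of the fractional encoding above: a mode stored with
++ * refresh=60 and FB_VMODE_FRACTIONAL set prints as "59.940Hz"
++ * (60*999%1000 = 940), refresh=24 prints as "23.976Hz", while the same
++ * fields without the flag print as "60.0Hz".
++ */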
++
++static void dump_fb_videomode(struct fb_videomode *m)
++{
++ char refresh[10];
++
++ get_refresh_str(m, refresh);
++ pr_debug("fb_videomode = %ux%u%c-%s (%ukHz) %u %u %u %u %u %u %u %u %u\n",
++ m->xres, m->yres, m->vmode & FB_VMODE_INTERLACED ? 'i' : 'p',
++ refresh, m->pixclock, m->left_margin,
++ m->right_margin, m->upper_margin, m->lower_margin,
++ m->hsync_len, m->vsync_len, m->sync, m->vmode, m->flag);
++}
++
++static ssize_t mxc_hdmi_show_name(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%s\n", hdmi->fbi->fix.id);
++}
++
++static DEVICE_ATTR(fb_name, S_IRUGO, mxc_hdmi_show_name, NULL);
++
++static ssize_t mxc_hdmi_show_state(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++
++ if (hdmi->cable_plugin == false)
++ strcpy(buf, "plugout\n");
++ else
++ strcpy(buf, "plugin\n");
++
++ return strlen(buf);
++}
++
++static DEVICE_ATTR(cable_state, S_IRUGO, mxc_hdmi_show_state, NULL);
++
++static ssize_t mxc_hdmi_show_edid(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++ int i, j, len = 0;
++
++ for (j = 0; j < HDMI_EDID_LEN/16; j++) {
++ for (i = 0; i < 16; i++)
++ len += sprintf(buf+len, "0x%02X ",
++ hdmi->edid[j*16 + i]);
++ len += sprintf(buf+len, "\n");
++ }
++
++ return len;
++}
++
++static DEVICE_ATTR(edid, S_IRUGO, mxc_hdmi_show_edid, NULL);
++
++static ssize_t mxc_hdmi_show_rgb_out_enable(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++
++ if (hdmi->hdmi_data.rgb_out_enable == true)
++ strcpy(buf, "RGB out\n");
++ else
++ strcpy(buf, "YCbCr out\n");
++
++ return strlen(buf);
++}
++
++static ssize_t mxc_hdmi_store_rgb_out_enable(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++ unsigned long value;
++ int ret;
++
++ ret = kstrtoul(buf, 10, &value);
++ if (ret)
++ return ret;
++
++ hdmi->hdmi_data.rgb_out_enable = value;
++
++ /* Reconfig HDMI for output color space change */
++ mxc_hdmi_setup(hdmi, 0);
++
++ return count;
++}
++
++static DEVICE_ATTR(rgb_out_enable, S_IRUGO | S_IWUSR,
++ mxc_hdmi_show_rgb_out_enable,
++ mxc_hdmi_store_rgb_out_enable);
++
++static ssize_t mxc_hdmi_show_rgb_quant_range(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++
++ switch (hdmi->hdmi_data.rgb_quant_range) {
++ case HDMI_FC_AVICONF2_RGB_QUANT_LIMITED_RANGE:
++ strcpy(buf, "limited\n");
++ break;
++ case HDMI_FC_AVICONF2_RGB_QUANT_FULL_RANGE:
++ strcpy(buf, "full\n");
++ break;
++ case HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT:
++ default:
++ strcpy(buf, "default\n");
++ break;
++ }
++
++ return strlen(buf);
++}
++
++static ssize_t mxc_hdmi_store_rgb_quant_range(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++ int ret = count;
++
++ if (sysfs_streq("limited", buf)) {
++ hdmi->hdmi_data.rgb_quant_range = HDMI_FC_AVICONF2_RGB_QUANT_LIMITED_RANGE;
++ } else if (sysfs_streq("full", buf)) {
++ hdmi->hdmi_data.rgb_quant_range = HDMI_FC_AVICONF2_RGB_QUANT_FULL_RANGE;
++ } else if (sysfs_streq("default", buf)) {
++ hdmi->hdmi_data.rgb_quant_range = HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT;
++ } else {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ /* Reconfig HDMI for output RGB Quant Range change if using RGB out */
++ if (hdmi->hdmi_data.rgb_out_enable)
++ mxc_hdmi_setup(hdmi, 0);
++out:
++ return ret;
++}
++
++static DEVICE_ATTR(rgb_quant_range, S_IRUGO | S_IWUSR,
++ mxc_hdmi_show_rgb_quant_range,
++ mxc_hdmi_store_rgb_quant_range);
++
++static ssize_t mxc_hdmi_show_enable_3d(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++
++ switch (hdmi->hdmi_data.enable_3d) {
++ case 0:
++ strcpy(buf, "disabled\n");
++ break;
++ default:
++ strcpy(buf, "enabled\n");
++ break;
++ }
++
++ return strlen(buf);
++}
++
++static ssize_t mxc_hdmi_store_enable_3d(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++ int ret = count;
++
++ if (sysfs_streq("disable", buf)) {
++ hdmi->hdmi_data.enable_3d = 0;
++ } else if (sysfs_streq("0", buf)) {
++ hdmi->hdmi_data.enable_3d = 0;
++ } else {
++ hdmi->hdmi_data.enable_3d = 1;
++ }
++
++ mxc_hdmi_edid_rebuild_modelist(hdmi);
++ if (hdmi->cable_plugin)
++ mxc_hdmi_set_mode(hdmi);
++
++ return ret;
++}
++
++static DEVICE_ATTR(enable_3d, S_IRUGO | S_IWUSR,
++ mxc_hdmi_show_enable_3d,
++ mxc_hdmi_store_enable_3d);
++
++static ssize_t mxc_hdmi_show_enable_fract(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++
++ switch (hdmi->hdmi_data.enable_fract) {
++ case 0:
++ strcpy(buf, "disabled\n");
++ break;
++ default:
++ strcpy(buf, "enabled\n");
++ break;
++ }
++
++ return strlen(buf);
++}
++
++static ssize_t mxc_hdmi_store_enable_fract(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++ int ret = count;
++
++ if (sysfs_streq("disable", buf)) {
++ hdmi->hdmi_data.enable_fract = 0;
++ } else if (sysfs_streq("0", buf)) {
++ hdmi->hdmi_data.enable_fract = 0;
++ } else {
++ hdmi->hdmi_data.enable_fract = 1;
++ }
++
++ mxc_hdmi_edid_rebuild_modelist(hdmi);
++ if (hdmi->cable_plugin)
++ mxc_hdmi_set_mode(hdmi);
++
++ return ret;
++}
++
++static DEVICE_ATTR(enable_fract, S_IRUGO | S_IWUSR,
++ mxc_hdmi_show_enable_fract,
++ mxc_hdmi_store_enable_fract);
++
++static ssize_t mxc_hdmi_show_hdcp_enable(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++
++ if (hdmi->hdmi_data.hdcp_enable == false)
++ strcpy(buf, "hdcp disable\n");
++ else
++ strcpy(buf, "hdcp enable\n");
++
++ return strlen(buf);
++
++}
++
++static ssize_t mxc_hdmi_store_hdcp_enable(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct mxc_hdmi *hdmi = dev_get_drvdata(dev);
++ char event_string[32];
++ char *envp[] = { event_string, NULL };
++ unsigned long value;
++ int ret;
++
++ ret = kstrtoul(buf, 10, &value);
++ if (ret)
++ return ret;
++
++ hdmi->hdmi_data.hdcp_enable = value;
++
++ /* Reconfig HDMI for HDCP */
++ mxc_hdmi_setup(hdmi, 0);
++
++ if (hdmi->hdmi_data.hdcp_enable == false) {
++ sprintf(event_string, "EVENT=hdcpdisable");
++ kobject_uevent_env(&hdmi->pdev->dev.kobj, KOBJ_CHANGE, envp);
++ } else {
++ sprintf(event_string, "EVENT=hdcpenable");
++ kobject_uevent_env(&hdmi->pdev->dev.kobj, KOBJ_CHANGE, envp);
++ }
++
++ return count;
++
++}
++
++static DEVICE_ATTR(hdcp_enable, S_IRUGO | S_IWUSR,
++ mxc_hdmi_show_hdcp_enable, mxc_hdmi_store_hdcp_enable);
++
++/*!
++ * This submodule is responsible for video data synchronization.
++ * For example, for RGB 4:4:4 input, the data map is defined as:
++ * pin{47~40} <==> R[7:0]
++ * pin{31~24} <==> G[7:0]
++ * pin{15~8} <==> B[7:0]
++ */
++static void hdmi_video_sample(struct mxc_hdmi *hdmi)
++{
++ int color_format = 0;
++ u8 val;
++
++ if (hdmi->hdmi_data.enc_in_format == RGB) {
++ if (hdmi->hdmi_data.enc_color_depth == 8)
++ color_format = 0x01;
++ else if (hdmi->hdmi_data.enc_color_depth == 10)
++ color_format = 0x03;
++ else if (hdmi->hdmi_data.enc_color_depth == 12)
++ color_format = 0x05;
++ else if (hdmi->hdmi_data.enc_color_depth == 16)
++ color_format = 0x07;
++ else
++ return;
++ } else if (hdmi->hdmi_data.enc_in_format == YCBCR444) {
++ if (hdmi->hdmi_data.enc_color_depth == 8)
++ color_format = 0x09;
++ else if (hdmi->hdmi_data.enc_color_depth == 10)
++ color_format = 0x0B;
++ else if (hdmi->hdmi_data.enc_color_depth == 12)
++ color_format = 0x0D;
++ else if (hdmi->hdmi_data.enc_color_depth == 16)
++ color_format = 0x0F;
++ else
++ return;
++ } else if (hdmi->hdmi_data.enc_in_format == YCBCR422_8BITS) {
++ if (hdmi->hdmi_data.enc_color_depth == 8)
++ color_format = 0x16;
++ else if (hdmi->hdmi_data.enc_color_depth == 10)
++ color_format = 0x14;
++ else if (hdmi->hdmi_data.enc_color_depth == 12)
++ color_format = 0x12;
++ else
++ return;
++ }
++
++ val = HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_DISABLE |
++ ((color_format << HDMI_TX_INVID0_VIDEO_MAPPING_OFFSET) &
++ HDMI_TX_INVID0_VIDEO_MAPPING_MASK);
++ hdmi_writeb(val, HDMI_TX_INVID0);
++
++ /* Enable TX stuffing: When DE is inactive, fix the output data to 0 */
++ val = HDMI_TX_INSTUFFING_BDBDATA_STUFFING_ENABLE |
++ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_ENABLE |
++ HDMI_TX_INSTUFFING_GYDATA_STUFFING_ENABLE;
++ hdmi_writeb(val, HDMI_TX_INSTUFFING);
++ hdmi_writeb(0x0, HDMI_TX_GYDATA0);
++ hdmi_writeb(0x0, HDMI_TX_GYDATA1);
++ hdmi_writeb(0x0, HDMI_TX_RCRDATA0);
++ hdmi_writeb(0x0, HDMI_TX_RCRDATA1);
++ hdmi_writeb(0x0, HDMI_TX_BCBDATA0);
++ hdmi_writeb(0x0, HDMI_TX_BCBDATA1);
++}
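++
++/*
++ * The color_format codes chosen above are the values listed in enum
++ * hdmi_datamap earlier in this file (e.g. 0x01 = RGB444_8B, 0x16 =
++ * YCbCr422_8B); they select the TX video mapping in HDMI_TX_INVID0.
++ */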
++
++static int isColorSpaceConversion(struct mxc_hdmi *hdmi)
++{
++ return (hdmi->hdmi_data.enc_in_format != hdmi->hdmi_data.enc_out_format) ||
++ (hdmi->hdmi_data.enc_out_format == RGB &&
++ ((hdmi->hdmi_data.rgb_quant_range == HDMI_FC_AVICONF2_RGB_QUANT_LIMITED_RANGE) ||
++ (hdmi->hdmi_data.rgb_quant_range == HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT && hdmi->vic > 1)));
++}
++
++static int isColorSpaceDecimation(struct mxc_hdmi *hdmi)
++{
++ return ((hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS) &&
++ (hdmi->hdmi_data.enc_in_format == RGB ||
++ hdmi->hdmi_data.enc_in_format == YCBCR444));
++}
++
++static int isColorSpaceInterpolation(struct mxc_hdmi *hdmi)
++{
++ return ((hdmi->hdmi_data.enc_in_format == YCBCR422_8BITS) &&
++ (hdmi->hdmi_data.enc_out_format == RGB
++ || hdmi->hdmi_data.enc_out_format == YCBCR444));
++}
++
++/*!
++ * update the color space conversion coefficients.
++ */
++static void update_csc_coeffs(struct mxc_hdmi *hdmi)
++{
++ unsigned short csc_coeff[3][4];
++ unsigned int csc_scale = 1;
++ u8 val;
++ bool coeff_selected = false;
++
++ if (isColorSpaceConversion(hdmi)) { /* csc needed */
++ if (hdmi->hdmi_data.enc_out_format == RGB) {
++ if (hdmi->hdmi_data.enc_in_format == RGB) {
++ csc_coeff[0][0] = 0x1b80;
++ csc_coeff[0][1] = 0x0000;
++ csc_coeff[0][2] = 0x0000;
++ csc_coeff[0][3] = 0x0020;
++
++ csc_coeff[1][0] = 0x0000;
++ csc_coeff[1][1] = 0x1b80;
++ csc_coeff[1][2] = 0x0000;
++ csc_coeff[1][3] = 0x0020;
++
++ csc_coeff[2][0] = 0x0000;
++ csc_coeff[2][1] = 0x0000;
++ csc_coeff[2][2] = 0x1b80;
++ csc_coeff[2][3] = 0x0020;
++
++ csc_scale = 1;
++ coeff_selected = true;
++ } else if (hdmi->hdmi_data.colorimetry == eITU601) {
++ csc_coeff[0][0] = 0x2000;
++ csc_coeff[0][1] = 0x6926;
++ csc_coeff[0][2] = 0x74fd;
++ csc_coeff[0][3] = 0x010e;
++
++ csc_coeff[1][0] = 0x2000;
++ csc_coeff[1][1] = 0x2cdd;
++ csc_coeff[1][2] = 0x0000;
++ csc_coeff[1][3] = 0x7e9a;
++
++ csc_coeff[2][0] = 0x2000;
++ csc_coeff[2][1] = 0x0000;
++ csc_coeff[2][2] = 0x38b4;
++ csc_coeff[2][3] = 0x7e3b;
++
++ csc_scale = 1;
++ coeff_selected = true;
++ } else if (hdmi->hdmi_data.colorimetry == eITU709) {
++ csc_coeff[0][0] = 0x2000;
++ csc_coeff[0][1] = 0x7106;
++ csc_coeff[0][2] = 0x7a02;
++ csc_coeff[0][3] = 0x00a7;
++
++ csc_coeff[1][0] = 0x2000;
++ csc_coeff[1][1] = 0x3264;
++ csc_coeff[1][2] = 0x0000;
++ csc_coeff[1][3] = 0x7e6d;
++
++ csc_coeff[2][0] = 0x2000;
++ csc_coeff[2][1] = 0x0000;
++ csc_coeff[2][2] = 0x3b61;
++ csc_coeff[2][3] = 0x7e25;
++
++ csc_scale = 1;
++ coeff_selected = true;
++ }
++ } else if (hdmi->hdmi_data.enc_in_format == RGB) {
++ if (hdmi->hdmi_data.colorimetry == eITU601) {
++ csc_coeff[0][0] = 0x2591;
++ csc_coeff[0][1] = 0x1322;
++ csc_coeff[0][2] = 0x074b;
++ csc_coeff[0][3] = 0x0000;
++
++ csc_coeff[1][0] = 0x6535;
++ csc_coeff[1][1] = 0x2000;
++ csc_coeff[1][2] = 0x7acc;
++ csc_coeff[1][3] = 0x0200;
++
++ csc_coeff[2][0] = 0x6acd;
++ csc_coeff[2][1] = 0x7534;
++ csc_coeff[2][2] = 0x2000;
++ csc_coeff[2][3] = 0x0200;
++
++ csc_scale = 0;
++ coeff_selected = true;
++ } else if (hdmi->hdmi_data.colorimetry == eITU709) {
++ csc_coeff[0][0] = 0x2dc5;
++ csc_coeff[0][1] = 0x0d9b;
++ csc_coeff[0][2] = 0x049e;
++ csc_coeff[0][3] = 0x0000;
++
++ csc_coeff[1][0] = 0x62f0;
++ csc_coeff[1][1] = 0x2000;
++ csc_coeff[1][2] = 0x7d11;
++ csc_coeff[1][3] = 0x0200;
++
++ csc_coeff[2][0] = 0x6756;
++ csc_coeff[2][1] = 0x78ab;
++ csc_coeff[2][2] = 0x2000;
++ csc_coeff[2][3] = 0x0200;
++
++ csc_scale = 0;
++ coeff_selected = true;
++ }
++ }
++ }
++
++ if (!coeff_selected) {
++ csc_coeff[0][0] = 0x2000;
++ csc_coeff[0][1] = 0x0000;
++ csc_coeff[0][2] = 0x0000;
++ csc_coeff[0][3] = 0x0000;
++
++ csc_coeff[1][0] = 0x0000;
++ csc_coeff[1][1] = 0x2000;
++ csc_coeff[1][2] = 0x0000;
++ csc_coeff[1][3] = 0x0000;
++
++ csc_coeff[2][0] = 0x0000;
++ csc_coeff[2][1] = 0x0000;
++ csc_coeff[2][2] = 0x2000;
++ csc_coeff[2][3] = 0x0000;
++
++ csc_scale = 1;
++ }
++
++ /* Update CSC parameters in HDMI CSC registers */
++ hdmi_writeb((unsigned char)(csc_coeff[0][0] & 0xFF),
++ HDMI_CSC_COEF_A1_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][0] >> 8),
++ HDMI_CSC_COEF_A1_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][1] & 0xFF),
++ HDMI_CSC_COEF_A2_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][1] >> 8),
++ HDMI_CSC_COEF_A2_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][2] & 0xFF),
++ HDMI_CSC_COEF_A3_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][2] >> 8),
++ HDMI_CSC_COEF_A3_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][3] & 0xFF),
++ HDMI_CSC_COEF_A4_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[0][3] >> 8),
++ HDMI_CSC_COEF_A4_MSB);
++
++ hdmi_writeb((unsigned char)(csc_coeff[1][0] & 0xFF),
++ HDMI_CSC_COEF_B1_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][0] >> 8),
++ HDMI_CSC_COEF_B1_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][1] & 0xFF),
++ HDMI_CSC_COEF_B2_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][1] >> 8),
++ HDMI_CSC_COEF_B2_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][2] & 0xFF),
++ HDMI_CSC_COEF_B3_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][2] >> 8),
++ HDMI_CSC_COEF_B3_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][3] & 0xFF),
++ HDMI_CSC_COEF_B4_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[1][3] >> 8),
++ HDMI_CSC_COEF_B4_MSB);
++
++ hdmi_writeb((unsigned char)(csc_coeff[2][0] & 0xFF),
++ HDMI_CSC_COEF_C1_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][0] >> 8),
++ HDMI_CSC_COEF_C1_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][1] & 0xFF),
++ HDMI_CSC_COEF_C2_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][1] >> 8),
++ HDMI_CSC_COEF_C2_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][2] & 0xFF),
++ HDMI_CSC_COEF_C3_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][2] >> 8),
++ HDMI_CSC_COEF_C3_MSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][3] & 0xFF),
++ HDMI_CSC_COEF_C4_LSB);
++ hdmi_writeb((unsigned char)(csc_coeff[2][3] >> 8),
++ HDMI_CSC_COEF_C4_MSB);
++
++ val = hdmi_readb(HDMI_CSC_SCALE);
++ val &= ~HDMI_CSC_SCALE_CSCSCALE_MASK;
++ val |= csc_scale & HDMI_CSC_SCALE_CSCSCALE_MASK;
++ hdmi_writeb(val, HDMI_CSC_SCALE);
++}
++
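++/*
++ * Configure the color space converter: select chroma interpolation
++ * (422 -> 444) or decimation (444 -> 422) as needed, program the
++ * color depth, and load the CSC coefficients for the current
++ * input/output formats.
++ */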
++static void hdmi_video_csc(struct mxc_hdmi *hdmi)
++{
++ int color_depth = 0;
++ int interpolation = HDMI_CSC_CFG_INTMODE_DISABLE;
++ int decimation = HDMI_CSC_CFG_DECMODE_DISABLE;
++ u8 val;
++
++ /* YCC422 interpolation to 444 mode */
++ if (isColorSpaceInterpolation(hdmi))
++ interpolation = HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA1;
++ else if (isColorSpaceDecimation(hdmi))
++ decimation = HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA3;
++
++ if (hdmi->hdmi_data.enc_color_depth == 8)
++ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_24BPP;
++ else if (hdmi->hdmi_data.enc_color_depth == 10)
++ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_30BPP;
++ else if (hdmi->hdmi_data.enc_color_depth == 12)
++ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_36BPP;
++ else if (hdmi->hdmi_data.enc_color_depth == 16)
++ color_depth = HDMI_CSC_SCALE_CSC_COLORDE_PTH_48BPP;
++ else
++ return;
++
++ /* configure the CSC registers */
++ hdmi_writeb(interpolation | decimation, HDMI_CSC_CFG);
++ val = hdmi_readb(HDMI_CSC_SCALE);
++ val &= ~HDMI_CSC_SCALE_CSC_COLORDE_PTH_MASK;
++ val |= color_depth;
++ hdmi_writeb(val, HDMI_CSC_SCALE);
++
++ update_csc_coeffs(hdmi);
++}
++
++/*!
++ * The HDMI video packetizer packs the pixel data. For example, if the
++ * input is in YCC422 mode or the pixel repeater is used, the data has
++ * to be repacked; otherwise this module can be bypassed.
++ */
++static void hdmi_video_packetize(struct mxc_hdmi *hdmi)
++{
++ unsigned int color_depth = 0;
++ unsigned int remap_size = HDMI_VP_REMAP_YCC422_16bit;
++ unsigned int output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_PP;
++ struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data;
++ u8 val;
++
++ if (hdmi_data->enc_out_format == RGB
++ || hdmi_data->enc_out_format == YCBCR444) {
++ if (hdmi_data->enc_color_depth == 0)
++ output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
++ else if (hdmi_data->enc_color_depth == 8) {
++ color_depth = 4;
++ output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
++ } else if (hdmi_data->enc_color_depth == 10)
++ color_depth = 5;
++ else if (hdmi_data->enc_color_depth == 12)
++ color_depth = 6;
++ else if (hdmi_data->enc_color_depth == 16)
++ color_depth = 7;
++ else
++ return;
++ } else if (hdmi_data->enc_out_format == YCBCR422_8BITS) {
++ if (hdmi_data->enc_color_depth == 0 ||
++ hdmi_data->enc_color_depth == 8)
++ remap_size = HDMI_VP_REMAP_YCC422_16bit;
++ else if (hdmi_data->enc_color_depth == 10)
++ remap_size = HDMI_VP_REMAP_YCC422_20bit;
++ else if (hdmi_data->enc_color_depth == 12)
++ remap_size = HDMI_VP_REMAP_YCC422_24bit;
++ else
++ return;
++ output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422;
++ } else
++ return;
++
++ /* Deep color is not supported here, because the IPU's
++ * maximum color depth is 24 bits */
++ color_depth = 0;
++
++ /* set the packetizer registers */
++ val = ((color_depth << HDMI_VP_PR_CD_COLOR_DEPTH_OFFSET) &
++ HDMI_VP_PR_CD_COLOR_DEPTH_MASK) |
++ ((hdmi_data->pix_repet_factor <<
++ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_OFFSET) &
++ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK);
++ hdmi_writeb(val, HDMI_VP_PR_CD);
++
++ val = hdmi_readb(HDMI_VP_STUFF);
++ val &= ~HDMI_VP_STUFF_PR_STUFFING_MASK;
++ val |= HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE;
++ hdmi_writeb(val, HDMI_VP_STUFF);
++
++ /* Data from pixel repeater block */
++ if (hdmi_data->pix_repet_factor > 1) {
++ val = hdmi_readb(HDMI_VP_CONF);
++ val &= ~(HDMI_VP_CONF_PR_EN_MASK |
++ HDMI_VP_CONF_BYPASS_SELECT_MASK);
++ val |= HDMI_VP_CONF_PR_EN_ENABLE |
++ HDMI_VP_CONF_BYPASS_SELECT_PIX_REPEATER;
++ hdmi_writeb(val, HDMI_VP_CONF);
++ } else { /* data from packetizer block */
++ val = hdmi_readb(HDMI_VP_CONF);
++ val &= ~(HDMI_VP_CONF_PR_EN_MASK |
++ HDMI_VP_CONF_BYPASS_SELECT_MASK);
++ val |= HDMI_VP_CONF_PR_EN_DISABLE |
++ HDMI_VP_CONF_BYPASS_SELECT_VID_PACKETIZER;
++ hdmi_writeb(val, HDMI_VP_CONF);
++ }
++
++ val = hdmi_readb(HDMI_VP_STUFF);
++ val &= ~HDMI_VP_STUFF_IDEFAULT_PHASE_MASK;
++ val |= 1 << HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET;
++ hdmi_writeb(val, HDMI_VP_STUFF);
++
++ hdmi_writeb(remap_size, HDMI_VP_REMAP);
++
++ if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_PP) {
++ val = hdmi_readb(HDMI_VP_CONF);
++ val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
++ HDMI_VP_CONF_PP_EN_ENMASK |
++ HDMI_VP_CONF_YCC422_EN_MASK);
++ val |= HDMI_VP_CONF_BYPASS_EN_DISABLE |
++ HDMI_VP_CONF_PP_EN_ENABLE |
++ HDMI_VP_CONF_YCC422_EN_DISABLE;
++ hdmi_writeb(val, HDMI_VP_CONF);
++ } else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422) {
++ val = hdmi_readb(HDMI_VP_CONF);
++ val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
++ HDMI_VP_CONF_PP_EN_ENMASK |
++ HDMI_VP_CONF_YCC422_EN_MASK);
++ val |= HDMI_VP_CONF_BYPASS_EN_DISABLE |
++ HDMI_VP_CONF_PP_EN_DISABLE |
++ HDMI_VP_CONF_YCC422_EN_ENABLE;
++ hdmi_writeb(val, HDMI_VP_CONF);
++ } else if (output_select == HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS) {
++ val = hdmi_readb(HDMI_VP_CONF);
++ val &= ~(HDMI_VP_CONF_BYPASS_EN_MASK |
++ HDMI_VP_CONF_PP_EN_ENMASK |
++ HDMI_VP_CONF_YCC422_EN_MASK);
++ val |= HDMI_VP_CONF_BYPASS_EN_ENABLE |
++ HDMI_VP_CONF_PP_EN_DISABLE |
++ HDMI_VP_CONF_YCC422_EN_DISABLE;
++ hdmi_writeb(val, HDMI_VP_CONF);
++ } else {
++ return;
++ }
++
++ val = hdmi_readb(HDMI_VP_STUFF);
++ val &= ~(HDMI_VP_STUFF_PP_STUFFING_MASK |
++ HDMI_VP_STUFF_YCC422_STUFFING_MASK);
++ val |= HDMI_VP_STUFF_PP_STUFFING_STUFFING_MODE |
++ HDMI_VP_STUFF_YCC422_STUFFING_STUFFING_MODE;
++ hdmi_writeb(val, HDMI_VP_STUFF);
++
++ val = hdmi_readb(HDMI_VP_CONF);
++ val &= ~HDMI_VP_CONF_OUTPUT_SELECTOR_MASK;
++ val |= output_select;
++ hdmi_writeb(val, HDMI_VP_CONF);
++}
++
++#if 0
++/* Force a fixed color screen */
++static void hdmi_video_force_output(struct mxc_hdmi *hdmi, unsigned char force)
++{
++ u8 val;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ if (force) {
++ hdmi_writeb(0x00, HDMI_FC_DBGTMDS2); /* R */
++ hdmi_writeb(0x00, HDMI_FC_DBGTMDS1); /* G */
++ hdmi_writeb(0xFF, HDMI_FC_DBGTMDS0); /* B */
++ val = hdmi_readb(HDMI_FC_DBGFORCE);
++ val |= HDMI_FC_DBGFORCE_FORCEVIDEO;
++ hdmi_writeb(val, HDMI_FC_DBGFORCE);
++ } else {
++ val = hdmi_readb(HDMI_FC_DBGFORCE);
++ val &= ~HDMI_FC_DBGFORCE_FORCEVIDEO;
++ hdmi_writeb(val, HDMI_FC_DBGFORCE);
++ hdmi_writeb(0x00, HDMI_FC_DBGTMDS2); /* R */
++ hdmi_writeb(0x00, HDMI_FC_DBGTMDS1); /* G */
++ hdmi_writeb(0x00, HDMI_FC_DBGTMDS0); /* B */
++ }
++}
++#endif
++
++static inline void hdmi_phy_test_clear(struct mxc_hdmi *hdmi,
++ unsigned char bit)
++{
++ u8 val = hdmi_readb(HDMI_PHY_TST0);
++ val &= ~HDMI_PHY_TST0_TSTCLR_MASK;
++ val |= (bit << HDMI_PHY_TST0_TSTCLR_OFFSET) &
++ HDMI_PHY_TST0_TSTCLR_MASK;
++ hdmi_writeb(val, HDMI_PHY_TST0);
++}
++
++static inline void hdmi_phy_test_enable(struct mxc_hdmi *hdmi,
++ unsigned char bit)
++{
++ u8 val = hdmi_readb(HDMI_PHY_TST0);
++ val &= ~HDMI_PHY_TST0_TSTEN_MASK;
++ val |= (bit << HDMI_PHY_TST0_TSTEN_OFFSET) &
++ HDMI_PHY_TST0_TSTEN_MASK;
++ hdmi_writeb(val, HDMI_PHY_TST0);
++}
++
++static inline void hdmi_phy_test_clock(struct mxc_hdmi *hdmi,
++ unsigned char bit)
++{
++ u8 val = hdmi_readb(HDMI_PHY_TST0);
++ val &= ~HDMI_PHY_TST0_TSTCLK_MASK;
++ val |= (bit << HDMI_PHY_TST0_TSTCLK_OFFSET) &
++ HDMI_PHY_TST0_TSTCLK_MASK;
++ hdmi_writeb(val, HDMI_PHY_TST0);
++}
++
++static inline void hdmi_phy_test_din(struct mxc_hdmi *hdmi,
++ unsigned char bit)
++{
++ hdmi_writeb(bit, HDMI_PHY_TST1);
++}
++
++static inline void hdmi_phy_test_dout(struct mxc_hdmi *hdmi,
++ unsigned char bit)
++{
++ hdmi_writeb(bit, HDMI_PHY_TST2);
++}
++
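++/* Busy-wait (in 1 ms steps, up to msec milliseconds) for the PHY I2C
++ * master to report done/error in HDMI_IH_I2CMPHY_STAT0. */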
++static bool hdmi_phy_wait_i2c_done(struct mxc_hdmi *hdmi, int msec)
++{
++ unsigned char val = 0;
++ val = hdmi_readb(HDMI_IH_I2CMPHY_STAT0) & 0x3;
++ while (val == 0) {
++ udelay(1000);
++ if (msec-- == 0)
++ return false;
++ val = hdmi_readb(HDMI_IH_I2CMPHY_STAT0) & 0x3;
++ }
++ return true;
++}
++
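++/* Write one 16-bit PHY register through the internal I2C master:
++ * clear the status bits, program the address and data, trigger the
++ * write operation, then wait for completion. */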
++static void hdmi_phy_i2c_write(struct mxc_hdmi *hdmi, unsigned short data,
++ unsigned char addr)
++{
++ hdmi_writeb(0xFF, HDMI_IH_I2CMPHY_STAT0);
++ hdmi_writeb(addr, HDMI_PHY_I2CM_ADDRESS_ADDR);
++ hdmi_writeb((unsigned char)(data >> 8),
++ HDMI_PHY_I2CM_DATAO_1_ADDR);
++ hdmi_writeb((unsigned char)(data >> 0),
++ HDMI_PHY_I2CM_DATAO_0_ADDR);
++ hdmi_writeb(HDMI_PHY_I2CM_OPERATION_ADDR_WRITE,
++ HDMI_PHY_I2CM_OPERATION_ADDR);
++ hdmi_phy_wait_i2c_done(hdmi, 1000);
++}
++
++#if 0
++static unsigned short hdmi_phy_i2c_read(struct mxc_hdmi *hdmi,
++ unsigned char addr)
++{
++ unsigned short data;
++ unsigned char msb = 0, lsb = 0;
++ hdmi_writeb(0xFF, HDMI_IH_I2CMPHY_STAT0);
++ hdmi_writeb(addr, HDMI_PHY_I2CM_ADDRESS_ADDR);
++ hdmi_writeb(HDMI_PHY_I2CM_OPERATION_ADDR_READ,
++ HDMI_PHY_I2CM_OPERATION_ADDR);
++ hdmi_phy_wait_i2c_done(hdmi, 1000);
++ msb = hdmi_readb(HDMI_PHY_I2CM_DATAI_1_ADDR);
++ lsb = hdmi_readb(HDMI_PHY_I2CM_DATAI_0_ADDR);
++ data = (msb << 8) | lsb;
++ return data;
++}
++
++static int hdmi_phy_i2c_write_verify(struct mxc_hdmi *hdmi, unsigned short data,
++ unsigned char addr)
++{
++ unsigned short val = 0;
++ hdmi_phy_i2c_write(hdmi, data, addr);
++ val = hdmi_phy_i2c_read(hdmi, addr);
++ return (val == data);
++}
++#endif
++
++static bool hdmi_edid_wait_i2c_done(struct mxc_hdmi *hdmi, int msec)
++{
++ unsigned char val = 0;
++ val = hdmi_readb(HDMI_IH_I2CM_STAT0) & 0x2;
++ while (val == 0) {
++
++ udelay(1000);
++ if (msec-- == 0) {
++ dev_dbg(&hdmi->pdev->dev,
++ "HDMI EDID i2c operation time out!!\n");
++ return false;
++ }
++ val = hdmi_readb(HDMI_IH_I2CM_STAT0) & 0x2;
++ }
++ return true;
++}
++
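++/* Read one EDID byte over DDC. Each 256-byte EDID segment holds two
++ * 128-byte blocks, so derive the segment pointer and the byte offset
++ * within the segment from the block number. */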
++static u8 hdmi_edid_i2c_read(struct mxc_hdmi *hdmi,
++ u8 addr, u8 blockno)
++{
++ u8 spointer = blockno / 2;
++ u8 edidaddress = ((blockno % 2) * 0x80) + addr;
++ u8 data;
++
++ hdmi_writeb(0xFF, HDMI_IH_I2CM_STAT0);
++ hdmi_writeb(edidaddress, HDMI_I2CM_ADDRESS);
++ hdmi_writeb(spointer, HDMI_I2CM_SEGADDR);
++ if (spointer == 0)
++ hdmi_writeb(HDMI_I2CM_OPERATION_READ,
++ HDMI_I2CM_OPERATION);
++ else
++ hdmi_writeb(HDMI_I2CM_OPERATION_READ_EXT,
++ HDMI_I2CM_OPERATION);
++
++ hdmi_edid_wait_i2c_done(hdmi, 1000);
++ data = hdmi_readb(HDMI_I2CM_DATAI);
++ hdmi_writeb(0xFF, HDMI_IH_I2CM_STAT0);
++ return data;
++}
++
++
++/* "Power-down enable (active low)"
++ * That means power up == 1! */
++static void mxc_hdmi_phy_enable_power(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_PDZ_OFFSET,
++ HDMI_PHY_CONF0_PDZ_MASK);
++}
++
++static void mxc_hdmi_phy_enable_tmds(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_ENTMDS_OFFSET,
++ HDMI_PHY_CONF0_ENTMDS_MASK);
++}
++
++static void mxc_hdmi_phy_gen2_pddq(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET,
++ HDMI_PHY_CONF0_GEN2_PDDQ_MASK);
++}
++
++static void mxc_hdmi_phy_gen2_txpwron(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_GEN2_TXPWRON_OFFSET,
++ HDMI_PHY_CONF0_GEN2_TXPWRON_MASK);
++}
++
++#if 0
++static void mxc_hdmi_phy_gen2_enhpdrxsense(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_GEN2_ENHPDRXSENSE_OFFSET,
++ HDMI_PHY_CONF0_GEN2_ENHPDRXSENSE_MASK);
++}
++#endif
++
++static void mxc_hdmi_phy_sel_data_en_pol(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_SELDATAENPOL_OFFSET,
++ HDMI_PHY_CONF0_SELDATAENPOL_MASK);
++}
++
++static void mxc_hdmi_phy_sel_interface_control(u8 enable)
++{
++ hdmi_mask_writeb(enable, HDMI_PHY_CONF0,
++ HDMI_PHY_CONF0_SELDIPIF_OFFSET,
++ HDMI_PHY_CONF0_SELDIPIF_MASK);
++}
++
++static int hdmi_phy_configure(struct mxc_hdmi *hdmi, unsigned char pRep,
++ unsigned char cRes, int cscOn)
++{
++ u8 val;
++ u8 msec;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* color resolution 0 is 8 bit colour depth */
++ if (cRes == 0)
++ cRes = 8;
++
++ if (pRep != 0)
++ return false;
++ else if (cRes != 8 && cRes != 12)
++ return false;
++
++ /* Enable csc path */
++ if (cscOn)
++ val = HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH;
++ else
++ val = HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS;
++
++ hdmi_writeb(val, HDMI_MC_FLOWCTRL);
++
++ /* gen2 tx power off */
++ mxc_hdmi_phy_gen2_txpwron(0);
++
++ /* gen2 pddq */
++ mxc_hdmi_phy_gen2_pddq(1);
++
++ /* PHY reset */
++ hdmi_writeb(HDMI_MC_PHYRSTZ_DEASSERT, HDMI_MC_PHYRSTZ);
++ hdmi_writeb(HDMI_MC_PHYRSTZ_ASSERT, HDMI_MC_PHYRSTZ);
++
++ hdmi_writeb(HDMI_MC_HEACPHY_RST_ASSERT, HDMI_MC_HEACPHY_RST);
++
++ hdmi_phy_test_clear(hdmi, 1);
++ hdmi_writeb(HDMI_PHY_I2CM_SLAVE_ADDR_PHY_GEN2,
++ HDMI_PHY_I2CM_SLAVE_ADDR);
++ hdmi_phy_test_clear(hdmi, 0);
++
++ if (hdmi->hdmi_data.video_mode.mPixelClock < 0) {
++ dev_dbg(&hdmi->pdev->dev, "Pixel clock (%lu) must be positive\n",
++ hdmi->hdmi_data.video_mode.mPixelClock);
++ return false;
++ }
++
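++ /* Program the PHY PLL/MPLL configuration (register 0x06) and GMP
++ * control (register 0x15) according to the pixel clock range and
++ * color resolution. */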
++ if (hdmi->hdmi_data.video_mode.mPixelClock <= 45250000) {
++ switch (cRes) {
++ case 8:
++ /* PLL/MPLL Cfg */
++ hdmi_phy_i2c_write(hdmi, 0x01e0, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x0000, 0x15); /* GMPCTRL */
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x21e1, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x0000, 0x15);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x41e2, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x0000, 0x15);
++ break;
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 92500000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x0140, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x2141, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x4142, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x0005, 0x15);
++ break;
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 148500000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x00a0, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x20a1, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x40a2, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
++ break;
++ default:
++ return false;
++ }
++ } else {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x00a0, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x000a, 0x15);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x2001, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x000f, 0x15);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x4002, 0x06);
++ hdmi_phy_i2c_write(hdmi, 0x000f, 0x15);
++ break;
++ default:
++ return false;
++ }
++ }
++
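++ /* Likewise select the PLL current control (CURRCTRL, register
++ * 0x10) from the pixel clock range and color resolution. */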
++ if (hdmi->hdmi_data.video_mode.mPixelClock <= 54000000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10); /* CURRCTRL */
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 58400000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 72000000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 74250000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x0b5c, 0x10);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 118800000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ default:
++ return false;
++ }
++ } else if (hdmi->hdmi_data.video_mode.mPixelClock <= 216000000) {
++ switch (cRes) {
++ case 8:
++ hdmi_phy_i2c_write(hdmi, 0x06dc, 0x10);
++ break;
++ case 10:
++ hdmi_phy_i2c_write(hdmi, 0x0b5c, 0x10);
++ break;
++ case 12:
++ hdmi_phy_i2c_write(hdmi, 0x091c, 0x10);
++ break;
++ default:
++ return false;
++ }
++ } else {
++ dev_err(&hdmi->pdev->dev,
++ "Pixel clock %lu - unsupported by HDMI\n",
++ hdmi->hdmi_data.video_mode.mPixelClock);
++ return false;
++ }
++
++ hdmi_phy_i2c_write(hdmi, 0x0000, 0x13); /* PLLPHBYCTRL */
++ hdmi_phy_i2c_write(hdmi, 0x0006, 0x17);
++ /* RESISTANCE TERM 133Ohm Cfg */
++ hdmi_phy_i2c_write(hdmi, 0x0005, 0x19); /* TXTERM */
++ /* PREEMP Cgf 0.00 */
++ hdmi_phy_i2c_write(hdmi, 0x800d, 0x09); /* CKSYMTXCTRL */
++ /* TX/CK LVL 10 */
++ hdmi_phy_i2c_write(hdmi, 0x01ad, 0x0E); /* VLEVCTRL */
++
++ /* Board specific setting for PHY register 0x09, 0x0e to pass HCT */
++ if (hdmi->phy_config.reg_cksymtx != 0)
++ hdmi_phy_i2c_write(hdmi, hdmi->phy_config.reg_cksymtx, 0x09);
++
++ if (hdmi->phy_config.reg_vlev != 0)
++ hdmi_phy_i2c_write(hdmi, hdmi->phy_config.reg_vlev, 0x0E);
++
++ /* REMOVE CLK TERM */
++ hdmi_phy_i2c_write(hdmi, 0x8000, 0x05); /* CKCALCTRL */
++
++ if (hdmi->hdmi_data.video_mode.mPixelClock > 148500000) {
++ hdmi_phy_i2c_write(hdmi, 0x800b, 0x09);
++ hdmi_phy_i2c_write(hdmi, 0x0129, 0x0E);
++ }
++
++ mxc_hdmi_phy_enable_power(1);
++
++ /* toggle TMDS enable */
++ mxc_hdmi_phy_enable_tmds(0);
++ mxc_hdmi_phy_enable_tmds(1);
++
++ /* gen2 tx power on */
++ mxc_hdmi_phy_gen2_txpwron(1);
++ mxc_hdmi_phy_gen2_pddq(0);
++
++ /* Wait for the PHY PLL to lock */
++ msec = 4;
++ val = hdmi_readb(HDMI_PHY_STAT0) & HDMI_PHY_TX_PHY_LOCK;
++ while (val == 0) {
++ udelay(1000);
++ if (msec-- == 0) {
++ dev_dbg(&hdmi->pdev->dev, "PHY PLL not locked\n");
++ return false;
++ }
++ val = hdmi_readb(HDMI_PHY_STAT0) & HDMI_PHY_TX_PHY_LOCK;
++ }
++
++ return true;
++}
++
++static void mxc_hdmi_phy_init(struct mxc_hdmi *hdmi)
++{
++ int i;
++ bool cscon = false;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* Never do phy init if pixel clock is gated.
++ * Otherwise HDMI PHY will get messed up and generate an overflow
++ * interrupt that can't be cleared or detected by accessing the
++ * status register. */
++ if (!hdmi->fb_reg || !hdmi->cable_plugin
++ || (hdmi->blank != FB_BLANK_UNBLANK))
++ return;
++
++ /* check whether CSC needs to be active in HDMI mode */
++ cscon = (isColorSpaceConversion(hdmi) &&
++ !hdmi->hdmi_data.video_mode.mDVI);
++
++ /* HDMI Phy spec says to do the phy initialization sequence twice */
++ for (i = 0 ; i < 2 ; i++) {
++ mxc_hdmi_phy_sel_data_en_pol(1);
++ mxc_hdmi_phy_sel_interface_control(0);
++ mxc_hdmi_phy_enable_tmds(0);
++ mxc_hdmi_phy_enable_power(0);
++
++ /* Enable CSC */
++ hdmi_phy_configure(hdmi, 0, 8, cscon);
++ }
++
++ hdmi->phy_enabled = true;
++}
++
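++/* Map the framebuffer 3D layout flags to the HDMI 1.4 vendor
++ * infoframe 3D_Structure codes. */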
++static enum hdmi_3d_structure mxc_3d_structure_infoframe(u32 layout)
++{
++ switch (layout) {
++ case FB_VMODE_3D_SBS_HALF:
++ return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF;
++ case FB_VMODE_3D_SBS_FULL:
++ return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL;
++ case FB_VMODE_3D_TOP_BOTTOM:
++ return HDMI_3D_STRUCTURE_TOP_AND_BOTTOM;
++ case FB_VMODE_3D_FRAME_PACK:
++ return HDMI_3D_STRUCTURE_FRAME_PACKING;
++ default:
++ return HDMI_3D_STRUCTURE_INVALID;
++ }
++}
++
++static int mxc_hdmi_vendor_infoframe(struct mxc_hdmi *hdmi, struct hdmi_vendor_infoframe *frame)
++{
++ int err;
++ u32 s3d_flags;
++ u8 vic;
++ uint8_t buffer[32]; // HDMI_FC_VSDPAYLOAD23 - HDMI_FC_VSDIEEEID0
++ // (with a hole starting at 0102B, ending 102F)
++ ssize_t len;
++ int i;
++
++ if (!frame)
++ return -EINVAL;
++
++ vic = hdmi->vic;
++ s3d_flags = (*hdmi->fbi->mode).vmode & FB_VMODE_3D_MASK;
++
++ if (!vic && !s3d_flags)
++ return -EINVAL;
++
++ err = hdmi_vendor_infoframe_init(frame);
++ if (err < 0)
++ return err;
++
++ if (s3d_flags)
++ frame->s3d_struct = mxc_3d_structure_infoframe(s3d_flags);
++ else if (vic)
++ frame->vic = vic;
++ else
++ return -EINVAL;
++
++ /* see comment above for the reason for this offset */
++ len = hdmi_vendor_infoframe_pack(frame, buffer+1, sizeof(buffer)-1);
++ if (len < 0)
++ return -EINVAL;
++
++ buffer[0] = buffer[5];
++ buffer[1] = buffer[3];
++ buffer[2] = 0;
++ buffer[3] = 0;
++ buffer[4] = 0;
++ buffer[5] = 0;
++
++ //buffer[6] = buffer[6];
++ //buffer[7] = buffer[7];
++
++ for (i = 0; i < 32; i++)
++ hdmi_writeb(buffer[i], HDMI_FC_VSDIEEEID0 + i);
++ return 0;
++}
++
++static void hdmi_config_AVI(struct mxc_hdmi *hdmi)
++{
++ u8 val;
++ u8 pix_fmt;
++ u8 under_scan;
++ u8 act_ratio, coded_ratio, colorimetry, ext_colorimetry;
++ struct fb_videomode mode;
++ const struct fb_videomode *edid_mode;
++ bool aspect_16_9;
++ struct hdmi_vendor_infoframe vendor_infoframe;
++
++ dev_dbg(&hdmi->pdev->dev, "set up AVI frame\n");
++ fb_var_to_videomode(&mode, &hdmi->fbi->var);
++ /* Use mode from list extracted from EDID to get aspect ratio */
++ if (!list_empty(&hdmi->fbi->modelist)) {
++ edid_mode = fb_find_nearest_mode(&mode, &hdmi->fbi->modelist);
++ if (edid_mode->vmode & FB_VMODE_ASPECT_16_9)
++ aspect_16_9 = true;
++ else
++ aspect_16_9 = false;
++ } else
++ aspect_16_9 = false;
++
++ /********************************************
++ * AVI Data Byte 1
++ ********************************************/
++ if (hdmi->hdmi_data.enc_out_format == YCBCR444)
++ pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR444;
++ else if (hdmi->hdmi_data.enc_out_format == YCBCR422_8BITS)
++ pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_YCBCR422;
++ else
++ pix_fmt = HDMI_FC_AVICONF0_PIX_FMT_RGB;
++
++ if (hdmi->edid_cfg.cea_underscan)
++ under_scan = HDMI_FC_AVICONF0_SCAN_INFO_UNDERSCAN;
++ else
++ under_scan = HDMI_FC_AVICONF0_SCAN_INFO_NODATA;
++
++ /*
++ * Active format identification data is present in the AVI InfoFrame.
++ * Under scan info, no bar data
++ */
++ val = pix_fmt | under_scan |
++ HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT |
++ HDMI_FC_AVICONF0_BAR_DATA_NO_DATA;
++
++ hdmi_writeb(val, HDMI_FC_AVICONF0);
++
++ /********************************************
++ * AVI Data Byte 2
++ ********************************************/
++
++ /* Set the Aspect Ratio */
++ if (aspect_16_9) {
++ act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_16_9;
++ coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_16_9;
++ } else {
++ act_ratio = HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_4_3;
++ coded_ratio = HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_4_3;
++ }
++
++ /* Set up colorimetry */
++ if (hdmi->hdmi_data.enc_out_format == XVYCC444) {
++ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO;
++ if (hdmi->hdmi_data.colorimetry == eITU601)
++ ext_colorimetry =
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
++ else /* hdmi->hdmi_data.colorimetry == eITU709 */
++ ext_colorimetry =
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709;
++ } else if (hdmi->hdmi_data.enc_out_format != RGB) {
++ if (hdmi->hdmi_data.colorimetry == eITU601)
++ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE;
++ else /* hdmi->hdmi_data.colorimetry == eITU709 */
++ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR;
++ ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
++ } else { /* Carries no data */
++ colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_NO_DATA;
++ ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
++ }
++
++ val = colorimetry | coded_ratio | act_ratio;
++ hdmi_writeb(val, HDMI_FC_AVICONF1);
++
++ /********************************************
++ * AVI Data Byte 3
++ ********************************************/
++
++ val = HDMI_FC_AVICONF2_IT_CONTENT_NO_DATA | ext_colorimetry |
++ hdmi->hdmi_data.rgb_quant_range |
++ HDMI_FC_AVICONF2_SCALING_NONE;
++ hdmi_writeb(val, HDMI_FC_AVICONF2);
++
++ /********************************************
++ * AVI Data Byte 4
++ ********************************************/
++ hdmi_writeb(hdmi->vic, HDMI_FC_AVIVID);
++
++ /********************************************
++ * AVI Data Byte 5
++ ********************************************/
++
++ /* Set up input and output pixel repetition */
++ val = (((hdmi->hdmi_data.video_mode.mPixelRepetitionInput + 1) <<
++ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_OFFSET) &
++ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_MASK) |
++ ((hdmi->hdmi_data.video_mode.mPixelRepetitionOutput <<
++ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET) &
++ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK);
++ hdmi_writeb(val, HDMI_FC_PRCONF);
++
++ /* IT Content and quantization range = don't care */
++ val = HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GRAPHICS |
++ HDMI_FC_AVICONF3_QUANT_RANGE_LIMITED;
++ hdmi_writeb(val, HDMI_FC_AVICONF3);
++
++ /********************************************
++ * AVI Data Bytes 6-13
++ ********************************************/
++ hdmi_writeb(0, HDMI_FC_AVIETB0);
++ hdmi_writeb(0, HDMI_FC_AVIETB1);
++ hdmi_writeb(0, HDMI_FC_AVISBB0);
++ hdmi_writeb(0, HDMI_FC_AVISBB1);
++ hdmi_writeb(0, HDMI_FC_AVIELB0);
++ hdmi_writeb(0, HDMI_FC_AVIELB1);
++ hdmi_writeb(0, HDMI_FC_AVISRB0);
++ hdmi_writeb(0, HDMI_FC_AVISRB1);
++
++ mxc_hdmi_vendor_infoframe(hdmi, &vendor_infoframe);
++}
++
++/*!
++ * this submodule is responsible for the video/audio data composition.
++ */
++static void hdmi_av_composer(struct mxc_hdmi *hdmi)
++{
++ u8 inv_val;
++ struct fb_info *fbi = hdmi->fbi;
++ struct fb_videomode fb_mode;
++ struct hdmi_vmode *vmode = &hdmi->hdmi_data.video_mode;
++ int hblank, vblank;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ fb_var_to_videomode(&fb_mode, &fbi->var);
++
++ vmode->mHSyncPolarity = ((fb_mode.sync & FB_SYNC_HOR_HIGH_ACT) != 0);
++ vmode->mVSyncPolarity = ((fb_mode.sync & FB_SYNC_VERT_HIGH_ACT) != 0);
++ vmode->mInterlaced = ((fb_mode.vmode & FB_VMODE_INTERLACED) != 0);
++ vmode->mPixelClock = (u32) (PICOS2KHZ(fb_mode.pixclock) * 1000UL);
++
++ dev_dbg(&hdmi->pdev->dev, "final pixclk = %lu\n", vmode->mPixelClock);
++
++ /* Set up HDMI_FC_INVIDCONF */
++ inv_val = (vmode->mVSyncPolarity ?
++ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH :
++ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW);
++
++ inv_val |= (vmode->mHSyncPolarity ?
++ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_HIGH :
++ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW);
++
++ inv_val |= (vmode->mDataEnablePolarity ?
++ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_HIGH :
++ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_LOW);
++
++ if (fb_mode.vmode & FB_VMODE_FRACTIONAL)
++ inv_val |= HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH;
++ else
++ inv_val |= (vmode->mInterlaced ?
++ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH :
++ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW);
++
++ inv_val |= (vmode->mInterlaced ?
++ HDMI_FC_INVIDCONF_IN_I_P_INTERLACED :
++ HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE);
++
++ inv_val |= (vmode->mDVI ?
++ HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE :
++ HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE);
++
++ hdmi_writeb(inv_val, HDMI_FC_INVIDCONF);
++
++ /* Set up horizontal active pixel region width */
++ hdmi_writeb(fb_mode.xres >> 8, HDMI_FC_INHACTV1);
++ hdmi_writeb(fb_mode.xres, HDMI_FC_INHACTV0);
++
++ /* Set up vertical blanking pixel region width */
++ if (vmode->mInterlaced) {
++ hdmi_writeb((fb_mode.yres/2) >> 8, HDMI_FC_INVACTV1);
++ hdmi_writeb((fb_mode.yres/2), HDMI_FC_INVACTV0);
++ } else {
++ hdmi_writeb(fb_mode.yres >> 8, HDMI_FC_INVACTV1);
++ hdmi_writeb(fb_mode.yres, HDMI_FC_INVACTV0);
++ }
++
++ /* Set up horizontal blanking pixel region width */
++ hblank = fb_mode.left_margin + fb_mode.right_margin +
++ fb_mode.hsync_len;
++ hdmi_writeb(hblank >> 8, HDMI_FC_INHBLANK1);
++ hdmi_writeb(hblank, HDMI_FC_INHBLANK0);
++
++ /* Set up vertical blanking pixel region width */
++ vblank = fb_mode.upper_margin + fb_mode.lower_margin +
++ fb_mode.vsync_len;
++ if (vmode->mInterlaced)
++ hdmi_writeb(vblank/2, HDMI_FC_INVBLANK);
++ else
++ hdmi_writeb(vblank, HDMI_FC_INVBLANK);
++
++ /* Set up HSYNC active edge delay width (in pixel clks) */
++ hdmi_writeb(fb_mode.right_margin >> 8, HDMI_FC_HSYNCINDELAY1);
++ hdmi_writeb(fb_mode.right_margin, HDMI_FC_HSYNCINDELAY0);
++
++ /* Set up VSYNC active edge delay (in pixel clks) */
++ if (vmode->mInterlaced)
++ hdmi_writeb(fb_mode.lower_margin / 2, HDMI_FC_VSYNCINDELAY);
++ else
++ hdmi_writeb(fb_mode.lower_margin, HDMI_FC_VSYNCINDELAY);
++
++ /* Set up HSYNC active pulse width (in pixel clks) */
++ hdmi_writeb(fb_mode.hsync_len >> 8, HDMI_FC_HSYNCINWIDTH1);
++ hdmi_writeb(fb_mode.hsync_len, HDMI_FC_HSYNCINWIDTH0);
++
++ /* Set up VSYNC active edge delay (in pixel clks) */
++ if (vmode->mInterlaced)
++ hdmi_writeb(fb_mode.vsync_len / 2, HDMI_FC_VSYNCINWIDTH);
++ else
++ hdmi_writeb(fb_mode.vsync_len, HDMI_FC_VSYNCINWIDTH);
++
++ dev_dbg(&hdmi->pdev->dev, "%s exit\n", __func__);
++}
++
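++/* Read and parse the EDID using the HDMI core's internal DDC/I2C
++ * master instead of the external I2C adapter. */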
++static int mxc_edid_read_internal(struct mxc_hdmi *hdmi, unsigned char *edid,
++ struct mxc_edid_cfg *cfg, struct fb_info *fbi)
++{
++ int extblknum;
++ int i, j, ret;
++ unsigned char *ediddata = edid;
++ unsigned char tmpedid[EDID_LENGTH];
++
++ dev_info(&hdmi->pdev->dev, "%s\n", __func__);
++
++ if (!edid || !cfg || !fbi)
++ return -EINVAL;
++
++ /* init the HDMI I2C master for reading the EDID */
++ hdmi_writeb(0x0, HDMI_I2CM_DIV);
++ hdmi_writeb(0x00, HDMI_I2CM_SS_SCL_HCNT_1_ADDR);
++ hdmi_writeb(0x79, HDMI_I2CM_SS_SCL_HCNT_0_ADDR);
++ hdmi_writeb(0x00, HDMI_I2CM_SS_SCL_LCNT_1_ADDR);
++ hdmi_writeb(0x91, HDMI_I2CM_SS_SCL_LCNT_0_ADDR);
++
++ hdmi_writeb(0x00, HDMI_I2CM_FS_SCL_HCNT_1_ADDR);
++ hdmi_writeb(0x0F, HDMI_I2CM_FS_SCL_HCNT_0_ADDR);
++ hdmi_writeb(0x00, HDMI_I2CM_FS_SCL_LCNT_1_ADDR);
++ hdmi_writeb(0x21, HDMI_I2CM_FS_SCL_LCNT_0_ADDR);
++
++ hdmi_writeb(0x50, HDMI_I2CM_SLAVE);
++ hdmi_writeb(0x30, HDMI_I2CM_SEGADDR);
++
++ /* Unmask the EDID interrupt */
++ hdmi_writeb(HDMI_I2CM_INT_DONE_POL,
++ HDMI_I2CM_INT);
++
++ hdmi_writeb(HDMI_I2CM_CTLINT_NAC_POL |
++ HDMI_I2CM_CTLINT_ARBITRATION_POL,
++ HDMI_I2CM_CTLINT);
++
++ /* zero the EDID data */
++ memset(edid, 0, EDID_LENGTH*4);
++ memset(cfg, 0, sizeof(struct mxc_edid_cfg));
++
++ /* Check the first three bytes of the EDID header */
++ if (!(hdmi_edid_i2c_read(hdmi, 0, 0) == 0x00) ||
++ !(hdmi_edid_i2c_read(hdmi, 1, 0) == 0xFF) ||
++ !(hdmi_edid_i2c_read(hdmi, 2, 0) == 0xFF)) {
++ dev_info(&hdmi->pdev->dev, "EDID head check failed!");
++ return -ENOENT;
++ }
++
++ for (i = 0; i < 128; i++) {
++ *ediddata = hdmi_edid_i2c_read(hdmi, i, 0);
++ ediddata++;
++ }
++
++ extblknum = edid[0x7E];
++ if (extblknum < 0)
++ return extblknum;
++
++ if (extblknum) {
++ ediddata = edid + EDID_LENGTH;
++ for (i = 0; i < 128; i++) {
++ *ediddata = hdmi_edid_i2c_read(hdmi, i, 1);
++ ediddata++;
++ }
++ }
++
++ /* edid first block parsing */
++ memset(&fbi->monspecs, 0, sizeof(fbi->monspecs));
++ fb_edid_to_monspecs(edid, &fbi->monspecs);
++
++ ret = mxc_edid_parse_ext_blk(edid + EDID_LENGTH,
++ cfg, &fbi->monspecs);
++ if (ret < 0) {
++ fb_edid_add_monspecs(edid + EDID_LENGTH, &fbi->monspecs);
++ if (fbi->monspecs.modedb_len > 0)
++ hdmi->edid_cfg.hdmi_cap = false;
++ else
++ return -ENOENT;
++ }
++
++ /* do we need to read further segment blocks? */
++ if (extblknum > 1) {
++ for (j = 1; j <= extblknum; j++) {
++ for (i = 0; i < 128; i++)
++ *(tmpedid + i) = hdmi_edid_i2c_read(hdmi, i, j);
++
++ /* edid ext block parsing */
++ ret = mxc_edid_parse_ext_blk(tmpedid,
++ cfg, &fbi->monspecs);
++ if (ret < 0)
++ return -ENOENT;
++ }
++ }
++
++ return 0;
++}
++
++static int mxc_hdmi_read_edid(struct mxc_hdmi *hdmi)
++{
++ int ret;
++ u8 edid_old[HDMI_EDID_LEN];
++ u8 clkdis;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* save old edid */
++ memcpy(edid_old, hdmi->edid, HDMI_EDID_LEN);
++
++ /* Read the EDID via the HDMI DDC when HDCP is enabled */
++ if (!hdcp_init)
++ ret = mxc_edid_read(hdmi_i2c->adapter, hdmi_i2c->addr,
++ hdmi->edid, &hdmi->edid_cfg, hdmi->fbi);
++ else {
++
++ /* Disable HDCP clk */
++ if (hdmi->hdmi_data.hdcp_enable) {
++ clkdis = hdmi_readb(HDMI_MC_CLKDIS);
++ clkdis |= HDMI_MC_CLKDIS_HDCPCLK_DISABLE;
++ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
++ }
++
++ ret = mxc_edid_read_internal(hdmi, hdmi->edid,
++ &hdmi->edid_cfg, hdmi->fbi);
++
++ /* Enable HDCP clk */
++ if (hdmi->hdmi_data.hdcp_enable) {
++ clkdis = hdmi_readb(HDMI_MC_CLKDIS);
++ clkdis &= ~HDMI_MC_CLKDIS_HDCPCLK_DISABLE;
++ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
++ }
++
++ }
++
++ if (ret < 0)
++ return HDMI_EDID_FAIL;
++
++ dev_info(&hdmi->pdev->dev, "%s HDMI in %s mode\n", __func__, hdmi->edid_cfg.hdmi_cap?"HDMI":"DVI");
++ hdmi->plug_event = hdmi->edid_cfg.hdmi_cap?HDMI_IH_PHY_STAT0_HPD:HDMI_DVI_IH_STAT;
++ hdmi->plug_mask = hdmi->edid_cfg.hdmi_cap?HDMI_PHY_HPD:HDMI_DVI_STAT;
++
++ if (!memcmp(edid_old, hdmi->edid, HDMI_EDID_LEN)) {
++ dev_info(&hdmi->pdev->dev, "same edid\n");
++ return HDMI_EDID_SAME;
++ }
++
++ if (hdmi->fbi->monspecs.modedb_len == 0) {
++ dev_info(&hdmi->pdev->dev, "No modes read from edid\n");
++ return HDMI_EDID_NO_MODES;
++ }
++
++ return HDMI_EDID_SUCCESS;
++}
++
++static void mxc_hdmi_phy_disable(struct mxc_hdmi *hdmi)
++{
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ if (!hdmi->phy_enabled)
++ return;
++
++ hdmi_disable_overflow_interrupts();
++
++ /* Setting PHY to reset status */
++ hdmi_writeb(HDMI_MC_PHYRSTZ_DEASSERT, HDMI_MC_PHYRSTZ);
++
++ /* Power down PHY */
++ mxc_hdmi_phy_enable_tmds(0);
++ mxc_hdmi_phy_enable_power(0);
++ mxc_hdmi_phy_gen2_txpwron(0);
++ mxc_hdmi_phy_gen2_pddq(1);
++
++ hdmi->phy_enabled = false;
++ dev_dbg(&hdmi->pdev->dev, "%s - exit\n", __func__);
++}
++
++/* HDMI Initialization Step B.4 */
++static void mxc_hdmi_enable_video_path(struct mxc_hdmi *hdmi)
++{
++ u8 clkdis;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* control period minimum duration */
++ hdmi_writeb(12, HDMI_FC_CTRLDUR);
++ hdmi_writeb(32, HDMI_FC_EXCTRLDUR);
++ hdmi_writeb(1, HDMI_FC_EXCTRLSPAC);
++
++ /* Set to fill TMDS data channels */
++ hdmi_writeb(0x0B, HDMI_FC_CH0PREAM);
++ hdmi_writeb(0x16, HDMI_FC_CH1PREAM);
++ hdmi_writeb(0x21, HDMI_FC_CH2PREAM);
++
++ /* Save CEC clock */
++ clkdis = hdmi_readb(HDMI_MC_CLKDIS) & HDMI_MC_CLKDIS_CECCLK_DISABLE;
++ clkdis |= ~HDMI_MC_CLKDIS_CECCLK_DISABLE;
++
++ /* Enable pixel clock and tmds data path */
++ clkdis = 0x7F & clkdis;
++ clkdis &= ~HDMI_MC_CLKDIS_PIXELCLK_DISABLE;
++ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
++
++ clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE;
++ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
++
++ /* Enable csc path */
++ if (isColorSpaceConversion(hdmi) && !hdmi->hdmi_data.video_mode.mDVI) {
++ clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
++ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
++ }
++}
++
++static void hdmi_enable_audio_clk(struct mxc_hdmi *hdmi)
++{
++ u8 clkdis;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ clkdis = hdmi_readb(HDMI_MC_CLKDIS);
++ clkdis &= ~HDMI_MC_CLKDIS_AUDCLK_DISABLE;
++ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
++}
++
++/* Workaround to clear the overflow condition */
++static void mxc_hdmi_clear_overflow(struct mxc_hdmi *hdmi)
++{
++ int count;
++ u8 val;
++
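++ /* The workaround re-writes HDMI_FC_INVIDCONF with its current
++ * value: once on i.MX6DL, five times on other variants. */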
++ /* TMDS software reset */
++ hdmi_writeb((u8)~HDMI_MC_SWRSTZ_TMDSSWRST_REQ, HDMI_MC_SWRSTZ);
++
++ val = hdmi_readb(HDMI_FC_INVIDCONF);
++
++ if (cpu_is_imx6dl(hdmi)) {
++ hdmi_writeb(val, HDMI_FC_INVIDCONF);
++ return;
++ }
++
++ for (count = 0 ; count < 5 ; count++)
++ hdmi_writeb(val, HDMI_FC_INVIDCONF);
++}
++
++static void hdmi_enable_overflow_interrupts(void)
++{
++ pr_debug("%s\n", __func__);
++ hdmi_writeb(0, HDMI_FC_MASK2);
++ hdmi_writeb(0, HDMI_IH_MUTE_FC_STAT2);
++}
++
++static void hdmi_disable_overflow_interrupts(void)
++{
++ pr_debug("%s\n", __func__);
++ hdmi_writeb(HDMI_IH_MUTE_FC_STAT2_OVERFLOW_MASK,
++ HDMI_IH_MUTE_FC_STAT2);
++ hdmi_writeb(0x7f, HDMI_FC_MASK2);
++}
++
++static void mxc_hdmi_notify_fb(struct mxc_hdmi *hdmi)
++{
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* Don't notify if we aren't registered yet */
++ WARN_ON(!hdmi->fb_reg);
++
++ /* disable the phy before ipu changes mode */
++ mxc_hdmi_phy_disable(hdmi);
++
++ /*
++ * Note that fb_set_var will block. During this time,
++ * FB_EVENT_MODE_CHANGE callback will happen.
++ * So by the end of this function, mxc_hdmi_setup()
++ * will be done.
++ */
++ hdmi->fbi->var.activate |= FB_ACTIVATE_FORCE;
++ console_lock();
++ hdmi->fbi->flags |= FBINFO_MISC_USEREVENT;
++ fb_set_var(hdmi->fbi, &hdmi->fbi->var);
++ hdmi->fbi->flags &= ~FBINFO_MISC_USEREVENT;
++ console_unlock();
++
++ dev_dbg(&hdmi->pdev->dev, "%s exit\n", __func__);
++}
++
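++/* Add a copy of src_mode to the modelist with its flag replaced and
++ * extra vmode bits ORed in; used to register 3D variants of 2D modes. */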
++static inline void mxc_fb_add_videomode(const struct fb_videomode *src_mode,
++ struct list_head *modelist, const u32 new_flag, const u32 mod_vmode)
++{
++ struct fb_videomode mode;
++
++ memcpy(&mode, src_mode, sizeof(struct fb_videomode));
++ mode.flag = new_flag;
++ mode.vmode |= mod_vmode;
++ fb_add_videomode(&mode, modelist);
++}
++
++enum {
++ hzSTART = 0,
++ hz50 = 50,
++ hz60 = 60,
++ hzEND = 61
++};
++
++struct stereo_mandatory_mode {
++ int rfc_refresh;
++ const struct fb_videomode *rfc_parent_cea_mode;
++ uint32_t flag;
++ uint32_t vmode;
++};
++
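++/* HDMI 1.4 mandatory 3D formats, keyed by the refresh rate of the
++ * parent 2D CEA mode. */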
++static struct stereo_mandatory_mode stereo_mandatory_modes[] = {
++ // 1280x720p @ 59.94 / 60Hz TOP-and-BOTTOM
++ { 60, &mxc_cea_mode[4], FB_MODE_IS_3D, FB_VMODE_3D_TOP_BOTTOM },
++ // 1920x1080p @ 23.98 / 24Hz TOP-and-BOTTOM
++ { 60, &mxc_cea_mode[32], FB_MODE_IS_3D, FB_VMODE_3D_TOP_BOTTOM },
++ // 1280x720p @ 59.94 / 60Hz FRAME-PACK
++ { 60, &mxc_cea_mode[4], FB_MODE_IS_3D, FB_VMODE_3D_FRAME_PACK },
++ // 1920x1080p @ 23.98 / 24Hz FRAME-PACK
++ { 60, &mxc_cea_mode[32], FB_MODE_IS_3D, FB_VMODE_3D_FRAME_PACK },
++ // 1920x1080i @ 59.94 / 60Hz SIDE-by-SIDE half
++ { 60, &mxc_cea_mode[5], FB_MODE_IS_3D, FB_VMODE_3D_SBS_HALF },
++ // 1280x720p @ 50Hz TOP-and-BOTTOM
++ { 50, &mxc_cea_mode[19], FB_MODE_IS_3D, FB_VMODE_3D_TOP_BOTTOM },
++ // 1280x720p @ 50Hz FRAME-PACK
++ { 50, &mxc_cea_mode[19], FB_MODE_IS_3D, FB_VMODE_3D_FRAME_PACK },
++ // 1920x1080i @ 50Hz SIDE-by-SIDE half
++ { 50, &mxc_cea_mode[20], FB_MODE_IS_3D, FB_VMODE_3D_SBS_HALF }
++};
++
++static void mxc_hdmi_edid_rebuild_modelist(struct mxc_hdmi *hdmi)
++{
++ int i, j, k, nvic = 0, vic;
++ struct fb_videomode *mode;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ console_lock();
++
++ fb_destroy_modelist(&hdmi->fbi->modelist);
++ fb_add_videomode(&vga_mode, &hdmi->fbi->modelist);
++
++ for (i = 0; i < hdmi->fbi->monspecs.modedb_len; i++) {
++ /*
++ * We might check here whether the mode is supported by HDMI.
++ * We do not currently support interlaced modes.
++ * CEA modes are added to the modelist.
++ */
++ mode = &hdmi->fbi->monspecs.modedb[i];
++
++ if ((vic = mxc_edid_mode_to_vic(mode, 0)))
++ nvic++;
++
++ // allow detailed timing specification with vic=0 for HDMI
++ // mode
++ if (hdmi->edid_cfg.hdmi_cap &&
++ (((mode->flag != FB_MODE_IS_DETAILED) && (vic == 0))
++ ||
++ (mode->flag == FB_MODE_IS_VESA)))
++ continue;
++
++ if (!mode->xres || !mode->refresh)
++ continue;
++
++ if (!(mode->vmode & FB_VMODE_ASPECT_MASK)) {
++ if (mode->yres == (mode->xres * 3)/4)
++ mode->vmode |= FB_VMODE_ASPECT_4_3;
++ else
++ mode->vmode |= FB_VMODE_ASPECT_16_9;
++ }
++
++ for (j = 0; j < 1 || (hdmi->hdmi_data.enable_fract && j < 2); j++) {
++ struct fb_videomode *tm = mode;
++ struct fb_videomode mode2;
++ char refresh[10];
++
++ if (j == 1 && (mode->refresh != 24 && mode->refresh != 30 && mode->refresh != 60))
++ break;
++
++ switch (j) {
++ case 1:
++ memcpy(&mode2, mode, sizeof(struct fb_videomode));
++ mode2.vmode = mode->vmode | FB_VMODE_FRACTIONAL;
++ mode2.pixclock = PICOS2KHZ(KHZ2PICOS(mode2.pixclock) * 1000/1001);
++ fb_add_videomode(&mode2, &hdmi->fbi->modelist);
++ tm = &mode2;
++ break;
++ default:
++ break;
++ }
++
++ get_refresh_str(tm, refresh);
++ dev_info(&hdmi->pdev->dev, "Added mode: %d, vic: %d %s", i, vic, j == 1 ? " fractional" : "");
++ dev_info(&hdmi->pdev->dev,
++ "xres = %d, yres = %d, ratio = %s, freq = %s, vmode = %d, flag = %d, pclk = %d\n",
++ tm->xres,
++ tm->yres,
++ tm->vmode & FB_VMODE_ASPECT_1 ? "1" :
++ tm->vmode & FB_VMODE_ASPECT_4_3 ? "4/3" :
++ tm->vmode & FB_VMODE_ASPECT_5_4 ? "5/4" :
++ tm->vmode & FB_VMODE_ASPECT_16_10 ? "16/10" :
++ tm->vmode & FB_VMODE_ASPECT_16_9 ? "16/9" : "n/a",
++ refresh,
++ tm->vmode,
++ tm->flag,
++ tm->pixclock);
++ fb_add_videomode(tm, &hdmi->fbi->modelist);
++ }
++
++ if (!hdmi->hdmi_data.enable_3d || !vic)
++ continue;
++
++ /* according to the HDMI 1.4 spec, add the mandatory 3D modes for existing 50 and 60 Hz 2D modes */
++ for (k = hzSTART + 1; k < hzEND; k++) {
++ if (mode->refresh == k && hdmi->edid_cfg.hdmi_3d_present) {
++ for (j = 0; j < ARRAY_SIZE(stereo_mandatory_modes); j++) {
++ if (stereo_mandatory_modes[j].rfc_refresh != k)
++ continue;
++ mxc_fb_add_videomode(stereo_mandatory_modes[j].rfc_parent_cea_mode, &hdmi->fbi->modelist,
++ stereo_mandatory_modes[j].flag, stereo_mandatory_modes[j].vmode);
++ }
++ }
++ }
++
++ if ((hdmi->edid_cfg.hdmi_3d_multi_present == 2 && hdmi->edid_cfg.hdmi_3d_mask_all & (1 << (nvic-1))) ||
++ (hdmi->edid_cfg.hdmi_3d_multi_present == 1 && nvic <= 16)) {
++ if (hdmi->edid_cfg.hdmi_3d_struct_all & 0x1)
++ mxc_fb_add_videomode(mode, &hdmi->fbi->modelist, FB_MODE_IS_3D, FB_VMODE_3D_FRAME_PACK);
++ if (hdmi->edid_cfg.hdmi_3d_struct_all & 0x6)
++ mxc_fb_add_videomode(mode, &hdmi->fbi->modelist, FB_MODE_IS_3D, FB_VMODE_3D_SBS_FULL);
++ if (hdmi->edid_cfg.hdmi_3d_struct_all & 0x40)
++ mxc_fb_add_videomode(mode, &hdmi->fbi->modelist, FB_MODE_IS_3D, FB_VMODE_3D_TOP_BOTTOM);
++ if (hdmi->edid_cfg.hdmi_3d_struct_all & 0x100)
++ mxc_fb_add_videomode(mode, &hdmi->fbi->modelist, FB_MODE_IS_3D, FB_VMODE_3D_SBS_HALF);
++ }
++
++ for (j = 0; j < hdmi->edid_cfg.hdmi_3d_len; j++) {
++ if (hdmi->edid_cfg.hdmi_3d_format[j].vic_order_2d != nvic-1)
++ continue;
++
++ if (hdmi->edid_cfg.hdmi_3d_format[j].struct_3d == 0)
++ mxc_fb_add_videomode(mode, &hdmi->fbi->modelist, FB_MODE_IS_3D, FB_VMODE_3D_FRAME_PACK);
++ if (hdmi->edid_cfg.hdmi_3d_format[j].struct_3d == 3)
++ mxc_fb_add_videomode(mode, &hdmi->fbi->modelist, FB_MODE_IS_3D, FB_VMODE_3D_SBS_FULL);
++ if (hdmi->edid_cfg.hdmi_3d_format[j].struct_3d == 6)
++ mxc_fb_add_videomode(mode, &hdmi->fbi->modelist, FB_MODE_IS_3D, FB_VMODE_3D_TOP_BOTTOM);
++ if (hdmi->edid_cfg.hdmi_3d_format[j].struct_3d == 8)
++ mxc_fb_add_videomode(mode, &hdmi->fbi->modelist, FB_MODE_IS_3D, FB_VMODE_3D_SBS_HALF);
++ }
++ }
++
++ fb_new_modelist(hdmi->fbi);
++
++ console_unlock();
++}
++
++static void mxc_hdmi_default_edid_cfg(struct mxc_hdmi *hdmi)
++{
++ /* Default setting HDMI working in HDMI mode */
++ hdmi->edid_cfg.hdmi_cap = true;
++}
++
++static void mxc_hdmi_default_modelist(struct mxc_hdmi *hdmi)
++{
++ u32 i;
++ const struct fb_videomode *mode;
++ struct fb_videomode m;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* If no EDID data was read, set up a default modelist */
++ dev_info(&hdmi->pdev->dev, "No modes read from edid\n");
++ dev_info(&hdmi->pdev->dev, "create default modelist\n");
++
++ console_lock();
++
++ fb_destroy_modelist(&hdmi->fbi->modelist);
++
++ fb_var_to_videomode(&m, &hdmi->fbi->var);
++ fb_add_videomode(&m, &hdmi->fbi->modelist);
++
++ /* Add all valid CEA modes to the default modelist */
++ for (i = 0; i < ARRAY_SIZE(mxc_cea_mode); i++) {
++ mode = &mxc_cea_mode[i];
++ if (mode->xres != 0)
++ fb_add_videomode(mode, &hdmi->fbi->modelist);
++ }
++
++ fb_new_modelist(hdmi->fbi);
++
++ console_unlock();
++}
++
++static void mxc_hdmi_set_mode(struct mxc_hdmi *hdmi)
++{
++ const struct fb_videomode *mode;
++ struct fb_videomode m;
++ struct fb_var_screeninfo var;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* Set the default mode only once. */
++ if (!hdmi->dft_mode_set) {
++ fb_videomode_to_var(&var, &hdmi->default_mode);
++ hdmi->dft_mode_set = true;
++ hdmi->requesting_vga_for_initialization = false;
++ } else
++ memcpy(&var, &hdmi->previous_non_vga_mode,
++ sizeof(struct fb_var_screeninfo));
++
++ fb_var_to_videomode(&m, &var);
++
++ mode = mxc_fb_find_nearest_mode(&m, &hdmi->fbi->modelist);
++ if (!mode) {
++ pr_err("%s: could not find mode in modelist\n", __func__);
++ return;
++ }
++
++ /* If video mode same as previous, init HDMI again */
++ if (fb_mode_is_equal(&m, mode) && hdmi->edid_status == HDMI_EDID_SAME) {
++ dev_dbg(&hdmi->pdev->dev,
++ "%s: Video mode and EDID same as previous\n", __func__);
++ /* update fbi mode in case modelist is updated */
++ hdmi->fbi->mode = (struct fb_videomode *)mode;
++ memcpy(&hdmi->fbi->var, &hdmi->previous_non_vga_mode,
++ sizeof(struct fb_var_screeninfo));
++ /* update hdmi setting in case EDID data updated */
++ mxc_hdmi_setup(hdmi, 0);
++ } else if (fb_mode_is_equal(&m, mode)) {
++ dev_dbg(&hdmi->pdev->dev,
++ "%s: Video mode same as previous\n", __func__);
++ /* update fbi mode in case modelist is updated */
++ hdmi->fbi->mode = (struct fb_videomode *)mode;
++ dump_fb_videomode(hdmi->fbi->mode);
++ memcpy(&hdmi->fbi->var, &hdmi->previous_non_vga_mode,
++ sizeof(struct fb_var_screeninfo));
++ mxc_hdmi_notify_fb(hdmi);
++ } else {
++ dev_dbg(&hdmi->pdev->dev, "%s: New video mode\n", __func__);
++ fb_videomode_to_var(&hdmi->fbi->var, mode);
++ dump_fb_videomode((struct fb_videomode *)mode);
++ mxc_hdmi_notify_fb(hdmi);
++ }
++
++}
++
++static void mxc_hdmi_cable_connected(struct mxc_hdmi *hdmi)
++{
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ hdmi->cable_plugin = true;
++
++ /* HDMI Initialization Step C */
++ hdmi->edid_status = mxc_hdmi_read_edid(hdmi);
++
++ /* Read EDID again if first EDID read failed */
++ if (hdmi->edid_status == HDMI_EDID_NO_MODES ||
++ hdmi->edid_status == HDMI_EDID_FAIL) {
++ dev_info(&hdmi->pdev->dev, "Read EDID again\n");
++ msleep(25);
++ hdmi->edid_status = mxc_hdmi_read_edid(hdmi);
++ }
++
++ /* HDMI Initialization Steps D, E, F */
++ switch (hdmi->edid_status) {
++ case HDMI_EDID_SUCCESS:
++ mxc_hdmi_edid_rebuild_modelist(hdmi);
++ break;
++
++ /* Nothing to do if EDID same */
++ case HDMI_EDID_SAME:
++ break;
++
++ case HDMI_EDID_FAIL:
++ mxc_hdmi_default_edid_cfg(hdmi);
++ /* No break here */
++ case HDMI_EDID_NO_MODES:
++ default:
++ mxc_hdmi_default_modelist(hdmi);
++ break;
++ }
++
++ /* Save edid cfg for audio driver */
++ hdmi_set_edid_cfg(hdmi->edid_status, &hdmi->edid_cfg);
++
++ /* Setting video mode */
++ mxc_hdmi_set_mode(hdmi);
++
++ dev_dbg(&hdmi->pdev->dev, "%s exit\n", __func__);
++}
++
++static int mxc_hdmi_power_on(struct mxc_dispdrv_handle *disp)
++{
++ struct mxc_hdmi *hdmi = mxc_dispdrv_getdata(disp);
++
++ dev_dbg(&hdmi->pdev->dev, "%s reg %d cable %d blank %d\n", __func__, hdmi->fb_reg, hdmi->cable_plugin, hdmi->blank);
++
++ mxc_hdmi_phy_init(hdmi);
++ hdmi_clk_regenerator_update_pixel_clock(hdmi->fbi->var.pixclock);
++ return 0;
++}
++
++static void mxc_hdmi_power_off(struct mxc_dispdrv_handle *disp)
++{
++ struct mxc_hdmi *hdmi = mxc_dispdrv_getdata(disp);
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ mxc_hdmi_phy_disable(hdmi);
++}
++
++static void mxc_hdmi_cable_disconnected(struct mxc_hdmi *hdmi)
++{
++ u8 clkdis;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ /* Save CEC clock */
++ clkdis = hdmi_readb(HDMI_MC_CLKDIS) & HDMI_MC_CLKDIS_CECCLK_DISABLE;
++ clkdis |= ~HDMI_MC_CLKDIS_CECCLK_DISABLE;
++
++ /* Disable All HDMI clock */
++ hdmi_writeb(0x7f, HDMI_MC_CLKDIS);
++
++ mxc_hdmi_phy_disable(hdmi);
++
++ //hdmi_disable_overflow_interrupts();
++ hdmi_writeb(clkdis, HDMI_MC_CLKDIS);
++
++ hdmi->cable_plugin = false;
++}
++
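++/* Deferred hot-plug handler, scheduled via hotplug_work_launch():
++ * processes plugin/plugout events and then re-enables and unmutes
++ * the HPD interrupts. */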
++static void hotplug_worker(struct work_struct *work)
++{
++ struct mxc_hdmi *hdmi =
++ container_of(work, struct mxc_hdmi, hotplug_work);
++ u32 hdmi_phy_stat0, hdmi_phy_pol0, hdmi_phy_mask0;
++ unsigned long flags;
++ char event_string[32];
++ char *envp[] = { event_string, NULL };
++ u32 l;
++
++ hdmi_phy_stat0 = hdmi_readb(HDMI_PHY_STAT0);
++ hdmi_phy_pol0 = hdmi_readb(HDMI_PHY_POL0);
++
++ dev_dbg(&hdmi->pdev->dev, "phy_int_stat=0x%x/0x%x, phy_int_pol=0x%x, plug_event=0x%x, plug_mask=0x%x\n",
++ hdmi_phy_stat0, hdmi->latest_intr_stat, hdmi_phy_pol0, hdmi->plug_event, hdmi->plug_mask);
++
++ /* Make HPD intr active low to capture unplug event or
++ * active high to capture plugin event */
++ hdmi_writeb((hdmi->plug_mask & ~hdmi_phy_pol0), HDMI_PHY_POL0);
++
++ /* cable connection changes */
++ if (hdmi_phy_pol0 & hdmi->plug_mask) {
++ /* Plugin event */
++ dev_dbg(&hdmi->pdev->dev, "EVENT=plugin\n");
++ mxc_hdmi_cable_connected(hdmi);
++
++ sprintf(event_string, "EVENT=plugin");
++ kobject_uevent_env(&hdmi->pdev->dev.kobj, KOBJ_CHANGE, envp);
++#ifdef CONFIG_MXC_HDMI_CEC
++ memcpy(&l, &hdmi->edid_cfg.physical_address, 4 * sizeof(u8));
++ mxc_hdmi_cec_handle(l);
++#endif
++ hdmi_set_cable_state(1);
++ } else {
++ /* Plugout event */
++ dev_dbg(&hdmi->pdev->dev, "EVENT=plugout\n");
++ hdmi_set_cable_state(0);
++ mxc_hdmi_abort_stream();
++ mxc_hdmi_cable_disconnected(hdmi);
++
++ sprintf(event_string, "EVENT=plugout");
++ kobject_uevent_env(&hdmi->pdev->dev.kobj, KOBJ_CHANGE, envp);
++#ifdef CONFIG_MXC_HDMI_CEC
++ mxc_hdmi_cec_handle(0x0);
++#endif
++ }
++
++ /* Lock here to ensure the full powerdown sequence has
++ * completed before the next interrupt is processed */
++ spin_lock_irqsave(&hdmi->irq_lock, flags);
++
++ /* Re-enable HPD interrupts */
++ hdmi_phy_mask0 = hdmi_readb(HDMI_PHY_MASK0);
++ hdmi_phy_mask0 &= ~hdmi->plug_mask;
++ hdmi_writeb(hdmi_phy_mask0, HDMI_PHY_MASK0);
++
++ /* Unmute interrupts */
++ hdmi_writeb(~hdmi->plug_event, HDMI_IH_MUTE_PHY_STAT0);
++
++ if (hdmi_readb(HDMI_IH_FC_STAT2) & HDMI_IH_FC_STAT2_OVERFLOW_MASK)
++ mxc_hdmi_clear_overflow(hdmi);
++
++ spin_unlock_irqrestore(&hdmi->irq_lock, flags);
++ pr_debug("%s exit\n", __func__);
++}
++
++static void hotplug_work_launch(unsigned long data)
++{
++ struct mxc_hdmi *hdmi = (struct mxc_hdmi *)data;
++ pr_debug("%s\n", __func__);
++ schedule_work(&hdmi->hotplug_work);
++}
++
++static void hdcp_hdp_worker(struct work_struct *work)
++{
++ struct delayed_work *delay_work = to_delayed_work(work);
++ struct mxc_hdmi *hdmi =
++ container_of(delay_work, struct mxc_hdmi, hdcp_hdp_work);
++ char event_string[32];
++ char *envp[] = { event_string, NULL };
++
++ /* HDCP interrupt */
++ sprintf(event_string, "EVENT=hdcpint");
++ kobject_uevent_env(&hdmi->pdev->dev.kobj, KOBJ_CHANGE, envp);
++
++ /* Unmute interrupts in the HDCP application */
++}
++
++static irqreturn_t mxc_hdmi_hotplug(int irq, void *data)
++{
++ struct mxc_hdmi *hdmi = data;
++ u8 val, intr_stat;
++ unsigned long flags;
++
++ spin_lock_irqsave(&hdmi->irq_lock, flags);
++
++ /* Check and clear the packet overflow interrupt. */
++ if (hdmi_readb(HDMI_IH_FC_STAT2) &
++ HDMI_IH_FC_STAT2_OVERFLOW_MASK) {
++ mxc_hdmi_clear_overflow(hdmi);
++
++ dev_dbg(&hdmi->pdev->dev, "Overflow interrupt received\n");
++ /* clear irq status */
++ hdmi_writeb(HDMI_IH_FC_STAT2_OVERFLOW_MASK,
++ HDMI_IH_FC_STAT2);
++ }
++
++ /*
++ * We cannot simply disable the irq line, since the audio driver
++ * may have enabled it; instead, mask off the HDMI interrupts
++ * through the HDMI registers.
++ */
++ /* Capture status - used in hotplug_worker ISR */
++ intr_stat = hdmi_readb(HDMI_IH_PHY_STAT0);
++ if (intr_stat & hdmi->plug_event) {
++
++ dev_dbg(&hdmi->pdev->dev, "Hotplug interrupt received\n");
++ hdmi->latest_intr_stat = intr_stat;
++
++ /* Mute interrupts until handled */
++
++ val = hdmi_readb(HDMI_IH_MUTE_PHY_STAT0);
++ val |= hdmi->plug_event;
++ hdmi_writeb(val, HDMI_IH_MUTE_PHY_STAT0);
++
++ val = hdmi_readb(HDMI_PHY_MASK0);
++ val |= hdmi->plug_mask;
++ hdmi_writeb(val, HDMI_PHY_MASK0);
++
++ /* Clear Hotplug interrupts */
++ hdmi_writeb(hdmi->plug_event, HDMI_IH_PHY_STAT0);
++
++ if (hdmi_inited) {
++ if (!hdmi->dft_mode_set)
++ mod_timer(&hdmi->jitter_timer, jiffies + msecs_to_jiffies(20));
++ else
++ mod_timer(&hdmi->jitter_timer, jiffies + HZ);
++ }
++ }
++
++ /* Check HDCP interrupt state */
++ if (hdmi->hdmi_data.hdcp_enable) {
++ val = hdmi_readb(HDMI_A_APIINTSTAT);
++ if (val != 0) {
++ /* Mute interrupts until interrupt handled */
++ val = 0x7F;
++ hdmi_writeb(val, HDMI_A_APIINTMSK);
++ schedule_delayed_work(&(hdmi->hdcp_hdp_work), msecs_to_jiffies(50));
++ }
++ }
++
++ spin_unlock_irqrestore(&hdmi->irq_lock, flags);
++ return IRQ_HANDLED;
++}
++
++static void mxc_hdmi_setup(struct mxc_hdmi *hdmi, unsigned long event)
++{
++ struct fb_videomode m;
++ const struct fb_videomode *edid_mode;
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ fb_var_to_videomode(&m, &hdmi->fbi->var);
++
++ dev_dbg(&hdmi->pdev->dev, "%s - video mode changed\n", __func__);
++
++ hdmi->vic = 0;
++ if (!hdmi->requesting_vga_for_initialization) {
++ /* Save mode if this isn't the result of requesting
++ * vga default. */
++ memcpy(&hdmi->previous_non_vga_mode, &hdmi->fbi->var,
++ sizeof(struct fb_var_screeninfo));
++ if (!list_empty(&hdmi->fbi->modelist)) {
++ edid_mode = mxc_fb_find_nearest_mode(&m, &hdmi->fbi->modelist);
++ pr_debug("edid mode vx:%d vy:%d", hdmi->fbi->var.xres_virtual, hdmi->fbi->var.yres_virtual);
++ dump_fb_videomode((struct fb_videomode *)edid_mode);
++ /* update fbi mode */
++ hdmi->fbi->mode = (struct fb_videomode *)edid_mode;
++ hdmi->vic = mxc_edid_mode_to_vic(edid_mode, 0);
++ }
++ }
++
++ hdmi_disable_overflow_interrupts();
++
++ dev_dbg(&hdmi->pdev->dev, "CEA mode used vic=%d\n", hdmi->vic);
++ if (hdmi->edid_cfg.hdmi_cap || !hdmi->edid_status) {
++ hdmi_set_dvi_mode(0);
++ hdmi->hdmi_data.video_mode.mDVI = false;
++ } else {
++ hdmi_set_dvi_mode(1);
++ dev_dbg(&hdmi->pdev->dev, "CEA mode vic=%d work in DVI\n", hdmi->vic);
++ hdmi->hdmi_data.video_mode.mDVI = true;
++ }
++
++ if ((hdmi->vic == 6) || (hdmi->vic == 7) ||
++ (hdmi->vic == 21) || (hdmi->vic == 22) ||
++ (hdmi->vic == 2) || (hdmi->vic == 3) ||
++ (hdmi->vic == 17) || (hdmi->vic == 18))
++ hdmi->hdmi_data.colorimetry = eITU601;
++ else
++ hdmi->hdmi_data.colorimetry = eITU709;
++
++ if ((hdmi->vic == 10) || (hdmi->vic == 11) ||
++ (hdmi->vic == 12) || (hdmi->vic == 13) ||
++ (hdmi->vic == 14) || (hdmi->vic == 15) ||
++ (hdmi->vic == 25) || (hdmi->vic == 26) ||
++ (hdmi->vic == 27) || (hdmi->vic == 28) ||
++ (hdmi->vic == 29) || (hdmi->vic == 30) ||
++ (hdmi->vic == 35) || (hdmi->vic == 36) ||
++ (hdmi->vic == 37) || (hdmi->vic == 38))
++ hdmi->hdmi_data.video_mode.mPixelRepetitionOutput = 1;
++ else
++ hdmi->hdmi_data.video_mode.mPixelRepetitionOutput = 0;
++
++ hdmi->hdmi_data.video_mode.mPixelRepetitionInput = 0;
++
++ /* TODO: Get input format from IPU (via FB driver iface) */
++ hdmi->hdmi_data.enc_in_format = RGB;
++
++ hdmi->hdmi_data.enc_out_format = RGB;
++
++ /* YCbCr only enabled in HDMI mode */
++ if (!hdmi->hdmi_data.video_mode.mDVI &&
++ !hdmi->hdmi_data.rgb_out_enable) {
++ if (hdmi->edid_cfg.cea_ycbcr444)
++ hdmi->hdmi_data.enc_out_format = YCBCR444;
++ else if (hdmi->edid_cfg.cea_ycbcr422)
++ hdmi->hdmi_data.enc_out_format = YCBCR422_8BITS;
++ }
++
++ /* The IPU does not support deep color output */
++ hdmi->hdmi_data.enc_color_depth = 8;
++ hdmi->hdmi_data.pix_repet_factor = 0;
++ hdmi->hdmi_data.video_mode.mDataEnablePolarity = true;
++
++ /* HDMI Initialization Step B.1 */
++ hdmi_av_composer(hdmi);
++
++ /* HDMI Initialization Step B.2 */
++ mxc_hdmi_phy_init(hdmi);
++
++ /* HDMI Initialization Step B.3 */
++ mxc_hdmi_enable_video_path(hdmi);
++
++ /* not for DVI mode */
++ if (hdmi->hdmi_data.video_mode.mDVI)
++ dev_dbg(&hdmi->pdev->dev, "%s DVI mode\n", __func__);
++ else {
++ dev_dbg(&hdmi->pdev->dev, "%s CEA mode\n", __func__);
++
++ /* HDMI Initialization Step E - Configure audio */
++ hdmi_enable_audio_clk(hdmi);
++
++ /* HDMI Initialization Step F - Configure AVI InfoFrame */
++ hdmi_config_AVI(hdmi);
++ }
++
++ hdmi_video_packetize(hdmi);
++ hdmi_video_csc(hdmi);
++ hdmi_video_sample(hdmi);
++
++ mxc_hdmi_clear_overflow(hdmi);
++
++ if (hdmi->cable_plugin && !hdmi->hdmi_data.video_mode.mDVI)
++ hdmi_enable_overflow_interrupts();
++
++ dev_dbg(&hdmi->pdev->dev, "%s exit\n\n", __func__);
++
++}
++
++/* Wait until we are registered to enable interrupts */
++static void mxc_hdmi_fb_registered(struct mxc_hdmi *hdmi)
++{
++ unsigned long flags;
++
++ if (hdmi->fb_reg)
++ return;
++
++ spin_lock_irqsave(&hdmi->irq_lock, flags);
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ hdmi_writeb(HDMI_PHY_I2CM_INT_ADDR_DONE_POL,
++ HDMI_PHY_I2CM_INT_ADDR);
++
++ hdmi_writeb(HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL |
++ HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL,
++ HDMI_PHY_I2CM_CTLINT_ADDR);
++
++ /* enable cable hot plug irq */
++ hdmi_writeb(~hdmi->plug_mask, HDMI_PHY_MASK0);
++
++ /* Clear Hotplug interrupts */
++ hdmi_writeb(hdmi->plug_event, HDMI_IH_PHY_STAT0);
++
++ /* Unmute interrupts */
++ hdmi_writeb(~hdmi->plug_event, HDMI_IH_MUTE_PHY_STAT0);
++
++ hdmi->fb_reg = true;
++
++ spin_unlock_irqrestore(&hdmi->irq_lock, flags);
++
++}
++
++static int mxc_hdmi_fb_event(struct notifier_block *nb,
++ unsigned long val, void *v)
++{
++ struct fb_event *event = v;
++ struct mxc_hdmi *hdmi = container_of(nb, struct mxc_hdmi, nb);
++
++ if (strcmp(event->info->fix.id, hdmi->fbi->fix.id))
++ return 0;
++
++ switch (val) {
++ case FB_EVENT_FB_REGISTERED:
++ dev_dbg(&hdmi->pdev->dev, "event=FB_EVENT_FB_REGISTERED\n");
++ mxc_hdmi_fb_registered(hdmi);
++ hdmi_set_registered(1);
++ break;
++
++ case FB_EVENT_FB_UNREGISTERED:
++ dev_dbg(&hdmi->pdev->dev, "event=FB_EVENT_FB_UNREGISTERED\n");
++ hdmi->fb_reg = false;
++ hdmi_set_registered(0);
++ break;
++
++ case FB_EVENT_MODE_CHANGE:
++ dev_dbg(&hdmi->pdev->dev, "event=FB_EVENT_MODE_CHANGE\n");
++ if (hdmi->fb_reg)
++ mxc_hdmi_setup(hdmi, val);
++ break;
++
++ case FB_EVENT_BLANK:
++ if ((*((int *)event->data) == FB_BLANK_UNBLANK) &&
++ (*((int *)event->data) != hdmi->blank)) {
++ dev_dbg(&hdmi->pdev->dev,
++ "event=FB_EVENT_BLANK - UNBLANK\n");
++
++ hdmi->blank = *((int *)event->data);
++
++ /* Re-enable HPD interrupts */
++ val = hdmi_readb(HDMI_PHY_MASK0);
++ val &= ~hdmi->plug_mask;
++ hdmi_writeb(val, HDMI_PHY_MASK0);
++
++ /* Unmute interrupts */
++ hdmi_writeb(~hdmi->plug_event, HDMI_IH_MUTE_PHY_STAT0);
++
++ if (hdmi->fb_reg && hdmi->cable_plugin)
++ mxc_hdmi_setup(hdmi, val);
++ hdmi_set_blank_state(1);
++
++ } else if (*((int *)event->data) != hdmi->blank) {
++ dev_dbg(&hdmi->pdev->dev,
++ "event=FB_EVENT_BLANK - BLANK\n");
++ hdmi_set_blank_state(0);
++ mxc_hdmi_abort_stream();
++
++ mxc_hdmi_phy_disable(hdmi);
++
++			if (hdmi->plug_mask == HDMI_DVI_STAT) {
++				u8 val;
++				pr_info("In DVI mode, disabling interrupts\n");
++ val = hdmi_readb(HDMI_IH_MUTE_PHY_STAT0);
++ val |= hdmi->plug_event;
++ hdmi_writeb(val, HDMI_IH_MUTE_PHY_STAT0);
++
++ val = hdmi_readb(HDMI_PHY_MASK0);
++ val |= hdmi->plug_mask;
++ hdmi_writeb(val, HDMI_PHY_MASK0);
++
++ hdmi_set_dvi_mode(1);
++ }
++
++ hdmi->blank = *((int *)event->data);
++ } else
++ dev_dbg(&hdmi->pdev->dev,
++				"FB BLANK state not changed\n");
++
++ break;
++
++ case FB_EVENT_SUSPEND:
++ dev_dbg(&hdmi->pdev->dev,
++ "event=FB_EVENT_SUSPEND\n");
++
++ if (hdmi->blank == FB_BLANK_UNBLANK) {
++ mxc_hdmi_phy_disable(hdmi);
++ clk_disable(hdmi->hdmi_iahb_clk);
++ clk_disable(hdmi->hdmi_isfr_clk);
++ }
++ break;
++
++ case FB_EVENT_RESUME:
++ dev_dbg(&hdmi->pdev->dev,
++ "event=FB_EVENT_RESUME\n");
++
++ if (hdmi->blank == FB_BLANK_UNBLANK) {
++ clk_enable(hdmi->hdmi_iahb_clk);
++ clk_enable(hdmi->hdmi_isfr_clk);
++ mxc_hdmi_phy_init(hdmi);
++ }
++ break;
++ default:
++ break;
++ }
++
++ return 0;
++}
++
++static void hdmi_init_route(struct mxc_hdmi *hdmi)
++{
++ uint32_t hdmi_mux_setting, reg;
++ int ipu_id, disp_id;
++
++ ipu_id = mxc_hdmi_ipu_id;
++ disp_id = mxc_hdmi_disp_id;
++
++ if ((ipu_id > 1) || (ipu_id < 0)) {
++ pr_err("Invalid IPU select for HDMI: %d. Set to 0\n", ipu_id);
++ ipu_id = 0;
++ }
++
++ if ((disp_id > 1) || (disp_id < 0)) {
++ pr_err("Invalid DI select for HDMI: %d. Set to 0\n", disp_id);
++ disp_id = 0;
++ }
++
++ reg = readl(hdmi->gpr_hdmi_base);
++
++ /* Configure the connection between IPU1/2 and HDMI */
++ hdmi_mux_setting = 2*ipu_id + disp_id;
++
++	/* GPR3, bits 2-3 = HDMI_MUX_CTL; clear only those two bits */
++	reg &= ~0xc;
++ reg |= hdmi_mux_setting << 2;
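++	/* Worked example (illustrative): ipu_id = 1, disp_id = 0 gives
++	 * hdmi_mux_setting = 2, i.e. GPR3[3:2] = 0b10, routing IPU2 DI0
++	 * to the HDMI block. */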
++
++ writel(reg, hdmi->gpr_hdmi_base);
++
++ /* Set HDMI event as SDMA event2 for HDMI audio */
++ reg = readl(hdmi->gpr_sdma_base);
++ reg |= 0x1;
++ writel(reg, hdmi->gpr_sdma_base);
++}
++
++static void hdmi_hdcp_get_property(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++
++	/* Check whether HDCP is enabled via the device tree. */
++ hdcp_init = of_property_read_bool(np, "fsl,hdcp");
++ if (hdcp_init)
++ dev_dbg(&pdev->dev, "hdcp enable\n");
++ else
++ dev_dbg(&pdev->dev, "hdcp disable\n");
++}
++
++static void hdmi_get_of_property(struct mxc_hdmi *hdmi)
++{
++ struct platform_device *pdev = hdmi->pdev;
++ struct device_node *np = pdev->dev.of_node;
++ const struct of_device_id *of_id =
++ of_match_device(imx_hdmi_dt_ids, &pdev->dev);
++ int ret;
++ u32 phy_reg_vlev = 0, phy_reg_cksymtx = 0;
++
++ if (of_id) {
++ pdev->id_entry = of_id->data;
++ hdmi->cpu_type = pdev->id_entry->driver_data;
++ }
++
++	/* The HDMI PHY vlev and cksymtx register properties are optional.
++	 * They allow a specific board to pass the HCT electrical tests.
++	 * Default values are set in the HDMI PHY config function
++	 * if they are not defined in the device tree.
++	 */
++ ret = of_property_read_u32(np, "fsl,phy_reg_vlev", &phy_reg_vlev);
++ if (ret)
++ dev_dbg(&pdev->dev, "No board specific HDMI PHY vlev\n");
++
++ ret = of_property_read_u32(np, "fsl,phy_reg_cksymtx", &phy_reg_cksymtx);
++ if (ret)
++ dev_dbg(&pdev->dev, "No board specific HDMI PHY cksymtx\n");
++
++ /* Specific phy config */
++ hdmi->phy_config.reg_cksymtx = phy_reg_cksymtx;
++ hdmi->phy_config.reg_vlev = phy_reg_vlev;
++
++}
++
++/* HDMI Initialization Step A */
++static int mxc_hdmi_disp_init(struct mxc_dispdrv_handle *disp,
++ struct mxc_dispdrv_setting *setting)
++{
++ int ret = 0;
++ u32 i;
++ const struct fb_videomode *mode;
++ struct fb_videomode m;
++ struct mxc_hdmi *hdmi = mxc_dispdrv_getdata(disp);
++ int irq = platform_get_irq(hdmi->pdev, 0);
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++	/* Check that HDMI display init runs only once */
++	if (hdmi_inited) {
++		dev_err(&hdmi->pdev->dev,
++			"Error: only one HDMI output is supported\n");
++ return -1;
++ }
++
++ hdmi_get_of_property(hdmi);
++
++ if (irq < 0)
++ return -ENODEV;
++
++ /* Setting HDMI default to blank state */
++ hdmi->blank = FB_BLANK_POWERDOWN;
++
++ setting->dev_id = mxc_hdmi_ipu_id;
++ setting->disp_id = mxc_hdmi_disp_id;
++ setting->if_fmt = IPU_PIX_FMT_RGB24;
++
++ hdmi->dft_mode_str = setting->dft_mode_str;
++ hdmi->default_bpp = setting->default_bpp;
++ dev_dbg(&hdmi->pdev->dev, "%s - default mode %s bpp=%d\n",
++ __func__, hdmi->dft_mode_str, hdmi->default_bpp);
++
++ hdmi->fbi = setting->fbi;
++
++ hdmi_init_route(hdmi);
++
++ hdmi->hdmi_isfr_clk = clk_get(&hdmi->pdev->dev, "hdmi_isfr");
++ if (IS_ERR(hdmi->hdmi_isfr_clk)) {
++ ret = PTR_ERR(hdmi->hdmi_isfr_clk);
++ dev_err(&hdmi->pdev->dev,
++ "Unable to get HDMI clk: %d\n", ret);
++ goto egetclk1;
++ }
++
++ ret = clk_prepare_enable(hdmi->hdmi_isfr_clk);
++ if (ret < 0) {
++ dev_err(&hdmi->pdev->dev,
++ "Cannot enable HDMI isfr clock: %d\n", ret);
++ goto erate1;
++ }
++
++ hdmi->hdmi_iahb_clk = clk_get(&hdmi->pdev->dev, "hdmi_iahb");
++ if (IS_ERR(hdmi->hdmi_iahb_clk)) {
++ ret = PTR_ERR(hdmi->hdmi_iahb_clk);
++ dev_err(&hdmi->pdev->dev,
++ "Unable to get HDMI clk: %d\n", ret);
++ goto egetclk2;
++ }
++
++ ret = clk_prepare_enable(hdmi->hdmi_iahb_clk);
++ if (ret < 0) {
++ dev_err(&hdmi->pdev->dev,
++ "Cannot enable HDMI iahb clock: %d\n", ret);
++ goto erate2;
++ }
++
++ dev_dbg(&hdmi->pdev->dev, "Enabled HDMI clocks\n");
++
++ /* Init DDC pins for HDCP */
++ if (hdcp_init) {
++ hdmi->pinctrl = devm_pinctrl_get_select_default(&hdmi->pdev->dev);
++ if (IS_ERR(hdmi->pinctrl)) {
++			dev_err(&hdmi->pdev->dev, "can't get/select DDC pinctrl\n");
++			ret = PTR_ERR(hdmi->pinctrl);
++			goto ereqirq;
++ }
++ }
++
++ /* Product and revision IDs */
++ dev_info(&hdmi->pdev->dev,
++ "Detected HDMI controller 0x%x:0x%x:0x%x:0x%x\n",
++ hdmi_readb(HDMI_DESIGN_ID),
++ hdmi_readb(HDMI_REVISION_ID),
++ hdmi_readb(HDMI_PRODUCT_ID0),
++ hdmi_readb(HDMI_PRODUCT_ID1));
++
++	/* To prevent overflows in HDMI_IH_FC_STAT2, set the clock regenerator
++	 * N and CTS values before enabling the PHY */
++ hdmi_init_clk_regenerator();
++
++ INIT_LIST_HEAD(&hdmi->fbi->modelist);
++
++ spin_lock_init(&hdmi->irq_lock);
++
++	/* Set the default mode and modelist at display init. */
++	fb_find_mode(&hdmi->fbi->var, hdmi->fbi, hdmi->dft_mode_str,
++		     mxc_cea_mode, ARRAY_SIZE(mxc_cea_mode), NULL,
++		     hdmi->default_bpp);
++
++ console_lock();
++
++ fb_destroy_modelist(&hdmi->fbi->modelist);
++
++	/* Add all non-interlaced CEA modes to the default modelist */
++ for (i = 0; i < ARRAY_SIZE(mxc_cea_mode); i++) {
++ mode = &mxc_cea_mode[i];
++ if (mode->xres != 0)
++ fb_add_videomode(mode, &hdmi->fbi->modelist);
++ }
++
++ console_unlock();
++
++	/* Find the nearest mode in the default modelist */
++ fb_var_to_videomode(&m, &hdmi->fbi->var);
++ dump_fb_videomode(&m);
++ hdmi->dft_mode_set = false;
++ /* Save default video mode */
++ memcpy(&hdmi->default_mode, &m, sizeof(struct fb_videomode));
++
++ mode = mxc_fb_find_nearest_mode(&m, &hdmi->fbi->modelist);
++ if (!mode) {
++ pr_err("%s: could not find mode in modelist\n", __func__);
++ return -1;
++ }
++
++ fb_videomode_to_var(&hdmi->fbi->var, mode);
++
++ /* update fbi mode */
++ hdmi->fbi->mode = (struct fb_videomode *)mode;
++
++	/* By default, HDMI works in HDMI (not DVI) mode */
++ hdmi->edid_cfg.hdmi_cap = true;
++
++ hdmi->plug_event = HDMI_DVI_IH_STAT;
++ hdmi->plug_mask = HDMI_DVI_STAT;
++
++ setup_timer(&hdmi->jitter_timer, hotplug_work_launch, (unsigned long)hdmi);
++ INIT_WORK(&hdmi->hotplug_work, hotplug_worker);
++ INIT_DELAYED_WORK(&hdmi->hdcp_hdp_work, hdcp_hdp_worker);
++
++ /* Configure registers related to HDMI interrupt
++ * generation before registering IRQ. */
++ hdmi_writeb(hdmi->plug_mask, HDMI_PHY_POL0);
++
++ /* Clear Hotplug interrupts */
++ hdmi_writeb(hdmi->plug_event, HDMI_IH_PHY_STAT0);
++
++ hdmi->nb.notifier_call = mxc_hdmi_fb_event;
++ ret = fb_register_client(&hdmi->nb);
++ if (ret < 0)
++		goto ereqirq;
++
++ memset(&hdmi->hdmi_data, 0, sizeof(struct hdmi_data_info));
++
++	/* By default, HDMI works in RGB output mode */
++ hdmi->hdmi_data.rgb_out_enable = true;
++
++ if (!strcasecmp(rgb_quant_range, "limited")) {
++ hdmi->hdmi_data.rgb_quant_range = HDMI_FC_AVICONF2_RGB_QUANT_LIMITED_RANGE;
++ } else if (!strcasecmp(rgb_quant_range, "full")) {
++ hdmi->hdmi_data.rgb_quant_range = HDMI_FC_AVICONF2_RGB_QUANT_FULL_RANGE;
++ } else {
++ hdmi->hdmi_data.rgb_quant_range = HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT;
++ }
++
++ if (!strcasecmp(enable_3d, "disable")) {
++ hdmi->hdmi_data.enable_3d = 0;
++ } else if (!strcasecmp(enable_3d, "0")) {
++ hdmi->hdmi_data.enable_3d = 0;
++ } else {
++ hdmi->hdmi_data.enable_3d = 1;
++ }
++
++ if (!strcasecmp(enable_fract, "disable")) {
++ hdmi->hdmi_data.enable_fract = 0;
++ } else if (!strcasecmp(enable_fract, "0")) {
++ hdmi->hdmi_data.enable_fract = 0;
++ } else {
++ hdmi->hdmi_data.enable_fract = 1;
++ }
++
++ ret = devm_request_irq(&hdmi->pdev->dev, irq, mxc_hdmi_hotplug, IRQF_SHARED,
++ dev_name(&hdmi->pdev->dev), hdmi);
++ if (ret < 0) {
++ dev_err(&hdmi->pdev->dev,
++ "Unable to request irq: %d\n", ret);
++		goto efbclient;
++ }
++
++ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_fb_name);
++ if (ret < 0)
++ dev_warn(&hdmi->pdev->dev,
++			"could not create sysfs node for fb name\n");
++ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_cable_state);
++ if (ret < 0)
++ dev_warn(&hdmi->pdev->dev,
++			"could not create sysfs node for cable state\n");
++ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_edid);
++ if (ret < 0)
++ dev_warn(&hdmi->pdev->dev,
++			"could not create sysfs node for edid\n");
++
++ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_rgb_out_enable);
++ if (ret < 0)
++ dev_warn(&hdmi->pdev->dev,
++			"could not create sysfs node for rgb out enable\n");
++
++ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_rgb_quant_range);
++ if (ret < 0)
++ dev_warn(&hdmi->pdev->dev,
++			"could not create sysfs node for rgb quant range\n");
++
++ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_enable_3d);
++ if (ret < 0)
++ dev_warn(&hdmi->pdev->dev,
++			"could not create sysfs node for enable_3d\n");
++
++ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_enable_fract);
++ if (ret < 0)
++ dev_warn(&hdmi->pdev->dev,
++			"could not create sysfs node for enable_fract\n");
++
++ ret = device_create_file(&hdmi->pdev->dev, &dev_attr_hdcp_enable);
++ if (ret < 0)
++ dev_warn(&hdmi->pdev->dev,
++			"could not create sysfs node for hdcp enable\n");
++
++ dev_dbg(&hdmi->pdev->dev, "%s exit\n", __func__);
++
++ hdmi_inited = true;
++
++ return ret;
++
++efbclient:
++	fb_unregister_client(&hdmi->nb);
++ereqirq:
++ clk_disable_unprepare(hdmi->hdmi_iahb_clk);
++erate2:
++ clk_put(hdmi->hdmi_iahb_clk);
++egetclk2:
++ clk_disable_unprepare(hdmi->hdmi_isfr_clk);
++erate1:
++ clk_put(hdmi->hdmi_isfr_clk);
++egetclk1:
++ dev_dbg(&hdmi->pdev->dev, "%s error exit\n", __func__);
++
++ return ret;
++}
++
++static void mxc_hdmi_disp_deinit(struct mxc_dispdrv_handle *disp)
++{
++ struct mxc_hdmi *hdmi = mxc_dispdrv_getdata(disp);
++
++ dev_dbg(&hdmi->pdev->dev, "%s\n", __func__);
++
++ fb_unregister_client(&hdmi->nb);
++
++ clk_disable_unprepare(hdmi->hdmi_isfr_clk);
++ clk_put(hdmi->hdmi_isfr_clk);
++ clk_disable_unprepare(hdmi->hdmi_iahb_clk);
++ clk_put(hdmi->hdmi_iahb_clk);
++
++ platform_device_unregister(hdmi->pdev);
++
++ hdmi_inited = false;
++}
++
++static struct mxc_dispdrv_driver mxc_hdmi_drv = {
++ .name = DISPDRV_HDMI,
++ .init = mxc_hdmi_disp_init,
++ .deinit = mxc_hdmi_disp_deinit,
++ .enable = mxc_hdmi_power_on,
++ .disable = mxc_hdmi_power_off,
++};
++
++
++static int mxc_hdmi_open(struct inode *inode, struct file *file)
++{
++ return 0;
++}
++
++static long mxc_hdmi_ioctl(struct file *file,
++ unsigned int cmd, unsigned long arg)
++{
++ int __user *argp = (void __user *)arg;
++ int ret = 0;
++
++ switch (cmd) {
++ case HDMI_IOC_GET_RESOURCE:
++ ret = copy_to_user(argp, &g_hdmi->hdmi_data,
++ sizeof(g_hdmi->hdmi_data)) ? -EFAULT : 0;
++ break;
++ case HDMI_IOC_GET_CPU_TYPE:
++ *argp = g_hdmi->cpu_type;
++ break;
++ default:
++		pr_debug("Unsupported cmd %d\n", cmd);
++ break;
++ }
++ return ret;
++}
++
++static int mxc_hdmi_release(struct inode *inode, struct file *file)
++{
++ return 0;
++}
++
++static const struct file_operations mxc_hdmi_fops = {
++ .owner = THIS_MODULE,
++ .open = mxc_hdmi_open,
++ .release = mxc_hdmi_release,
++ .unlocked_ioctl = mxc_hdmi_ioctl,
++};
++
++
++static int mxc_hdmi_probe(struct platform_device *pdev)
++{
++ struct mxc_hdmi *hdmi;
++ struct device *temp_class;
++ struct resource *res;
++ int ret = 0;
++
++	/* Check that the I2C driver is loaded and available,
++	 * and whether the HDCP function is enabled via the device tree */
++ hdmi_hdcp_get_property(pdev);
++ if (!hdmi_i2c && !hdcp_init)
++ return -ENODEV;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res)
++ return -ENOENT;
++
++ hdmi = devm_kzalloc(&pdev->dev,
++ sizeof(struct mxc_hdmi),
++ GFP_KERNEL);
++ if (!hdmi) {
++ dev_err(&pdev->dev, "Cannot allocate device data\n");
++ ret = -ENOMEM;
++ goto ealloc;
++ }
++ g_hdmi = hdmi;
++
++ hdmi_major = register_chrdev(hdmi_major, "mxc_hdmi", &mxc_hdmi_fops);
++ if (hdmi_major < 0) {
++ printk(KERN_ERR "HDMI: unable to get a major for HDMI\n");
++ ret = -EBUSY;
++ goto ealloc;
++ }
++
++ hdmi_class = class_create(THIS_MODULE, "mxc_hdmi");
++ if (IS_ERR(hdmi_class)) {
++ ret = PTR_ERR(hdmi_class);
++ goto err_out_chrdev;
++ }
++
++ temp_class = device_create(hdmi_class, NULL, MKDEV(hdmi_major, 0),
++ NULL, "mxc_hdmi");
++ if (IS_ERR(temp_class)) {
++ ret = PTR_ERR(temp_class);
++ goto err_out_class;
++ }
++
++ hdmi->pdev = pdev;
++
++ hdmi->core_pdev = platform_device_alloc("mxc_hdmi_core", -1);
++ if (!hdmi->core_pdev) {
++ pr_err("%s failed platform_device_alloc for hdmi core\n",
++ __func__);
++ ret = -ENOMEM;
++ goto ecore;
++ }
++
++ hdmi->gpr_base = ioremap(res->start, resource_size(res));
++ if (!hdmi->gpr_base) {
++ dev_err(&pdev->dev, "ioremap failed\n");
++ ret = -ENOMEM;
++ goto eiomap;
++ }
++
++ hdmi->gpr_hdmi_base = hdmi->gpr_base + 3;
++ hdmi->gpr_sdma_base = hdmi->gpr_base;
++
++ hdmi_inited = false;
++
++ hdmi->disp_mxc_hdmi = mxc_dispdrv_register(&mxc_hdmi_drv);
++ if (IS_ERR(hdmi->disp_mxc_hdmi)) {
++		ret = PTR_ERR(hdmi->disp_mxc_hdmi);
++		dev_err(&pdev->dev, "Failed to register dispdrv: %d\n", ret);
++ goto edispdrv;
++ }
++ mxc_dispdrv_setdata(hdmi->disp_mxc_hdmi, hdmi);
++
++ platform_set_drvdata(pdev, hdmi);
++
++ return 0;
++edispdrv:
++ iounmap(hdmi->gpr_base);
++eiomap:
++ platform_device_put(hdmi->core_pdev);
++ecore:
++	/* hdmi is devm-allocated; nothing to free here */
++err_out_class:
++ device_destroy(hdmi_class, MKDEV(hdmi_major, 0));
++ class_destroy(hdmi_class);
++err_out_chrdev:
++ unregister_chrdev(hdmi_major, "mxc_hdmi");
++ealloc:
++ return ret;
++}
++
++static int mxc_hdmi_remove(struct platform_device *pdev)
++{
++ struct mxc_hdmi *hdmi = platform_get_drvdata(pdev);
++ int irq = platform_get_irq(pdev, 0);
++
++ fb_unregister_client(&hdmi->nb);
++
++ mxc_dispdrv_puthandle(hdmi->disp_mxc_hdmi);
++ mxc_dispdrv_unregister(hdmi->disp_mxc_hdmi);
++ iounmap(hdmi->gpr_base);
++	/* No new work will be scheduled; devm_free_irq waits for a running ISR */
++	devm_free_irq(&pdev->dev, irq, hdmi);
++	/* hdmi itself is devm-allocated and freed by the driver core */
++ g_hdmi = NULL;
++
++ return 0;
++}
++
++static struct platform_driver mxc_hdmi_driver = {
++ .probe = mxc_hdmi_probe,
++ .remove = mxc_hdmi_remove,
++ .driver = {
++ .name = "mxc_hdmi",
++ .of_match_table = imx_hdmi_dt_ids,
++ .owner = THIS_MODULE,
++ },
++};
++
++static int __init mxc_hdmi_init(void)
++{
++ return platform_driver_register(&mxc_hdmi_driver);
++}
++module_init(mxc_hdmi_init);
++
++static void __exit mxc_hdmi_exit(void)
++{
++ if (hdmi_major > 0) {
++ device_destroy(hdmi_class, MKDEV(hdmi_major, 0));
++ class_destroy(hdmi_class);
++ unregister_chrdev(hdmi_major, "mxc_hdmi");
++ hdmi_major = 0;
++ }
++
++ platform_driver_unregister(&mxc_hdmi_driver);
++}
++module_exit(mxc_hdmi_exit);
++
++static int mxc_hdmi_i2c_probe(struct i2c_client *client,
++ const struct i2c_device_id *id)
++{
++ if (!i2c_check_functionality(client->adapter,
++ I2C_FUNC_SMBUS_BYTE | I2C_FUNC_I2C))
++ return -ENODEV;
++
++ hdmi_i2c = client;
++
++ return 0;
++}
++
++static int mxc_hdmi_i2c_remove(struct i2c_client *client)
++{
++ hdmi_i2c = NULL;
++ return 0;
++}
++
++static const struct of_device_id imx_hdmi_i2c_match[] = {
++ { .compatible = "fsl,imx6-hdmi-i2c", },
++ { /* sentinel */ }
++};
++
++static const struct i2c_device_id mxc_hdmi_i2c_id[] = {
++ { "mxc_hdmi_i2c", 0 },
++ {},
++};
++MODULE_DEVICE_TABLE(i2c, mxc_hdmi_i2c_id);
++
++static struct i2c_driver mxc_hdmi_i2c_driver = {
++ .driver = {
++ .name = "mxc_hdmi_i2c",
++ .of_match_table = imx_hdmi_i2c_match,
++ },
++ .probe = mxc_hdmi_i2c_probe,
++ .remove = mxc_hdmi_i2c_remove,
++ .id_table = mxc_hdmi_i2c_id,
++};
++
++static int __init mxc_hdmi_i2c_init(void)
++{
++ return i2c_add_driver(&mxc_hdmi_i2c_driver);
++}
++
++static void __exit mxc_hdmi_i2c_exit(void)
++{
++ i2c_del_driver(&mxc_hdmi_i2c_driver);
++}
++
++subsys_initcall(mxc_hdmi_i2c_init);
++module_exit(mxc_hdmi_i2c_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+diff -Nur linux-4.1.3/drivers/video/mxc/mxc_ipuv3_fb.c linux-xbian-imx6/drivers/video/mxc/mxc_ipuv3_fb.c
+--- linux-4.1.3/drivers/video/mxc/mxc_ipuv3_fb.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/video/mxc/mxc_ipuv3_fb.c 2015-07-27 23:13:08.753749907 +0200
+@@ -0,0 +1,2579 @@
++/*
++ * Copyright 2004-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @defgroup Framebuffer Framebuffer Driver for SDC and ADC.
++ */
++
++/*!
++ * @file mxc_ipuv3_fb.c
++ *
++ * @brief MXC Frame buffer driver for SDC
++ *
++ * @ingroup Framebuffer
++ */
++
++/*!
++ * Include files
++ */
++#include <linux/clk.h>
++#include <linux/console.h>
++#include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <linux/errno.h>
++#include <linux/fb.h>
++#include <linux/fsl_devices.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/ioport.h>
++#include <linux/ipu.h>
++#include <linux/ipu-v3.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mxcfb.h>
++#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/uaccess.h>
++
++#include "mxc_dispdrv.h"
++
++/*
++ * Driver name
++ */
++#define MXCFB_NAME "mxc_sdc_fb"
++
++/* Display port number */
++#define MXCFB_PORT_NUM 2
++/*!
++ * Structure containing the MXC specific framebuffer information.
++ */
++struct mxcfb_info {
++ int default_bpp;
++ int cur_blank;
++ int next_blank;
++ ipu_channel_t ipu_ch;
++ int ipu_id;
++ int ipu_di;
++ u32 ipu_di_pix_fmt;
++ bool ipu_int_clk;
++ bool overlay;
++ bool alpha_chan_en;
++ bool late_init;
++ bool first_set_par;
++ dma_addr_t alpha_phy_addr0;
++ dma_addr_t alpha_phy_addr1;
++ void *alpha_virt_addr0;
++ void *alpha_virt_addr1;
++ uint32_t alpha_mem_len;
++ uint32_t ipu_ch_irq;
++ uint32_t ipu_ch_nf_irq;
++ uint32_t ipu_alp_ch_irq;
++ uint32_t cur_ipu_buf;
++ uint32_t cur_ipu_alpha_buf;
++
++ u32 pseudo_palette[16];
++
++ bool mode_found;
++ struct completion flip_complete;
++ struct completion alpha_flip_complete;
++ struct completion vsync_complete;
++
++ void *ipu;
++ struct fb_info *ovfbi;
++
++ struct mxc_dispdrv_handle *dispdrv;
++
++ struct fb_var_screeninfo cur_var;
++};
++
++struct mxcfb_pfmt {
++ u32 fb_pix_fmt;
++ int bpp;
++ struct fb_bitfield red;
++ struct fb_bitfield green;
++ struct fb_bitfield blue;
++ struct fb_bitfield transp;
++};
++
++static const struct mxcfb_pfmt mxcfb_pfmts[] = {
++ /* pixel bpp red green blue transp */
++ {IPU_PIX_FMT_RGB565, 16, {11, 5, 0}, { 5, 6, 0}, { 0, 5, 0}, { 0, 0, 0} },
++ {IPU_PIX_FMT_RGB24, 24, { 0, 8, 0}, { 8, 8, 0}, {16, 8, 0}, { 0, 0, 0} },
++ {IPU_PIX_FMT_BGR24, 24, {16, 8, 0}, { 8, 8, 0}, { 0, 8, 0}, { 0, 0, 0} },
++ {IPU_PIX_FMT_RGB32, 32, { 0, 8, 0}, { 8, 8, 0}, {16, 8, 0}, {24, 8, 0} },
++ {IPU_PIX_FMT_BGR32, 32, {16, 8, 0}, { 8, 8, 0}, { 0, 8, 0}, {24, 8, 0} },
++ {IPU_PIX_FMT_ABGR32, 32, {24, 8, 0}, {16, 8, 0}, { 8, 8, 0}, { 0, 8, 0} },
++};
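++/* Each fb_bitfield triple above is {offset, length, msb_right}; e.g. in
++ * RGB565 red occupies bits 15..11 (offset 11, length 5), green bits 10..5
++ * and blue bits 4..0. */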
++
++struct mxcfb_alloc_list {
++ struct list_head list;
++ dma_addr_t phy_addr;
++ void *cpu_addr;
++ u32 size;
++};
++
++enum {
++ BOTH_ON,
++ SRC_ON,
++ TGT_ON,
++ BOTH_OFF
++};
++
++static bool g_dp_in_use[2];
++LIST_HEAD(fb_alloc_list);
++
++/* Return the default standard (RGB) pixel format */
++static uint32_t bpp_to_pixfmt(int bpp)
++{
++ uint32_t pixfmt = 0;
++
++ switch (bpp) {
++ case 24:
++ pixfmt = IPU_PIX_FMT_BGR24;
++ break;
++ case 32:
++ pixfmt = IPU_PIX_FMT_BGR32;
++ break;
++ case 16:
++ pixfmt = IPU_PIX_FMT_RGB565;
++ break;
++ }
++ return pixfmt;
++}
++
++static inline int bitfield_is_equal(struct fb_bitfield f1,
++ struct fb_bitfield f2)
++{
++ return !memcmp(&f1, &f2, sizeof(f1));
++}
++
++static int pixfmt_to_var(uint32_t pixfmt, struct fb_var_screeninfo *var)
++{
++ int i, ret = -1;
++
++ for (i = 0; i < ARRAY_SIZE(mxcfb_pfmts); i++) {
++ if (pixfmt == mxcfb_pfmts[i].fb_pix_fmt) {
++ var->red = mxcfb_pfmts[i].red;
++ var->green = mxcfb_pfmts[i].green;
++ var->blue = mxcfb_pfmts[i].blue;
++ var->transp = mxcfb_pfmts[i].transp;
++ var->bits_per_pixel = mxcfb_pfmts[i].bpp;
++ ret = 0;
++ break;
++ }
++ }
++ return ret;
++}
++
++static int bpp_to_var(int bpp, struct fb_var_screeninfo *var)
++{
++ uint32_t pixfmt = 0;
++
++ pixfmt = bpp_to_pixfmt(bpp);
++ if (pixfmt)
++ return pixfmt_to_var(pixfmt, var);
++ else
++ return -1;
++}
++
++static int check_var_pixfmt(struct fb_var_screeninfo *var)
++{
++ int i, ret = -1;
++
++ for (i = 0; i < ARRAY_SIZE(mxcfb_pfmts); i++) {
++ if (bitfield_is_equal(var->red, mxcfb_pfmts[i].red) &&
++ bitfield_is_equal(var->green, mxcfb_pfmts[i].green) &&
++ bitfield_is_equal(var->blue, mxcfb_pfmts[i].blue) &&
++ bitfield_is_equal(var->transp, mxcfb_pfmts[i].transp) &&
++ var->bits_per_pixel == mxcfb_pfmts[i].bpp) {
++ ret = 0;
++ break;
++ }
++ }
++ return ret;
++}
++
++static uint32_t fbi_to_pixfmt(struct fb_info *fbi)
++{
++ int i;
++ uint32_t pixfmt = 0;
++
++ if (fbi->var.nonstd)
++ return fbi->var.nonstd;
++
++ for (i = 0; i < ARRAY_SIZE(mxcfb_pfmts); i++) {
++ if (bitfield_is_equal(fbi->var.red, mxcfb_pfmts[i].red) &&
++ bitfield_is_equal(fbi->var.green, mxcfb_pfmts[i].green) &&
++ bitfield_is_equal(fbi->var.blue, mxcfb_pfmts[i].blue) &&
++ bitfield_is_equal(fbi->var.transp, mxcfb_pfmts[i].transp)) {
++ pixfmt = mxcfb_pfmts[i].fb_pix_fmt;
++ break;
++ }
++ }
++
++ if (pixfmt == 0)
++ dev_err(fbi->device, "cannot get pixel format\n");
++
++ return pixfmt;
++}
++
++static struct fb_info *found_registered_fb(ipu_channel_t ipu_ch, int ipu_id)
++{
++ int i;
++ struct mxcfb_info *mxc_fbi;
++ struct fb_info *fbi = NULL;
++
++ for (i = 0; i < num_registered_fb; i++) {
++ mxc_fbi =
++ ((struct mxcfb_info *)(registered_fb[i]->par));
++
++ if ((mxc_fbi->ipu_ch == ipu_ch) &&
++ (mxc_fbi->ipu_id == ipu_id)) {
++ fbi = registered_fb[i];
++ break;
++ }
++ }
++ return fbi;
++}
++
++static irqreturn_t mxcfb_irq_handler(int irq, void *dev_id);
++static irqreturn_t mxcfb_nf_irq_handler(int irq, void *dev_id);
++static int mxcfb_blank(int blank, struct fb_info *info);
++static int mxcfb_map_video_memory(struct fb_info *fbi);
++static int mxcfb_unmap_video_memory(struct fb_info *fbi);
++
++/*
++ * Set fixed framebuffer parameters based on variable settings.
++ *
++ * @param info framebuffer information pointer
++ */
++static int mxcfb_set_fix(struct fb_info *info)
++{
++ struct fb_fix_screeninfo *fix = &info->fix;
++ struct fb_var_screeninfo *var = &info->var;
++
++ fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;
++
++ fix->type = FB_TYPE_PACKED_PIXELS;
++ fix->accel = FB_ACCEL_NONE;
++ fix->visual = FB_VISUAL_TRUECOLOR;
++ fix->xpanstep = 1;
++ fix->ywrapstep = 1;
++ fix->ypanstep = 1;
++
++ return 0;
++}
++
++static int _setup_disp_channel1(struct fb_info *fbi)
++{
++ ipu_channel_params_t params;
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++
++ memset(&params, 0, sizeof(params));
++
++ if (mxc_fbi->ipu_ch == MEM_DC_SYNC) {
++ params.mem_dc_sync.di = mxc_fbi->ipu_di;
++ if (fbi->var.vmode & FB_VMODE_INTERLACED)
++ params.mem_dc_sync.interlaced = true;
++ params.mem_dc_sync.out_pixel_fmt = mxc_fbi->ipu_di_pix_fmt;
++ params.mem_dc_sync.in_pixel_fmt = fbi_to_pixfmt(fbi);
++ } else {
++ params.mem_dp_bg_sync.di = mxc_fbi->ipu_di;
++ if (fbi->var.vmode & FB_VMODE_INTERLACED)
++ params.mem_dp_bg_sync.interlaced = true;
++ params.mem_dp_bg_sync.out_pixel_fmt = mxc_fbi->ipu_di_pix_fmt;
++ params.mem_dp_bg_sync.in_pixel_fmt = fbi_to_pixfmt(fbi);
++ if (mxc_fbi->alpha_chan_en)
++ params.mem_dp_bg_sync.alpha_chan_en = true;
++ }
++ ipu_init_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch, &params);
++
++ return 0;
++}
++
++static int _setup_disp_channel2(struct fb_info *fbi)
++{
++ int retval = 0;
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++ int fb_stride;
++ unsigned long base;
++ unsigned int fr_xoff, fr_yoff, fr_w, fr_h;
++
++ switch (fbi_to_pixfmt(fbi)) {
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YVU420P:
++ case IPU_PIX_FMT_NV12:
++ case IPU_PIX_FMT_YUV422P:
++ case IPU_PIX_FMT_YVU422P:
++ case IPU_PIX_FMT_YUV420P:
++ case IPU_PIX_FMT_YUV444P:
++ fb_stride = fbi->var.xres_virtual;
++ break;
++ default:
++ fb_stride = fbi->fix.line_length;
++ }
++
++ base = fbi->fix.smem_start;
++ fr_xoff = fbi->var.xoffset;
++ fr_w = fbi->var.xres_virtual;
++ if (!(fbi->var.vmode & FB_VMODE_YWRAP)) {
++ dev_dbg(fbi->device, "Y wrap disabled\n");
++ fr_yoff = fbi->var.yoffset % fbi->var.yres;
++ fr_h = fbi->var.yres;
++ base += fbi->fix.line_length * fbi->var.yres *
++ (fbi->var.yoffset / fbi->var.yres);
++ } else {
++ dev_dbg(fbi->device, "Y wrap enabled\n");
++ fr_yoff = fbi->var.yoffset;
++ fr_h = fbi->var.yres_virtual;
++ }
++ base += fr_yoff * fb_stride + fr_xoff;
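++	/* base now points at the first visible pixel: fr_yoff/fr_xoff select
++	 * the pannable window inside the virtual framebuffer. */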
++
++ mxc_fbi->cur_ipu_buf = 2;
++ init_completion(&mxc_fbi->flip_complete);
++ /*
++ * We don't need to wait for vsync at the first time
++ * we do pan display after fb is initialized, as IPU will
++ * switch to the newly selected buffer automatically,
++ * so we call complete() for both mxc_fbi->flip_complete
++ * and mxc_fbi->alpha_flip_complete.
++ */
++ complete(&mxc_fbi->flip_complete);
++ if (mxc_fbi->alpha_chan_en) {
++ mxc_fbi->cur_ipu_alpha_buf = 1;
++ init_completion(&mxc_fbi->alpha_flip_complete);
++ complete(&mxc_fbi->alpha_flip_complete);
++ }
++
++ retval = ipu_init_channel_buffer(mxc_fbi->ipu,
++ mxc_fbi->ipu_ch, IPU_INPUT_BUFFER,
++ fbi_to_pixfmt(fbi),
++ fbi->var.xres, fbi->var.yres,
++ fb_stride,
++ fbi->var.rotate,
++ base,
++ base,
++ fbi->var.accel_flags &
++ FB_ACCEL_DOUBLE_FLAG ? 0 : base,
++ 0, 0);
++ if (retval) {
++ dev_err(fbi->device,
++ "ipu_init_channel_buffer error %d\n", retval);
++ return retval;
++ }
++
++ /* update u/v offset */
++ ipu_update_channel_offset(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_INPUT_BUFFER,
++ fbi_to_pixfmt(fbi),
++ fr_w,
++ fr_h,
++ fr_w,
++ 0, 0,
++ fr_yoff,
++ fr_xoff);
++
++ if (mxc_fbi->alpha_chan_en) {
++ retval = ipu_init_channel_buffer(mxc_fbi->ipu,
++ mxc_fbi->ipu_ch,
++ IPU_ALPHA_IN_BUFFER,
++ IPU_PIX_FMT_GENERIC,
++ fbi->var.xres, fbi->var.yres,
++ fbi->var.xres,
++ fbi->var.rotate,
++ mxc_fbi->alpha_phy_addr1,
++ mxc_fbi->alpha_phy_addr0,
++ 0,
++ 0, 0);
++ if (retval) {
++ dev_err(fbi->device,
++ "ipu_init_channel_buffer error %d\n", retval);
++ return retval;
++ }
++ }
++
++ return retval;
++}
++
++static bool mxcfb_need_to_set_par(struct fb_info *fbi)
++{
++ struct mxcfb_info *mxc_fbi = fbi->par;
++
++ if ((fbi->var.activate & FB_ACTIVATE_FORCE) &&
++ (fbi->var.activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW)
++ return true;
++
++ /*
++ * Ignore xoffset and yoffset update,
++ * because pan display handles this case.
++ */
++ mxc_fbi->cur_var.xoffset = fbi->var.xoffset;
++ mxc_fbi->cur_var.yoffset = fbi->var.yoffset;
++
++ return !!memcmp(&mxc_fbi->cur_var, &fbi->var,
++ sizeof(struct fb_var_screeninfo));
++}
++
++/*
++ * Set framebuffer parameters and change the operating mode.
++ *
++ * @param info framebuffer information pointer
++ */
++static int mxcfb_set_par(struct fb_info *fbi)
++{
++ int retval = 0;
++ u32 mem_len, alpha_mem_len;
++ ipu_di_signal_cfg_t sig_cfg;
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++
++ int16_t ov_pos_x = 0, ov_pos_y = 0;
++ int ov_pos_ret = 0;
++ struct mxcfb_info *mxc_fbi_fg = NULL;
++ bool ovfbi_enable = false;
++
++ if (ipu_ch_param_bad_alpha_pos(fbi_to_pixfmt(fbi)) &&
++ mxc_fbi->alpha_chan_en) {
++ dev_err(fbi->device, "Bad pixel format for "
++ "graphics plane fb\n");
++ return -EINVAL;
++ }
++
++ if (mxc_fbi->ovfbi)
++ mxc_fbi_fg = (struct mxcfb_info *)mxc_fbi->ovfbi->par;
++
++ if (mxc_fbi->ovfbi && mxc_fbi_fg)
++ if (mxc_fbi_fg->next_blank == FB_BLANK_UNBLANK)
++ ovfbi_enable = true;
++
++ if (!mxcfb_need_to_set_par(fbi))
++ return 0;
++
++ dev_dbg(fbi->device, "Reconfiguring framebuffer\n");
++
++ if (fbi->var.xres == 0 || fbi->var.yres == 0)
++ return 0;
++
++ if (ovfbi_enable) {
++ ov_pos_ret = ipu_disp_get_window_pos(
++ mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch,
++ &ov_pos_x, &ov_pos_y);
++ if (ov_pos_ret < 0)
++ dev_err(fbi->device, "Get overlay pos failed, dispdrv:%s.\n",
++ mxc_fbi->dispdrv->drv->name);
++
++ ipu_clear_irq(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch_irq);
++ ipu_disable_irq(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch_irq);
++ ipu_clear_irq(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch_nf_irq);
++ ipu_disable_irq(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch_nf_irq);
++ ipu_disable_channel(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch, true);
++ ipu_uninit_channel(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch);
++ }
++
++ ipu_clear_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
++ ipu_disable_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
++ ipu_clear_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_nf_irq);
++ ipu_disable_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_nf_irq);
++ ipu_disable_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch, true);
++ ipu_uninit_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch);
++
++ /*
++ * Disable IPU hsp clock if it is enabled for an
++ * additional time in ipu common driver.
++ */
++ if (mxc_fbi->first_set_par && mxc_fbi->late_init)
++ ipu_disable_hsp_clk(mxc_fbi->ipu);
++
++ mxcfb_set_fix(fbi);
++
++ mem_len = fbi->var.yres_virtual * fbi->fix.line_length;
++ if (!fbi->fix.smem_start || (mem_len > fbi->fix.smem_len)) {
++ if (fbi->fix.smem_start)
++ mxcfb_unmap_video_memory(fbi);
++
++ if (mxcfb_map_video_memory(fbi) < 0)
++ return -ENOMEM;
++ }
++
++ if (mxc_fbi->first_set_par) {
++ /*
++		 * Clear the screen in case the uboot fb pixel format is not
++		 * the same as the kernel fb pixel format.
++ */
++ if (mxc_fbi->late_init)
++ memset((char *)fbi->screen_base, 0, fbi->fix.smem_len);
++
++ mxc_fbi->first_set_par = false;
++ }
++
++ if (mxc_fbi->alpha_chan_en) {
++ alpha_mem_len = fbi->var.xres * fbi->var.yres;
++ if ((!mxc_fbi->alpha_phy_addr0 && !mxc_fbi->alpha_phy_addr1) ||
++ (alpha_mem_len > mxc_fbi->alpha_mem_len)) {
++ if (mxc_fbi->alpha_phy_addr0)
++ dma_free_coherent(fbi->device,
++ mxc_fbi->alpha_mem_len,
++ mxc_fbi->alpha_virt_addr0,
++ mxc_fbi->alpha_phy_addr0);
++ if (mxc_fbi->alpha_phy_addr1)
++ dma_free_coherent(fbi->device,
++ mxc_fbi->alpha_mem_len,
++ mxc_fbi->alpha_virt_addr1,
++ mxc_fbi->alpha_phy_addr1);
++
++ mxc_fbi->alpha_virt_addr0 =
++ dma_alloc_coherent(fbi->device,
++ alpha_mem_len,
++ &mxc_fbi->alpha_phy_addr0,
++ GFP_DMA | GFP_KERNEL);
++
++ mxc_fbi->alpha_virt_addr1 =
++ dma_alloc_coherent(fbi->device,
++ alpha_mem_len,
++ &mxc_fbi->alpha_phy_addr1,
++ GFP_DMA | GFP_KERNEL);
++ if (mxc_fbi->alpha_virt_addr0 == NULL ||
++ mxc_fbi->alpha_virt_addr1 == NULL) {
++ dev_err(fbi->device, "mxcfb: dma alloc for"
++ " alpha buffer failed.\n");
++ if (mxc_fbi->alpha_virt_addr0)
++ dma_free_coherent(fbi->device,
++ mxc_fbi->alpha_mem_len,
++ mxc_fbi->alpha_virt_addr0,
++ mxc_fbi->alpha_phy_addr0);
++ if (mxc_fbi->alpha_virt_addr1)
++ dma_free_coherent(fbi->device,
++ mxc_fbi->alpha_mem_len,
++ mxc_fbi->alpha_virt_addr1,
++ mxc_fbi->alpha_phy_addr1);
++ return -ENOMEM;
++ }
++ mxc_fbi->alpha_mem_len = alpha_mem_len;
++ }
++ }
++
++ if (mxc_fbi->next_blank != FB_BLANK_UNBLANK)
++ return retval;
++
++ if (mxc_fbi->dispdrv && mxc_fbi->dispdrv->drv->setup) {
++ retval = mxc_fbi->dispdrv->drv->setup(mxc_fbi->dispdrv, fbi);
++ if (retval < 0) {
++ dev_err(fbi->device, "setup error, dispdrv:%s.\n",
++ mxc_fbi->dispdrv->drv->name);
++ return -EINVAL;
++ }
++ }
++
++ _setup_disp_channel1(fbi);
++ if (ovfbi_enable)
++ _setup_disp_channel1(mxc_fbi->ovfbi);
++
++ if (!mxc_fbi->overlay) {
++ uint32_t out_pixel_fmt;
++
++ memset(&sig_cfg, 0, sizeof(sig_cfg));
++ if (fbi->var.vmode & FB_VMODE_INTERLACED)
++ sig_cfg.interlaced = true;
++ out_pixel_fmt = mxc_fbi->ipu_di_pix_fmt;
++ if (fbi->var.vmode & FB_VMODE_ODD_FLD_FIRST) /* PAL */
++ sig_cfg.odd_field_first = true;
++ if (mxc_fbi->ipu_int_clk)
++ sig_cfg.int_clk = true;
++ if (fbi->var.sync & FB_SYNC_HOR_HIGH_ACT)
++ sig_cfg.Hsync_pol = true;
++ if (fbi->var.sync & FB_SYNC_VERT_HIGH_ACT)
++ sig_cfg.Vsync_pol = true;
++ if (!(fbi->var.sync & FB_SYNC_CLK_LAT_FALL))
++ sig_cfg.clk_pol = true;
++ if (fbi->var.sync & FB_SYNC_DATA_INVERT)
++ sig_cfg.data_pol = true;
++ if (!(fbi->var.sync & FB_SYNC_OE_LOW_ACT))
++ sig_cfg.enable_pol = true;
++ if (fbi->var.sync & FB_SYNC_CLK_IDLE_EN)
++ sig_cfg.clkidle_en = true;
++
++		dev_dbg(fbi->device, "pixclock = %u Hz\n",
++ (u32) (PICOS2KHZ(fbi->var.pixclock) * 1000UL));
++
++ if (ipu_init_sync_panel(mxc_fbi->ipu, mxc_fbi->ipu_di,
++ (PICOS2KHZ(fbi->var.pixclock)) * 1000UL,
++ fbi->var.xres, fbi->var.yres,
++ out_pixel_fmt,
++ fbi->var.left_margin,
++ fbi->var.hsync_len,
++ fbi->var.right_margin,
++ fbi->var.upper_margin,
++ fbi->var.vsync_len,
++ fbi->var.lower_margin,
++ 0, sig_cfg) != 0) {
++ dev_err(fbi->device,
++ "mxcfb: Error initializing panel.\n");
++ return -EINVAL;
++ }
++
++ fbi->mode =
++ (struct fb_videomode *)fb_match_mode(&fbi->var,
++ &fbi->modelist);
++
++ ipu_disp_set_window_pos(mxc_fbi->ipu, mxc_fbi->ipu_ch, 0, 0);
++ }
++
++ retval = _setup_disp_channel2(fbi);
++ if (retval) {
++ ipu_uninit_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch);
++ return retval;
++ }
++
++ if (ovfbi_enable) {
++ if (ov_pos_ret >= 0)
++ ipu_disp_set_window_pos(
++ mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch,
++ ov_pos_x, ov_pos_y);
++ retval = _setup_disp_channel2(mxc_fbi->ovfbi);
++ if (retval) {
++ ipu_uninit_channel(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch);
++ ipu_uninit_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch);
++ return retval;
++ }
++ }
++
++ ipu_enable_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch);
++ if (ovfbi_enable)
++ ipu_enable_channel(mxc_fbi_fg->ipu, mxc_fbi_fg->ipu_ch);
++
++ if (mxc_fbi->dispdrv && mxc_fbi->dispdrv->drv->enable) {
++ retval = mxc_fbi->dispdrv->drv->enable(mxc_fbi->dispdrv);
++ if (retval < 0) {
++ dev_err(fbi->device, "enable error, dispdrv:%s.\n",
++ mxc_fbi->dispdrv->drv->name);
++ return -EINVAL;
++ }
++ }
++
++ mxc_fbi->cur_var = fbi->var;
++
++ return retval;
++}
++
++static int _swap_channels(struct fb_info *fbi_from,
++ struct fb_info *fbi_to, bool both_on)
++{
++ int retval, tmp;
++ ipu_channel_t old_ch;
++ struct fb_info *ovfbi;
++ struct mxcfb_info *mxc_fbi_from = (struct mxcfb_info *)fbi_from->par;
++ struct mxcfb_info *mxc_fbi_to = (struct mxcfb_info *)fbi_to->par;
++
++ if (both_on) {
++ ipu_disable_channel(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch, true);
++ ipu_uninit_channel(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch);
++ }
++
++ /* switch the mxc fbi parameters */
++ old_ch = mxc_fbi_from->ipu_ch;
++ mxc_fbi_from->ipu_ch = mxc_fbi_to->ipu_ch;
++ mxc_fbi_to->ipu_ch = old_ch;
++ tmp = mxc_fbi_from->ipu_ch_irq;
++ mxc_fbi_from->ipu_ch_irq = mxc_fbi_to->ipu_ch_irq;
++ mxc_fbi_to->ipu_ch_irq = tmp;
++ tmp = mxc_fbi_from->ipu_ch_nf_irq;
++ mxc_fbi_from->ipu_ch_nf_irq = mxc_fbi_to->ipu_ch_nf_irq;
++ mxc_fbi_to->ipu_ch_nf_irq = tmp;
++ ovfbi = mxc_fbi_from->ovfbi;
++ mxc_fbi_from->ovfbi = mxc_fbi_to->ovfbi;
++ mxc_fbi_to->ovfbi = ovfbi;
++
++ _setup_disp_channel1(fbi_from);
++ retval = _setup_disp_channel2(fbi_from);
++ if (retval)
++ return retval;
++
++ /* switch between dp and dc, disable old idmac, enable new idmac */
++ retval = ipu_swap_channel(mxc_fbi_from->ipu, old_ch, mxc_fbi_from->ipu_ch);
++ ipu_uninit_channel(mxc_fbi_from->ipu, old_ch);
++
++ if (both_on) {
++ _setup_disp_channel1(fbi_to);
++ retval = _setup_disp_channel2(fbi_to);
++ if (retval)
++ return retval;
++ ipu_enable_channel(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch);
++ }
++
++ return retval;
++}
++
++static int swap_channels(struct fb_info *fbi_from)
++{
++ int i;
++ int swap_mode;
++ ipu_channel_t ch_to;
++ struct mxcfb_info *mxc_fbi_from = (struct mxcfb_info *)fbi_from->par;
++ struct fb_info *fbi_to = NULL;
++ struct mxcfb_info *mxc_fbi_to;
++
++ /* what's the target channel? */
++ if (mxc_fbi_from->ipu_ch == MEM_BG_SYNC)
++ ch_to = MEM_DC_SYNC;
++ else
++ ch_to = MEM_BG_SYNC;
++
++ fbi_to = found_registered_fb(ch_to, mxc_fbi_from->ipu_id);
++ if (!fbi_to)
++ return -1;
++ mxc_fbi_to = (struct mxcfb_info *)fbi_to->par;
++
++ ipu_clear_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_irq);
++ ipu_clear_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_irq);
++ ipu_free_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_irq, fbi_from);
++ ipu_free_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_irq, fbi_to);
++ ipu_clear_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_nf_irq);
++ ipu_clear_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_nf_irq);
++ ipu_free_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_nf_irq, fbi_from);
++ ipu_free_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_nf_irq, fbi_to);
++
++ if (mxc_fbi_from->cur_blank == FB_BLANK_UNBLANK) {
++ if (mxc_fbi_to->cur_blank == FB_BLANK_UNBLANK)
++ swap_mode = BOTH_ON;
++ else
++ swap_mode = SRC_ON;
++ } else {
++ if (mxc_fbi_to->cur_blank == FB_BLANK_UNBLANK)
++ swap_mode = TGT_ON;
++ else
++ swap_mode = BOTH_OFF;
++ }
++
++ switch (swap_mode) {
++ case BOTH_ON:
++ /* disable target->switch src->enable target */
++ _swap_channels(fbi_from, fbi_to, true);
++ break;
++ case SRC_ON:
++ /* just switch src */
++ _swap_channels(fbi_from, fbi_to, false);
++ break;
++ case TGT_ON:
++ /* just switch target */
++ _swap_channels(fbi_to, fbi_from, false);
++ break;
++ case BOTH_OFF:
++ /* switch directly, no more need to do */
++ mxc_fbi_to->ipu_ch = mxc_fbi_from->ipu_ch;
++ mxc_fbi_from->ipu_ch = ch_to;
++ i = mxc_fbi_from->ipu_ch_irq;
++ mxc_fbi_from->ipu_ch_irq = mxc_fbi_to->ipu_ch_irq;
++ mxc_fbi_to->ipu_ch_irq = i;
++ i = mxc_fbi_from->ipu_ch_nf_irq;
++ mxc_fbi_from->ipu_ch_nf_irq = mxc_fbi_to->ipu_ch_nf_irq;
++ mxc_fbi_to->ipu_ch_nf_irq = i;
++ break;
++ default:
++ break;
++ }
++
++ if (ipu_request_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_irq,
++ mxcfb_irq_handler, IPU_IRQF_ONESHOT,
++ MXCFB_NAME, fbi_from) != 0) {
++ dev_err(fbi_from->device, "Error registering irq %d\n",
++ mxc_fbi_from->ipu_ch_irq);
++ return -EBUSY;
++ }
++ ipu_disable_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_irq);
++ if (ipu_request_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_irq,
++ mxcfb_irq_handler, IPU_IRQF_ONESHOT,
++ MXCFB_NAME, fbi_to) != 0) {
++ dev_err(fbi_to->device, "Error registering irq %d\n",
++ mxc_fbi_to->ipu_ch_irq);
++ return -EBUSY;
++ }
++ ipu_disable_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_irq);
++ if (ipu_request_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_nf_irq,
++ mxcfb_nf_irq_handler, IPU_IRQF_ONESHOT,
++ MXCFB_NAME, fbi_from) != 0) {
++ dev_err(fbi_from->device, "Error registering irq %d\n",
++ mxc_fbi_from->ipu_ch_nf_irq);
++ return -EBUSY;
++ }
++ ipu_disable_irq(mxc_fbi_from->ipu, mxc_fbi_from->ipu_ch_nf_irq);
++ if (ipu_request_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_nf_irq,
++ mxcfb_nf_irq_handler, IPU_IRQF_ONESHOT,
++ MXCFB_NAME, fbi_to) != 0) {
++ dev_err(fbi_to->device, "Error registering irq %d\n",
++ mxc_fbi_to->ipu_ch_nf_irq);
++ return -EBUSY;
++ }
++ ipu_disable_irq(mxc_fbi_to->ipu, mxc_fbi_to->ipu_ch_nf_irq);
++
++ return 0;
++}
++
++/*
++ * Check framebuffer variable parameters and adjust to valid values.
++ *
++ * @param var framebuffer variable parameters
++ *
++ * @param info framebuffer information pointer
++ */
++static int mxcfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
++{
++ u32 vtotal;
++ u32 htotal;
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)info->par;
++
++
++ if (var->xres == 0 || var->yres == 0)
++ return 0;
++
++	/* fg must not be bigger than bg */
++ if (mxc_fbi->ipu_ch == MEM_FG_SYNC) {
++ struct fb_info *fbi_tmp;
++ int bg_xres = 0, bg_yres = 0;
++ int16_t pos_x, pos_y;
++
++ bg_xres = var->xres;
++ bg_yres = var->yres;
++
++ fbi_tmp = found_registered_fb(MEM_BG_SYNC, mxc_fbi->ipu_id);
++ if (fbi_tmp) {
++ bg_xres = fbi_tmp->var.xres;
++ bg_yres = fbi_tmp->var.yres;
++ }
++
++ ipu_disp_get_window_pos(mxc_fbi->ipu, mxc_fbi->ipu_ch, &pos_x, &pos_y);
++
++ if ((var->xres + pos_x) > bg_xres)
++ var->xres = bg_xres - pos_x;
++ if ((var->yres + pos_y) > bg_yres)
++ var->yres = bg_yres - pos_y;
++ }
++
++ if (var->rotate > IPU_ROTATE_VERT_FLIP)
++ var->rotate = IPU_ROTATE_NONE;
++
++ if (var->xres_virtual < var->xres)
++ var->xres_virtual = var->xres;
++
++ if (var->yres_virtual < var->yres)
++ var->yres_virtual = var->yres * 3;
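++	/* A note: reserving three screens of virtual height enables the
++	 * triple buffering used by pan display (cur_ipu_buf cycles 0..2). */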
++
++ if ((var->bits_per_pixel != 32) && (var->bits_per_pixel != 24) &&
++ (var->bits_per_pixel != 16) && (var->bits_per_pixel != 12) &&
++ (var->bits_per_pixel != 8))
++ var->bits_per_pixel = 16;
++
++ if (check_var_pixfmt(var))
++ /* Fall back to default */
++ bpp_to_var(var->bits_per_pixel, var);
++
++ if (var->pixclock < 1000) {
++ htotal = var->xres + var->right_margin + var->hsync_len +
++ var->left_margin;
++ vtotal = var->yres + var->lower_margin + var->vsync_len +
++ var->upper_margin;
++ var->pixclock = (vtotal * htotal * 6UL) / 100UL;
++ var->pixclock = KHZ2PICOS(var->pixclock);
++ dev_dbg(info->device,
++ "pixclock set for 60Hz refresh = %u ps\n",
++ var->pixclock);
++ }
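++	/* Worked example (illustrative): for 1920x1080 with htotal = 2200 and
++	 * vtotal = 1125, 2200 * 1125 * 6 / 100 = 148500 kHz (148.5 MHz), so
++	 * KHZ2PICOS(148500) yields roughly 6734 ps. */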
++
++ var->height = -1;
++ var->width = -1;
++ var->grayscale = 0;
++
++ return 0;
++}
++
++static inline u_int _chan_to_field(u_int chan, struct fb_bitfield *bf)
++{
++ chan &= 0xffff;
++ chan >>= 16 - bf->length;
++ return chan << bf->offset;
++}
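++/* Worked example (illustrative): red = 0xffff against the RGB565 red field
++ * {offset 11, length 5} becomes (0xffff >> (16 - 5)) << 11 = 0xf800. */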
++
++static int mxcfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
++ u_int trans, struct fb_info *fbi)
++{
++ unsigned int val;
++ int ret = 1;
++
++ /*
++ * If greyscale is true, then we convert the RGB value
++ * to greyscale no matter what visual we are using.
++ */
++ if (fbi->var.grayscale)
++ red = green = blue = (19595 * red + 38470 * green +
++ 7471 * blue) >> 16;
++ switch (fbi->fix.visual) {
++ case FB_VISUAL_TRUECOLOR:
++ /*
++ * 16-bit True Colour. We encode the RGB value
++ * according to the RGB bitfield information.
++ */
++ if (regno < 16) {
++ u32 *pal = fbi->pseudo_palette;
++
++ val = _chan_to_field(red, &fbi->var.red);
++ val |= _chan_to_field(green, &fbi->var.green);
++ val |= _chan_to_field(blue, &fbi->var.blue);
++
++ pal[regno] = val;
++ ret = 0;
++ }
++ break;
++
++ case FB_VISUAL_STATIC_PSEUDOCOLOR:
++ case FB_VISUAL_PSEUDOCOLOR:
++ break;
++ }
++
++ return ret;
++}
++
++/*
++ * Function to handle custom ioctls for MXC framebuffer.
++ *
++ * @param inode inode struct
++ *
++ * @param file file struct
++ *
++ * @param cmd Ioctl command to handle
++ *
++ * @param arg User pointer to command arguments
++ *
++ * @param fbi framebuffer information pointer
++ */
++static int mxcfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
++{
++ int retval = 0;
++ int __user *argp = (void __user *)arg;
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++
++ switch (cmd) {
++ case MXCFB_SET_GBL_ALPHA:
++ {
++ struct mxcfb_gbl_alpha ga;
++
++ if (copy_from_user(&ga, (void *)arg, sizeof(ga))) {
++ retval = -EFAULT;
++ break;
++ }
++
++ if (ipu_disp_set_global_alpha(mxc_fbi->ipu,
++ mxc_fbi->ipu_ch,
++ (bool)ga.enable,
++ ga.alpha)) {
++ retval = -EINVAL;
++ break;
++ }
++
++			if (ga.enable) {
++				mxc_fbi->alpha_chan_en = false;
++				dev_dbg(fbi->device,
++					"Set global alpha of %s to %d\n",
++					fbi->fix.id, ga.alpha);
++			}
++ break;
++ }
++ case MXCFB_SET_LOC_ALPHA:
++ {
++ struct mxcfb_loc_alpha la;
++ bool bad_pixfmt =
++ ipu_ch_param_bad_alpha_pos(fbi_to_pixfmt(fbi));
++
++ if (copy_from_user(&la, (void *)arg, sizeof(la))) {
++ retval = -EFAULT;
++ break;
++ }
++
++ if (la.enable && !la.alpha_in_pixel) {
++ struct fb_info *fbi_tmp;
++ ipu_channel_t ipu_ch;
++
++ if (bad_pixfmt) {
++ dev_err(fbi->device, "Bad pixel format "
++ "for graphics plane fb\n");
++ retval = -EINVAL;
++ break;
++ }
++
++ mxc_fbi->alpha_chan_en = true;
++
++ if (mxc_fbi->ipu_ch == MEM_FG_SYNC)
++ ipu_ch = MEM_BG_SYNC;
++ else if (mxc_fbi->ipu_ch == MEM_BG_SYNC)
++ ipu_ch = MEM_FG_SYNC;
++ else {
++ retval = -EINVAL;
++ break;
++ }
++
++ fbi_tmp = found_registered_fb(ipu_ch, mxc_fbi->ipu_id);
++ if (fbi_tmp)
++ ((struct mxcfb_info *)(fbi_tmp->par))->alpha_chan_en = false;
++ } else
++ mxc_fbi->alpha_chan_en = false;
++
++ if (ipu_disp_set_global_alpha(mxc_fbi->ipu,
++ mxc_fbi->ipu_ch,
++ !(bool)la.enable, 0)) {
++ retval = -EINVAL;
++ break;
++ }
++
++ fbi->var.activate = (fbi->var.activate & ~FB_ACTIVATE_MASK) |
++ FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
++ mxcfb_set_par(fbi);
++
++ la.alpha_phy_addr0 = mxc_fbi->alpha_phy_addr0;
++ la.alpha_phy_addr1 = mxc_fbi->alpha_phy_addr1;
++ if (copy_to_user((void *)arg, &la, sizeof(la))) {
++ retval = -EFAULT;
++ break;
++ }
++
++ if (la.enable)
++ dev_dbg(fbi->device,
++ "Enable DP local alpha for %s\n",
++ fbi->fix.id);
++ break;
++ }
++ case MXCFB_SET_LOC_ALP_BUF:
++ {
++ unsigned long base;
++ uint32_t ipu_alp_ch_irq;
++
++ if (!(((mxc_fbi->ipu_ch == MEM_FG_SYNC) ||
++ (mxc_fbi->ipu_ch == MEM_BG_SYNC)) &&
++ (mxc_fbi->alpha_chan_en))) {
++ dev_err(fbi->device,
++ "Should use background or overlay "
++ "framebuffer to set the alpha buffer "
++ "number\n");
++ return -EINVAL;
++ }
++
++ if (get_user(base, argp))
++ return -EFAULT;
++
++ if (base != mxc_fbi->alpha_phy_addr0 &&
++ base != mxc_fbi->alpha_phy_addr1) {
++ dev_err(fbi->device,
++ "Wrong alpha buffer physical address "
++ "%lu\n", base);
++ return -EINVAL;
++ }
++
++ if (mxc_fbi->ipu_ch == MEM_FG_SYNC)
++ ipu_alp_ch_irq = IPU_IRQ_FG_ALPHA_SYNC_EOF;
++ else
++ ipu_alp_ch_irq = IPU_IRQ_BG_ALPHA_SYNC_EOF;
++
++ retval = wait_for_completion_timeout(
++ &mxc_fbi->alpha_flip_complete, HZ/2);
++ if (retval == 0) {
++ dev_err(fbi->device, "timeout when waiting for alpha flip irq\n");
++ retval = -ETIMEDOUT;
++ break;
++ }
++
++ mxc_fbi->cur_ipu_alpha_buf =
++ !mxc_fbi->cur_ipu_alpha_buf;
++ if (ipu_update_channel_buffer(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_ALPHA_IN_BUFFER,
++				      mxc_fbi->cur_ipu_alpha_buf,
++ base) == 0) {
++ ipu_select_buffer(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_ALPHA_IN_BUFFER,
++ mxc_fbi->cur_ipu_alpha_buf);
++ ipu_clear_irq(mxc_fbi->ipu, ipu_alp_ch_irq);
++ ipu_enable_irq(mxc_fbi->ipu, ipu_alp_ch_irq);
++ } else {
++ dev_err(fbi->device,
++ "Error updating %s SDC alpha buf %d "
++ "to address=0x%08lX\n",
++ fbi->fix.id,
++ mxc_fbi->cur_ipu_alpha_buf, base);
++ }
++ break;
++ }
++ case MXCFB_SET_CLR_KEY:
++ {
++ struct mxcfb_color_key key;
++ if (copy_from_user(&key, (void *)arg, sizeof(key))) {
++ retval = -EFAULT;
++ break;
++ }
++ retval = ipu_disp_set_color_key(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ key.enable,
++ key.color_key);
++ dev_dbg(fbi->device, "Set color key to 0x%08X\n",
++ key.color_key);
++ break;
++ }
++ case MXCFB_SET_GAMMA:
++ {
++ struct mxcfb_gamma gamma;
++ if (copy_from_user(&gamma, (void *)arg, sizeof(gamma))) {
++ retval = -EFAULT;
++ break;
++ }
++ retval = ipu_disp_set_gamma_correction(mxc_fbi->ipu,
++ mxc_fbi->ipu_ch,
++ gamma.enable,
++ gamma.constk,
++ gamma.slopek);
++ break;
++ }
++ case MXCFB_WAIT_FOR_VSYNC:
++ {
++ if (mxc_fbi->ipu_ch == MEM_FG_SYNC) {
++ /* BG should poweron */
++ struct mxcfb_info *bg_mxcfbi = NULL;
++ struct fb_info *fbi_tmp;
++
++ fbi_tmp = found_registered_fb(MEM_BG_SYNC, mxc_fbi->ipu_id);
++ if (fbi_tmp)
++ bg_mxcfbi = ((struct mxcfb_info *)(fbi_tmp->par));
++
++ if (!bg_mxcfbi) {
++ retval = -EINVAL;
++ break;
++ }
++ if (bg_mxcfbi->cur_blank != FB_BLANK_UNBLANK) {
++ retval = -EINVAL;
++ break;
++ }
++ }
++ if (mxc_fbi->cur_blank != FB_BLANK_UNBLANK) {
++ retval = -EINVAL;
++ break;
++ }
++
++ init_completion(&mxc_fbi->vsync_complete);
++ ipu_clear_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_nf_irq);
++ ipu_enable_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_nf_irq);
++ retval = wait_for_completion_interruptible_timeout(
++ &mxc_fbi->vsync_complete, 1 * HZ);
++ if (retval == 0) {
++ dev_err(fbi->device,
++ "MXCFB_WAIT_FOR_VSYNC: timeout %d\n",
++ retval);
++ retval = -ETIME;
++ } else if (retval > 0) {
++ retval = 0;
++ }
++ break;
++ }
++ case FBIO_ALLOC:
++ {
++ int size;
++ struct mxcfb_alloc_list *mem;
++
++ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
++ if (mem == NULL)
++ return -ENOMEM;
++
++ if (get_user(size, argp))
++ return -EFAULT;
++
++ mem->size = PAGE_ALIGN(size);
++
++ mem->cpu_addr = dma_alloc_coherent(fbi->device, size,
++ &mem->phy_addr,
++ GFP_KERNEL);
++ if (mem->cpu_addr == NULL) {
++ kfree(mem);
++ return -ENOMEM;
++ }
++
++ list_add(&mem->list, &fb_alloc_list);
++
++ dev_dbg(fbi->device, "allocated %d bytes @ 0x%08X\n",
++ mem->size, mem->phy_addr);
++
++ if (put_user(mem->phy_addr, argp))
++ return -EFAULT;
++
++ break;
++ }
++ case FBIO_FREE:
++ {
++ unsigned long offset;
++ struct mxcfb_alloc_list *mem;
++
++ if (get_user(offset, argp))
++ return -EFAULT;
++
++ retval = -EINVAL;
++ list_for_each_entry(mem, &fb_alloc_list, list) {
++ if (mem->phy_addr == offset) {
++ list_del(&mem->list);
++ dma_free_coherent(fbi->device,
++ mem->size,
++ mem->cpu_addr,
++ mem->phy_addr);
++ kfree(mem);
++ retval = 0;
++ break;
++ }
++ }
++
++ break;
++ }
++ case MXCFB_SET_OVERLAY_POS:
++ {
++ struct mxcfb_pos pos;
++ struct fb_info *bg_fbi = NULL;
++ struct mxcfb_info *bg_mxcfbi = NULL;
++
++ if (mxc_fbi->ipu_ch != MEM_FG_SYNC) {
++ dev_err(fbi->device, "Should use the overlay "
++ "framebuffer to set the position of "
++ "the overlay window\n");
++ retval = -EINVAL;
++ break;
++ }
++
++ if (copy_from_user(&pos, (void *)arg, sizeof(pos))) {
++ retval = -EFAULT;
++ break;
++ }
++
++ bg_fbi = found_registered_fb(MEM_BG_SYNC, mxc_fbi->ipu_id);
++ if (bg_fbi)
++ bg_mxcfbi = ((struct mxcfb_info *)(bg_fbi->par));
++
++ if (bg_fbi == NULL) {
++ dev_err(fbi->device, "Cannot find the "
++ "background framebuffer\n");
++ retval = -ENOENT;
++ break;
++ }
++
++ /* if fb is unblank, check if the pos fit the display */
++ if (mxc_fbi->cur_blank == FB_BLANK_UNBLANK) {
++ if (fbi->var.xres + pos.x > bg_fbi->var.xres) {
++ if (bg_fbi->var.xres < fbi->var.xres)
++ pos.x = 0;
++ else
++ pos.x = bg_fbi->var.xres - fbi->var.xres;
++ }
++ if (fbi->var.yres + pos.y > bg_fbi->var.yres) {
++ if (bg_fbi->var.yres < fbi->var.yres)
++ pos.y = 0;
++ else
++ pos.y = bg_fbi->var.yres - fbi->var.yres;
++ }
++ }
++
++ retval = ipu_disp_set_window_pos(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ pos.x, pos.y);
++
++ if (copy_to_user((void *)arg, &pos, sizeof(pos))) {
++ retval = -EFAULT;
++ break;
++ }
++ break;
++ }
++ case MXCFB_GET_FB_IPU_CHAN:
++ {
++ struct mxcfb_info *mxc_fbi =
++ (struct mxcfb_info *)fbi->par;
++
++ if (put_user(mxc_fbi->ipu_ch, argp))
++ return -EFAULT;
++ break;
++ }
++ case MXCFB_GET_DIFMT:
++ {
++ struct mxcfb_info *mxc_fbi =
++ (struct mxcfb_info *)fbi->par;
++
++ if (put_user(mxc_fbi->ipu_di_pix_fmt, argp))
++ return -EFAULT;
++ break;
++ }
++ case MXCFB_GET_FB_IPU_DI:
++ {
++ struct mxcfb_info *mxc_fbi =
++ (struct mxcfb_info *)fbi->par;
++
++ if (put_user(mxc_fbi->ipu_di, argp))
++ return -EFAULT;
++ break;
++ }
++ case MXCFB_GET_FB_BLANK:
++ {
++ struct mxcfb_info *mxc_fbi =
++ (struct mxcfb_info *)fbi->par;
++
++ if (put_user(mxc_fbi->cur_blank, argp))
++ return -EFAULT;
++ break;
++ }
++ case MXCFB_SET_DIFMT:
++ {
++ struct mxcfb_info *mxc_fbi =
++ (struct mxcfb_info *)fbi->par;
++
++ if (get_user(mxc_fbi->ipu_di_pix_fmt, argp))
++ return -EFAULT;
++
++ break;
++ }
++ case MXCFB_CSC_UPDATE:
++ {
++ struct mxcfb_csc_matrix csc;
++
++ if (copy_from_user(&csc, (void *) arg, sizeof(csc)))
++ return -EFAULT;
++
++ if ((mxc_fbi->ipu_ch != MEM_FG_SYNC) &&
++ (mxc_fbi->ipu_ch != MEM_BG_SYNC) &&
++ (mxc_fbi->ipu_ch != MEM_BG_ASYNC0))
++ return -EFAULT;
++ ipu_set_csc_coefficients(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ csc.param);
++ break;
++ }
++ default:
++ retval = -EINVAL;
++ }
++ return retval;
++}
++
++/*
++ * mxcfb_blank():
++ * Blank the display.
++ */
++static int mxcfb_blank(int blank, struct fb_info *info)
++{
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)info->par;
++ int ret = 0;
++
++ dev_dbg(info->device, "blank = %d\n", blank);
++
++ if (mxc_fbi->cur_blank == blank)
++ return 0;
++
++ mxc_fbi->next_blank = blank;
++
++ switch (blank) {
++ case FB_BLANK_POWERDOWN:
++ case FB_BLANK_VSYNC_SUSPEND:
++ case FB_BLANK_HSYNC_SUSPEND:
++ case FB_BLANK_NORMAL:
++ if (mxc_fbi->dispdrv && mxc_fbi->dispdrv->drv->disable)
++ mxc_fbi->dispdrv->drv->disable(mxc_fbi->dispdrv);
++ ipu_disable_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch, true);
++ if (mxc_fbi->ipu_di >= 0)
++ ipu_uninit_sync_panel(mxc_fbi->ipu, mxc_fbi->ipu_di);
++ ipu_uninit_channel(mxc_fbi->ipu, mxc_fbi->ipu_ch);
++ break;
++ case FB_BLANK_UNBLANK:
++ info->var.activate = (info->var.activate & ~FB_ACTIVATE_MASK) |
++ FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
++ ret = mxcfb_set_par(info);
++ break;
++ }
++ if (!ret)
++ mxc_fbi->cur_blank = blank;
++ return ret;
++}
++
++/*
++ * Pan or Wrap the Display
++ *
++ * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
++ *
++ * @param var Variable screen buffer information
++ * @param info Framebuffer information pointer
++ */
++static int
++mxcfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
++{
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)info->par,
++ *mxc_graphic_fbi = NULL;
++ u_int y_bottom;
++ unsigned int fr_xoff, fr_yoff, fr_w, fr_h;
++ unsigned long base, active_alpha_phy_addr = 0;
++ bool loc_alpha_en = false;
++ int fb_stride;
++ int i;
++ int ret;
++
++ /* no pan display during fb blank */
++ if (mxc_fbi->ipu_ch == MEM_FG_SYNC) {
++ struct mxcfb_info *bg_mxcfbi = NULL;
++ struct fb_info *fbi_tmp;
++
++ fbi_tmp = found_registered_fb(MEM_BG_SYNC, mxc_fbi->ipu_id);
++ if (fbi_tmp)
++ bg_mxcfbi = ((struct mxcfb_info *)(fbi_tmp->par));
++ if (!bg_mxcfbi)
++ return -EINVAL;
++ if (bg_mxcfbi->cur_blank != FB_BLANK_UNBLANK)
++ return -EINVAL;
++ }
++ if (mxc_fbi->cur_blank != FB_BLANK_UNBLANK)
++ return -EINVAL;
++
++ y_bottom = var->yoffset;
++
++ if (y_bottom > info->var.yres_virtual)
++ return -EINVAL;
++
++ switch (fbi_to_pixfmt(info)) {
++ case IPU_PIX_FMT_YUV420P2:
++ case IPU_PIX_FMT_YVU420P:
++ case IPU_PIX_FMT_NV12:
++ case IPU_PIX_FMT_YUV422P:
++ case IPU_PIX_FMT_YVU422P:
++ case IPU_PIX_FMT_YUV420P:
++ case IPU_PIX_FMT_YUV444P:
++ fb_stride = info->var.xres_virtual;
++ break;
++ default:
++ fb_stride = info->fix.line_length;
++ }
++
++ base = info->fix.smem_start;
++ fr_xoff = var->xoffset;
++ fr_w = info->var.xres_virtual;
++ if (!(var->vmode & FB_VMODE_YWRAP)) {
++ dev_dbg(info->device, "Y wrap disabled\n");
++ fr_yoff = var->yoffset % info->var.yres;
++ fr_h = info->var.yres;
++ base += info->fix.line_length * info->var.yres *
++ (var->yoffset / info->var.yres);
++ } else {
++ dev_dbg(info->device, "Y wrap enabled\n");
++ fr_yoff = var->yoffset;
++ fr_h = info->var.yres_virtual;
++ }
++ base += fr_yoff * fb_stride + fr_xoff;
++
++ /* Check if DP local alpha is enabled and find the graphic fb */
++ if (mxc_fbi->ipu_ch == MEM_BG_SYNC || mxc_fbi->ipu_ch == MEM_FG_SYNC) {
++ for (i = 0; i < num_registered_fb; i++) {
++ char bg_id[] = "DISP3 BG";
++ char fg_id[] = "DISP3 FG";
++ char *idstr = registered_fb[i]->fix.id;
++ bg_id[4] += mxc_fbi->ipu_id;
++ fg_id[4] += mxc_fbi->ipu_id;
++ if ((strcmp(idstr, bg_id) == 0 ||
++ strcmp(idstr, fg_id) == 0) &&
++ ((struct mxcfb_info *)
++ (registered_fb[i]->par))->alpha_chan_en) {
++ loc_alpha_en = true;
++ mxc_graphic_fbi = (struct mxcfb_info *)
++ (registered_fb[i]->par);
++ active_alpha_phy_addr =
++ mxc_fbi->cur_ipu_alpha_buf ?
++ mxc_graphic_fbi->alpha_phy_addr1 :
++ mxc_graphic_fbi->alpha_phy_addr0;
++ dev_dbg(info->device, "Updating SDC alpha "
++ "buf %d address=0x%08lX\n",
++ !mxc_fbi->cur_ipu_alpha_buf,
++ active_alpha_phy_addr);
++ break;
++ }
++ }
++ }
++
++ ret = wait_for_completion_timeout(&mxc_fbi->flip_complete, HZ/2);
++ if (ret == 0) {
++ dev_err(info->device, "timeout when waiting for flip irq\n");
++ return -ETIMEDOUT;
++ }
++
++ ++mxc_fbi->cur_ipu_buf;
++ mxc_fbi->cur_ipu_buf %= 3;
++ mxc_fbi->cur_ipu_alpha_buf = !mxc_fbi->cur_ipu_alpha_buf;
++
++ dev_dbg(info->device, "Updating SDC %s buf %d address=0x%08lX\n",
++ info->fix.id, mxc_fbi->cur_ipu_buf, base);
++
++ if (ipu_update_channel_buffer(mxc_fbi->ipu, mxc_fbi->ipu_ch, IPU_INPUT_BUFFER,
++ mxc_fbi->cur_ipu_buf, base) == 0) {
++ /* Update the DP local alpha buffer only for graphic plane */
++ if (loc_alpha_en && mxc_graphic_fbi == mxc_fbi &&
++ ipu_update_channel_buffer(mxc_graphic_fbi->ipu, mxc_graphic_fbi->ipu_ch,
++ IPU_ALPHA_IN_BUFFER,
++ mxc_fbi->cur_ipu_alpha_buf,
++ active_alpha_phy_addr) == 0) {
++ ipu_select_buffer(mxc_graphic_fbi->ipu, mxc_graphic_fbi->ipu_ch,
++ IPU_ALPHA_IN_BUFFER,
++ mxc_fbi->cur_ipu_alpha_buf);
++ }
++
++ /* update u/v offset */
++ ipu_update_channel_offset(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_INPUT_BUFFER,
++ fbi_to_pixfmt(info),
++ fr_w,
++ fr_h,
++ fr_w,
++ 0, 0,
++ fr_yoff,
++ fr_xoff);
++
++ ipu_select_buffer(mxc_fbi->ipu, mxc_fbi->ipu_ch, IPU_INPUT_BUFFER,
++ mxc_fbi->cur_ipu_buf);
++ ipu_clear_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
++ ipu_enable_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
++ } else {
++ dev_err(info->device,
++ "Error updating SDC buf %d to address=0x%08lX, "
++ "current buf %d, buf0 ready %d, buf1 ready %d, "
++ "buf2 ready %d\n", mxc_fbi->cur_ipu_buf, base,
++ ipu_get_cur_buffer_idx(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_INPUT_BUFFER),
++ ipu_check_buffer_ready(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_INPUT_BUFFER, 0),
++ ipu_check_buffer_ready(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_INPUT_BUFFER, 1),
++ ipu_check_buffer_ready(mxc_fbi->ipu, mxc_fbi->ipu_ch,
++ IPU_INPUT_BUFFER, 2));
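++		/* advancing twice more (mod 3) wraps the index back to its pre-flip value */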
++ ++mxc_fbi->cur_ipu_buf;
++ mxc_fbi->cur_ipu_buf %= 3;
++ ++mxc_fbi->cur_ipu_buf;
++ mxc_fbi->cur_ipu_buf %= 3;
++ mxc_fbi->cur_ipu_alpha_buf = !mxc_fbi->cur_ipu_alpha_buf;
++ ipu_clear_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
++ ipu_enable_irq(mxc_fbi->ipu, mxc_fbi->ipu_ch_irq);
++ return -EBUSY;
++ }
++
++ dev_dbg(info->device, "Update complete\n");
++
++ info->var.yoffset = var->yoffset;
++
++ return 0;
++}
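
From userspace this path is normally exercised by page flipping with FBIOPAN_DISPLAY; a minimal double-buffering sketch (assumes yres_virtual >= 2 * yres):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/fb.h>

	/* Flip between two pages of the virtual framebuffer; each call ends up
	 * in mxcfb_pan_display(), which waits on flip_complete before
	 * programming the next IPU buffer. */
	int main(void)
	{
		struct fb_var_screeninfo var;
		int page = 0, i;
		int fd = open("/dev/fb0", O_RDWR);

		if (fd < 0 || ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0)
			return 1;
		for (i = 0; i < 60; i++) {
			page ^= 1;
			var.yoffset = page * var.yres;
			ioctl(fd, FBIOPAN_DISPLAY, &var);
		}
		close(fd);
		return 0;
	}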
++
++/*
++ * Function to handle custom mmap for MXC framebuffer.
++ *
++ * @param fbi framebuffer information pointer
++ *
++ * @param vma Pointer to vm_area_struct
++ */
++static int mxcfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
++{
++ bool found = false;
++ u32 len;
++ unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
++ struct mxcfb_alloc_list *mem;
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++
++ if (offset < fbi->fix.smem_len) {
++ /* mapping framebuffer memory */
++ len = fbi->fix.smem_len - offset;
++ vma->vm_pgoff = (fbi->fix.smem_start + offset) >> PAGE_SHIFT;
++ } else if ((vma->vm_pgoff ==
++ (mxc_fbi->alpha_phy_addr0 >> PAGE_SHIFT)) ||
++ (vma->vm_pgoff ==
++ (mxc_fbi->alpha_phy_addr1 >> PAGE_SHIFT))) {
++ len = mxc_fbi->alpha_mem_len;
++ } else {
++ list_for_each_entry(mem, &fb_alloc_list, list) {
++ if (offset == mem->phy_addr) {
++ found = true;
++ len = mem->size;
++ break;
++ }
++ }
++ if (!found)
++ return -EINVAL;
++ }
++
++ len = PAGE_ALIGN(len);
++ if (vma->vm_end - vma->vm_start > len)
++ return -EINVAL;
++
++	/* make the buffers bufferable (write-combined) */
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++
++ vma->vm_flags |= VM_IO;
++
++ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
++ dev_dbg(fbi->device, "mmap remap_pfn_range failed\n");
++ return -ENOBUFS;
++ }
++
++ return 0;
++}
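
The usual client of this hook is a plain mmap() of the visible framebuffer; offsets below smem_len take the first branch above and come back write-combined:

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <linux/fb.h>

	int main(void)
	{
		struct fb_fix_screeninfo fix;
		void *fb;
		int fd = open("/dev/fb0", O_RDWR);

		if (fd < 0 || ioctl(fd, FBIOGET_FSCREENINFO, &fix) < 0)
			return 1;
		fb = mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);
		if (fb != MAP_FAILED) {
			memset(fb, 0, fix.smem_len);	/* clear the screen */
			munmap(fb, fix.smem_len);
		}
		close(fd);
		return 0;
	}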
++
++/*!
++ * This structure contains the pointers to the control functions that are
++ * invoked by the core framebuffer driver to perform operations like
++ * blitting, rectangle filling, region copying, and cursor definition.
++ */
++static struct fb_ops mxcfb_ops = {
++ .owner = THIS_MODULE,
++ .fb_set_par = mxcfb_set_par,
++ .fb_check_var = mxcfb_check_var,
++ .fb_setcolreg = mxcfb_setcolreg,
++ .fb_pan_display = mxcfb_pan_display,
++ .fb_ioctl = mxcfb_ioctl,
++ .fb_mmap = mxcfb_mmap,
++ .fb_fillrect = cfb_fillrect,
++ .fb_copyarea = cfb_copyarea,
++ .fb_imageblit = cfb_imageblit,
++ .fb_blank = mxcfb_blank,
++};
++
++static irqreturn_t mxcfb_irq_handler(int irq, void *dev_id)
++{
++ struct fb_info *fbi = dev_id;
++ struct mxcfb_info *mxc_fbi = fbi->par;
++
++ complete(&mxc_fbi->flip_complete);
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t mxcfb_nf_irq_handler(int irq, void *dev_id)
++{
++ struct fb_info *fbi = dev_id;
++ struct mxcfb_info *mxc_fbi = fbi->par;
++
++ complete(&mxc_fbi->vsync_complete);
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t mxcfb_alpha_irq_handler(int irq, void *dev_id)
++{
++ struct fb_info *fbi = dev_id;
++ struct mxcfb_info *mxc_fbi = fbi->par;
++
++ complete(&mxc_fbi->alpha_flip_complete);
++ return IRQ_HANDLED;
++}
++
++/*
++ * Suspends the framebuffer and blanks the screen. Power management support.
++ */
++static int mxcfb_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ struct fb_info *fbi = platform_get_drvdata(pdev);
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++ int saved_blank;
++#ifdef CONFIG_FB_MXC_LOW_PWR_DISPLAY
++ void *fbmem;
++#endif
++
++ if (mxc_fbi->ovfbi) {
++ struct mxcfb_info *mxc_fbi_fg =
++ (struct mxcfb_info *)mxc_fbi->ovfbi->par;
++
++ console_lock();
++ fb_set_suspend(mxc_fbi->ovfbi, 1);
++ saved_blank = mxc_fbi_fg->cur_blank;
++ mxcfb_blank(FB_BLANK_POWERDOWN, mxc_fbi->ovfbi);
++ mxc_fbi_fg->next_blank = saved_blank;
++ console_unlock();
++ }
++
++ console_lock();
++ fb_set_suspend(fbi, 1);
++ saved_blank = mxc_fbi->cur_blank;
++ mxcfb_blank(FB_BLANK_POWERDOWN, fbi);
++ mxc_fbi->next_blank = saved_blank;
++ console_unlock();
++
++ return 0;
++}
++
++/*
++ * Resumes the framebuffer and unblanks the screen. Power management support.
++ */
++static int mxcfb_resume(struct platform_device *pdev)
++{
++ struct fb_info *fbi = platform_get_drvdata(pdev);
++ struct mxcfb_info *mxc_fbi = (struct mxcfb_info *)fbi->par;
++
++ console_lock();
++ mxcfb_blank(mxc_fbi->next_blank, fbi);
++ fb_set_suspend(fbi, 0);
++ console_unlock();
++
++ if (mxc_fbi->ovfbi) {
++ struct mxcfb_info *mxc_fbi_fg =
++ (struct mxcfb_info *)mxc_fbi->ovfbi->par;
++ console_lock();
++ mxcfb_blank(mxc_fbi_fg->next_blank, mxc_fbi->ovfbi);
++ fb_set_suspend(mxc_fbi->ovfbi, 0);
++ console_unlock();
++ }
++
++ return 0;
++}
++
++/*
++ * Main framebuffer functions
++ */
++
++/*!
++ * Allocates the DRAM memory for the frame buffer. This buffer is remapped
++ * into a non-cached, write-combined memory region so that palette and pixel
++ * writes can occur without flushing the cache. Once this area is remapped,
++ * all virtual memory access to the video memory should occur at the new region.
++ *
++ * @param fbi framebuffer information pointer
++ *
++ * @return Error code indicating success or failure
++ */
++static int mxcfb_map_video_memory(struct fb_info *fbi)
++{
++ if (fbi->fix.smem_len < fbi->var.yres_virtual * fbi->fix.line_length)
++ fbi->fix.smem_len = fbi->var.yres_virtual *
++ fbi->fix.line_length;
++
++ fbi->screen_base = dma_alloc_writecombine(fbi->device,
++ fbi->fix.smem_len,
++ (dma_addr_t *)&fbi->fix.smem_start,
++ GFP_DMA | GFP_KERNEL);
++	if (!fbi->screen_base) {
++ dev_err(fbi->device, "Unable to allocate framebuffer memory\n");
++ fbi->fix.smem_len = 0;
++ fbi->fix.smem_start = 0;
++ return -EBUSY;
++ }
++
++ dev_dbg(fbi->device, "allocated fb @ paddr=0x%08X, size=%d.\n",
++ (uint32_t) fbi->fix.smem_start, fbi->fix.smem_len);
++
++ fbi->screen_size = fbi->fix.smem_len;
++
++ /* Clear the screen */
++ memset((char *)fbi->screen_base, 0, fbi->fix.smem_len);
++
++ return 0;
++}
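
For a sense of scale: a 1920x1080 mode at 32 bpp with a triple-buffered yres_virtual of 3240 gives line_length = 1920 * 4 = 7680 bytes, so smem_len grows to 7680 * 3240 = 24883200 bytes, roughly 23.7 MiB of write-combined DMA memory.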
++
++/*!
++ * De-allocates the DRAM memory for the frame buffer.
++ *
++ * @param fbi framebuffer information pointer
++ *
++ * @return Error code indicating success or failure
++ */
++static int mxcfb_unmap_video_memory(struct fb_info *fbi)
++{
++ dma_free_writecombine(fbi->device, fbi->fix.smem_len,
++ fbi->screen_base, fbi->fix.smem_start);
++ fbi->screen_base = 0;
++ fbi->fix.smem_start = 0;
++ fbi->fix.smem_len = 0;
++ return 0;
++}
++
++/*!
++ * Initializes the framebuffer information pointer. After allocating
++ * sufficient memory for the framebuffer structure, the fields are
++ * filled with custom information passed in from the configurable
++ * structures. This includes information such as bits per pixel,
++ * color maps, screen width/height and RGBA offsets.
++ *
++ * @return Framebuffer structure initialized with our information
++ */
++static struct fb_info *mxcfb_init_fbinfo(struct device *dev, struct fb_ops *ops)
++{
++ struct fb_info *fbi;
++ struct mxcfb_info *mxcfbi;
++
++ /*
++ * Allocate sufficient memory for the fb structure
++ */
++ fbi = framebuffer_alloc(sizeof(struct mxcfb_info), dev);
++ if (!fbi)
++ return NULL;
++
++ mxcfbi = (struct mxcfb_info *)fbi->par;
++
++ fbi->var.activate = FB_ACTIVATE_NOW;
++
++ fbi->fbops = ops;
++ fbi->flags = FBINFO_FLAG_DEFAULT;
++ fbi->pseudo_palette = mxcfbi->pseudo_palette;
++
++ /*
++ * Allocate colormap
++ */
++ fb_alloc_cmap(&fbi->cmap, 16, 0);
++
++ return fbi;
++}
++
++static ssize_t show_disp_chan(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct fb_info *info = dev_get_drvdata(dev);
++ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)info->par;
++
++ if (mxcfbi->ipu_ch == MEM_BG_SYNC)
++ return sprintf(buf, "2-layer-fb-bg\n");
++ else if (mxcfbi->ipu_ch == MEM_FG_SYNC)
++ return sprintf(buf, "2-layer-fb-fg\n");
++ else if (mxcfbi->ipu_ch == MEM_DC_SYNC)
++ return sprintf(buf, "1-layer-fb\n");
++ else
++ return sprintf(buf, "err: no display chan\n");
++}
++
++static ssize_t swap_disp_chan(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct fb_info *info = dev_get_drvdata(dev);
++ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)info->par;
++ struct mxcfb_info *fg_mxcfbi = NULL;
++
++ console_lock();
++	/* a swap can only happen between DP-BG and DC while DP-FG is disabled */
++ if (((mxcfbi->ipu_ch == MEM_BG_SYNC) &&
++ (strstr(buf, "1-layer-fb") != NULL)) ||
++ ((mxcfbi->ipu_ch == MEM_DC_SYNC) &&
++ (strstr(buf, "2-layer-fb-bg") != NULL))) {
++ struct fb_info *fbi_fg;
++
++ fbi_fg = found_registered_fb(MEM_FG_SYNC, mxcfbi->ipu_id);
++ if (fbi_fg)
++ fg_mxcfbi = (struct mxcfb_info *)fbi_fg->par;
++
++ if (!fg_mxcfbi ||
++ fg_mxcfbi->cur_blank == FB_BLANK_UNBLANK) {
++ dev_err(dev,
++				"Cannot switch while fb2 (fb-fg) is on.\n");
++ console_unlock();
++ return count;
++ }
++
++ if (swap_channels(info) < 0)
++ dev_err(dev, "Swap display channel failed.\n");
++ }
++
++ console_unlock();
++ return count;
++}
++static DEVICE_ATTR(fsl_disp_property, S_IWUSR | S_IRUGO,
++ show_disp_chan, swap_disp_chan);
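
Assuming the attribute surfaces as /sys/class/graphics/fb0/fsl_disp_property (fbi->dev is the fbdev class device; the path is illustrative), the DP/DC swap can be requested like this:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>

	/* Ask the DP-BG framebuffer to swap over to DC ("1-layer-fb"). The
	 * driver refuses while the FG overlay is still unblanked. */
	int main(void)
	{
		const char *req = "1-layer-fb";
		int fd = open("/sys/class/graphics/fb0/fsl_disp_property", O_WRONLY);

		if (fd < 0)
			return 1;
		write(fd, req, strlen(req));
		close(fd);
		return 0;
	}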
++
++static ssize_t show_disp_dev(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct fb_info *info = dev_get_drvdata(dev);
++ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)info->par;
++
++ if (mxcfbi->ipu_ch == MEM_FG_SYNC)
++ return sprintf(buf, "overlay\n");
++ else
++ return sprintf(buf, "%s\n", mxcfbi->dispdrv->drv->name);
++}
++static DEVICE_ATTR(fsl_disp_dev_property, S_IRUGO, show_disp_dev, NULL);
++
++static int mxcfb_dispdrv_init(struct platform_device *pdev,
++ struct fb_info *fbi)
++{
++ struct ipuv3_fb_platform_data *plat_data = pdev->dev.platform_data;
++ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)fbi->par;
++ struct mxc_dispdrv_setting setting;
++ char disp_dev[32], *default_dev = "lcd";
++ int ret = 0;
++
++ setting.if_fmt = plat_data->interface_pix_fmt;
++ setting.dft_mode_str = plat_data->mode_str;
++ setting.default_bpp = plat_data->default_bpp;
++ if (!setting.default_bpp)
++ setting.default_bpp = 16;
++ setting.fbi = fbi;
++ if (!strlen(plat_data->disp_dev)) {
++ memcpy(disp_dev, default_dev, strlen(default_dev));
++ disp_dev[strlen(default_dev)] = '\0';
++ } else {
++ memcpy(disp_dev, plat_data->disp_dev,
++ strlen(plat_data->disp_dev));
++ disp_dev[strlen(plat_data->disp_dev)] = '\0';
++ }
++
++ dev_info(&pdev->dev, "register mxc display driver %s\n", disp_dev);
++
++ mxcfbi->dispdrv = mxc_dispdrv_gethandle(disp_dev, &setting);
++ if (IS_ERR(mxcfbi->dispdrv)) {
++ ret = PTR_ERR(mxcfbi->dispdrv);
++ dev_err(&pdev->dev, "NO mxc display driver found!\n");
++ return ret;
++ } else {
++ /* fix-up */
++ mxcfbi->ipu_di_pix_fmt = setting.if_fmt;
++ mxcfbi->default_bpp = setting.default_bpp;
++
++ /* setting */
++ mxcfbi->ipu_id = setting.dev_id;
++ mxcfbi->ipu_di = setting.disp_id;
++ dev_dbg(&pdev->dev, "di_pixfmt:0x%x, bpp:0x%x, di:%d, ipu:%d\n",
++ setting.if_fmt, setting.default_bpp,
++ setting.disp_id, setting.dev_id);
++ }
++
++ return ret;
++}
++
++/*
++ * Parse user specified options (`video=mxcfb0:')
++ * example:
++ * video=mxcfb0:dev=lcd,800x480M-16@55,if=RGB565,bpp=16,noaccel
++ * video=mxcfb0:dev=lcd,800x480M-16@55,if=RGB565,fbpix=RGB565
++ */
++static int mxcfb_option_setup(struct platform_device *pdev, struct fb_info *fbi)
++{
++ struct ipuv3_fb_platform_data *pdata = pdev->dev.platform_data;
++ char *options, *opt, *fb_mode_str = NULL;
++ char name[] = "mxcfb0";
++ uint32_t fb_pix_fmt = 0;
++
++ name[5] += pdev->id;
++ if (fb_get_options(name, &options)) {
++ dev_err(&pdev->dev, "Can't get fb option for %s!\n", name);
++ return -ENODEV;
++ }
++
++ if (!options || !*options)
++ return 0;
++
++ while ((opt = strsep(&options, ",")) != NULL) {
++ if (!*opt)
++ continue;
++
++ if (!strncmp(opt, "dev=", 4)) {
++ memcpy(pdata->disp_dev, opt + 4, strlen(opt) - 4);
++ pdata->disp_dev[strlen(opt) - 4] = '\0';
++ } else if (!strncmp(opt, "if=", 3)) {
++ if (!strncmp(opt+3, "RGB24", 5))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_RGB24;
++ else if (!strncmp(opt+3, "BGR24", 5))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_BGR24;
++ else if (!strncmp(opt+3, "GBR24", 5))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_GBR24;
++ else if (!strncmp(opt+3, "RGB565", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_RGB565;
++ else if (!strncmp(opt+3, "RGB666", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_RGB666;
++ else if (!strncmp(opt+3, "YUV444", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_YUV444;
++ else if (!strncmp(opt+3, "LVDS666", 7))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_LVDS666;
++ else if (!strncmp(opt+3, "YUYV16", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_YUYV;
++ else if (!strncmp(opt+3, "UYVY16", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_UYVY;
++ else if (!strncmp(opt+3, "YVYU16", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_YVYU;
++ else if (!strncmp(opt+3, "VYUY16", 6))
++ pdata->interface_pix_fmt = IPU_PIX_FMT_VYUY;
++ } else if (!strncmp(opt, "fbpix=", 6)) {
++ if (!strncmp(opt+6, "RGB24", 5))
++ fb_pix_fmt = IPU_PIX_FMT_RGB24;
++ else if (!strncmp(opt+6, "BGR24", 5))
++ fb_pix_fmt = IPU_PIX_FMT_BGR24;
++ else if (!strncmp(opt+6, "RGB32", 5))
++ fb_pix_fmt = IPU_PIX_FMT_RGB32;
++ else if (!strncmp(opt+6, "BGR32", 5))
++ fb_pix_fmt = IPU_PIX_FMT_BGR32;
++ else if (!strncmp(opt+6, "ABGR32", 6))
++ fb_pix_fmt = IPU_PIX_FMT_ABGR32;
++ else if (!strncmp(opt+6, "RGB565", 6))
++ fb_pix_fmt = IPU_PIX_FMT_RGB565;
++
++ if (fb_pix_fmt) {
++ pixfmt_to_var(fb_pix_fmt, &fbi->var);
++ pdata->default_bpp =
++ fbi->var.bits_per_pixel;
++ }
++ } else if (!strncmp(opt, "int_clk", 7)) {
++ pdata->int_clk = true;
++ continue;
++ } else if (!strncmp(opt, "bpp=", 4)) {
++			/* bpp setting cannot overwrite fbpix setting */
++ if (fb_pix_fmt)
++ continue;
++
++ pdata->default_bpp =
++ simple_strtoul(opt + 4, NULL, 0);
++
++ fb_pix_fmt = bpp_to_pixfmt(pdata->default_bpp);
++ if (fb_pix_fmt)
++ pixfmt_to_var(fb_pix_fmt, &fbi->var);
++ } else
++ fb_mode_str = opt;
++ }
++
++ if (fb_mode_str)
++ pdata->mode_str = fb_mode_str;
++
++ return 0;
++}
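
Note the precedence encoded above: once fbpix= has fixed the framebuffer format, a later bpp= option is skipped, so a hypothetical boot line like video=mxcfb0:dev=hdmi,1920x1080M@60,if=RGB24,fbpix=RGB32,bpp=16 still yields a 32-bit framebuffer.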
++
++static int mxcfb_register(struct fb_info *fbi)
++{
++ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)fbi->par;
++ struct fb_videomode m;
++ int ret = 0;
++ char bg0_id[] = "DISP3 BG";
++ char bg1_id[] = "DISP3 BG - DI1";
++ char fg_id[] = "DISP3 FG";
++
++ if (mxcfbi->ipu_di == 0) {
++ bg0_id[4] += mxcfbi->ipu_id;
++ strcpy(fbi->fix.id, bg0_id);
++ } else if (mxcfbi->ipu_di == 1) {
++ bg1_id[4] += mxcfbi->ipu_id;
++ strcpy(fbi->fix.id, bg1_id);
++ } else { /* Overlay */
++ fg_id[4] += mxcfbi->ipu_id;
++ strcpy(fbi->fix.id, fg_id);
++ }
++
++ mxcfb_check_var(&fbi->var, fbi);
++
++ mxcfb_set_fix(fbi);
++
++ /* Added first mode to fbi modelist. */
++	/* Add the first mode to the fbi modelist. */
++ INIT_LIST_HEAD(&fbi->modelist);
++ fb_var_to_videomode(&m, &fbi->var);
++ fb_add_videomode(&m, &fbi->modelist);
++
++ if (ipu_request_irq(mxcfbi->ipu, mxcfbi->ipu_ch_irq,
++ mxcfb_irq_handler, IPU_IRQF_ONESHOT, MXCFB_NAME, fbi) != 0) {
++ dev_err(fbi->device, "Error registering EOF irq handler.\n");
++ ret = -EBUSY;
++ goto err0;
++ }
++ ipu_disable_irq(mxcfbi->ipu, mxcfbi->ipu_ch_irq);
++ if (ipu_request_irq(mxcfbi->ipu, mxcfbi->ipu_ch_nf_irq,
++ mxcfb_nf_irq_handler, IPU_IRQF_ONESHOT, MXCFB_NAME, fbi) != 0) {
++ dev_err(fbi->device, "Error registering NFACK irq handler.\n");
++ ret = -EBUSY;
++ goto err1;
++ }
++ ipu_disable_irq(mxcfbi->ipu, mxcfbi->ipu_ch_nf_irq);
++
++ if (mxcfbi->ipu_alp_ch_irq != -1)
++ if (ipu_request_irq(mxcfbi->ipu, mxcfbi->ipu_alp_ch_irq,
++ mxcfb_alpha_irq_handler, IPU_IRQF_ONESHOT,
++ MXCFB_NAME, fbi) != 0) {
++ dev_err(fbi->device, "Error registering alpha irq "
++ "handler.\n");
++ ret = -EBUSY;
++ goto err2;
++ }
++
++ if (!mxcfbi->late_init) {
++ fbi->var.activate |= FB_ACTIVATE_FORCE;
++ console_lock();
++ fbi->flags |= FBINFO_MISC_USEREVENT;
++ ret = fb_set_var(fbi, &fbi->var);
++ fbi->flags &= ~FBINFO_MISC_USEREVENT;
++ console_unlock();
++ if (ret < 0) {
++ dev_err(fbi->device, "Error fb_set_var ret:%d\n", ret);
++ goto err3;
++ }
++
++ if (mxcfbi->next_blank == FB_BLANK_UNBLANK) {
++ console_lock();
++ ret = fb_blank(fbi, FB_BLANK_UNBLANK);
++ console_unlock();
++ if (ret < 0) {
++ dev_err(fbi->device,
++ "Error fb_blank ret:%d\n", ret);
++ goto err4;
++ }
++ }
++ } else {
++ /*
++		 * Set up the channel again even though the bootloader
++		 * has already done so; this lets set_par() stop the
++		 * channel cleanly and re-initialize it.
++ */
++ if (mxcfbi->next_blank == FB_BLANK_UNBLANK) {
++ console_lock();
++ _setup_disp_channel1(fbi);
++ ipu_enable_channel(mxcfbi->ipu, mxcfbi->ipu_ch);
++ console_unlock();
++ }
++ }
++
++
++ ret = register_framebuffer(fbi);
++ if (ret < 0)
++ goto err5;
++
++ return ret;
++err5:
++ if (mxcfbi->next_blank == FB_BLANK_UNBLANK) {
++ console_lock();
++ if (!mxcfbi->late_init)
++ fb_blank(fbi, FB_BLANK_POWERDOWN);
++ else {
++ ipu_disable_channel(mxcfbi->ipu, mxcfbi->ipu_ch,
++ true);
++ ipu_uninit_channel(mxcfbi->ipu, mxcfbi->ipu_ch);
++ }
++ console_unlock();
++ }
++err4:
++err3:
++ if (mxcfbi->ipu_alp_ch_irq != -1)
++ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_alp_ch_irq, fbi);
++err2:
++ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_ch_nf_irq, fbi);
++err1:
++ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_ch_irq, fbi);
++err0:
++ return ret;
++}
++
++static void mxcfb_unregister(struct fb_info *fbi)
++{
++ struct mxcfb_info *mxcfbi = (struct mxcfb_info *)fbi->par;
++
++ if (mxcfbi->ipu_alp_ch_irq != -1)
++ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_alp_ch_irq, fbi);
++ if (mxcfbi->ipu_ch_irq)
++ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_ch_irq, fbi);
++ if (mxcfbi->ipu_ch_nf_irq)
++ ipu_free_irq(mxcfbi->ipu, mxcfbi->ipu_ch_nf_irq, fbi);
++
++ unregister_framebuffer(fbi);
++}
++
++static int mxcfb_setup_overlay(struct platform_device *pdev,
++ struct fb_info *fbi_bg, struct resource *res)
++{
++ struct fb_info *ovfbi;
++ struct mxcfb_info *mxcfbi_bg = (struct mxcfb_info *)fbi_bg->par;
++ struct mxcfb_info *mxcfbi_fg;
++ int ret = 0;
++
++ ovfbi = mxcfb_init_fbinfo(&pdev->dev, &mxcfb_ops);
++ if (!ovfbi) {
++ ret = -ENOMEM;
++ goto init_ovfbinfo_failed;
++ }
++ mxcfbi_fg = (struct mxcfb_info *)ovfbi->par;
++
++ mxcfbi_fg->ipu = ipu_get_soc(mxcfbi_bg->ipu_id);
++ if (IS_ERR(mxcfbi_fg->ipu)) {
++ ret = -ENODEV;
++ goto get_ipu_failed;
++ }
++ mxcfbi_fg->ipu_id = mxcfbi_bg->ipu_id;
++ mxcfbi_fg->ipu_ch_irq = IPU_IRQ_FG_SYNC_EOF;
++ mxcfbi_fg->ipu_ch_nf_irq = IPU_IRQ_FG_SYNC_NFACK;
++ mxcfbi_fg->ipu_alp_ch_irq = IPU_IRQ_FG_ALPHA_SYNC_EOF;
++ mxcfbi_fg->ipu_ch = MEM_FG_SYNC;
++ mxcfbi_fg->ipu_di = -1;
++ mxcfbi_fg->ipu_di_pix_fmt = mxcfbi_bg->ipu_di_pix_fmt;
++ mxcfbi_fg->overlay = true;
++ mxcfbi_fg->cur_blank = mxcfbi_fg->next_blank = FB_BLANK_POWERDOWN;
++
++ /* Need dummy values until real panel is configured */
++ ovfbi->var.xres = 240;
++ ovfbi->var.yres = 320;
++
++ if (res && res->start && res->end) {
++ ovfbi->fix.smem_len = res->end - res->start + 1;
++ ovfbi->fix.smem_start = res->start;
++ ovfbi->screen_base = ioremap(
++ ovfbi->fix.smem_start,
++ ovfbi->fix.smem_len);
++ }
++
++ ret = mxcfb_register(ovfbi);
++ if (ret < 0)
++ goto register_ov_failed;
++
++ mxcfbi_bg->ovfbi = ovfbi;
++
++ return ret;
++
++register_ov_failed:
++get_ipu_failed:
++ fb_dealloc_cmap(&ovfbi->cmap);
++ framebuffer_release(ovfbi);
++init_ovfbinfo_failed:
++ return ret;
++}
++
++static void mxcfb_unsetup_overlay(struct fb_info *fbi_bg)
++{
++ struct mxcfb_info *mxcfbi_bg = (struct mxcfb_info *)fbi_bg->par;
++ struct fb_info *ovfbi = mxcfbi_bg->ovfbi;
++
++ mxcfb_unregister(ovfbi);
++
++	fb_dealloc_cmap(&ovfbi->cmap);
++ framebuffer_release(ovfbi);
++}
++
++static bool ipu_usage[2][2];
++static int ipu_test_set_usage(int ipu, int di)
++{
++ if (ipu_usage[ipu][di])
++ return -EBUSY;
++ else
++ ipu_usage[ipu][di] = true;
++ return 0;
++}
++
++static void ipu_clear_usage(int ipu, int di)
++{
++ ipu_usage[ipu][di] = false;
++}
++
++static int mxcfb_get_of_property(struct platform_device *pdev,
++ struct ipuv3_fb_platform_data *plat_data)
++{
++ struct device_node *np = pdev->dev.of_node;
++ const char *disp_dev;
++ const char *mode_str;
++ const char *pixfmt;
++ int err;
++ int len;
++ u32 bpp, int_clk;
++ u32 late_init;
++
++ err = of_property_read_string(np, "disp_dev", &disp_dev);
++ if (err < 0) {
++ dev_dbg(&pdev->dev, "get of property disp_dev fail\n");
++ return err;
++ }
++ err = of_property_read_string(np, "mode_str", &mode_str);
++ if (err < 0) {
++ dev_dbg(&pdev->dev, "get of property mode_str fail\n");
++ return err;
++ }
++ err = of_property_read_string(np, "interface_pix_fmt", &pixfmt);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property pix fmt fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "default_bpp", &bpp);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property bpp fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "int_clk", &int_clk);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property int_clk fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "late_init", &late_init);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property late_init fail\n");
++ return err;
++ }
++
++ if (!strncmp(pixfmt, "RGB24", 5))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_RGB24;
++ else if (!strncmp(pixfmt, "BGR24", 5))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_BGR24;
++ else if (!strncmp(pixfmt, "GBR24", 5))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_GBR24;
++ else if (!strncmp(pixfmt, "RGB565", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_RGB565;
++ else if (!strncmp(pixfmt, "RGB666", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_RGB666;
++ else if (!strncmp(pixfmt, "YUV444", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_YUV444;
++ else if (!strncmp(pixfmt, "LVDS666", 7))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_LVDS666;
++ else if (!strncmp(pixfmt, "YUYV16", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_YUYV;
++ else if (!strncmp(pixfmt, "UYVY16", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_UYVY;
++ else if (!strncmp(pixfmt, "YVYU16", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_YVYU;
++ else if (!strncmp(pixfmt, "VYUY16", 6))
++ plat_data->interface_pix_fmt = IPU_PIX_FMT_VYUY;
++ else {
++ dev_err(&pdev->dev, "err interface_pix_fmt!\n");
++ return -ENOENT;
++ }
++
++ len = min(sizeof(plat_data->disp_dev) - 1, strlen(disp_dev));
++ memcpy(plat_data->disp_dev, disp_dev, len);
++ plat_data->disp_dev[len] = '\0';
++ plat_data->mode_str = (char *)mode_str;
++ plat_data->default_bpp = bpp;
++ plat_data->int_clk = (bool)int_clk;
++ plat_data->late_init = (bool)late_init;
++ return err;
++}
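
A minimal device-tree sketch that satisfies the properties read above (node name and values are illustrative, not taken from a shipped board file):

	mxcfb0: fb@0 {
		compatible = "fsl,mxc_sdc_fb";
		disp_dev = "hdmi";
		interface_pix_fmt = "RGB24";
		mode_str = "1920x1080M@60";
		default_bpp = <32>;
		int_clk = <0>;
		late_init = <0>;
		status = "okay";
	};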
++
++/*!
++ * Probe routine for the framebuffer driver. It is called during the
++ * driver binding process. The following steps are performed in
++ * this routine: framebuffer initialization, memory allocation and
++ * mapping, framebuffer registration, and IPU initialization.
++ *
++ * @return Appropriate error code to the kernel common code
++ */
++static int mxcfb_probe(struct platform_device *pdev)
++{
++ struct ipuv3_fb_platform_data *plat_data;
++ struct fb_info *fbi;
++ struct mxcfb_info *mxcfbi;
++ struct resource *res;
++ int ret = 0;
++
++ dev_dbg(&pdev->dev, "%s enter\n", __func__);
++ pdev->id = of_alias_get_id(pdev->dev.of_node, "mxcfb");
++ if (pdev->id < 0) {
++ dev_err(&pdev->dev, "can not get alias id\n");
++ return pdev->id;
++ }
++
++ plat_data = devm_kzalloc(&pdev->dev, sizeof(struct
++ ipuv3_fb_platform_data), GFP_KERNEL);
++ if (!plat_data)
++ return -ENOMEM;
++ pdev->dev.platform_data = plat_data;
++
++ ret = mxcfb_get_of_property(pdev, plat_data);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "get mxcfb of property fail\n");
++ return ret;
++ }
++
++ /* Initialize FB structures */
++ fbi = mxcfb_init_fbinfo(&pdev->dev, &mxcfb_ops);
++ if (!fbi) {
++ ret = -ENOMEM;
++ goto init_fbinfo_failed;
++ }
++
++ ret = mxcfb_option_setup(pdev, fbi);
++ if (ret)
++ goto get_fb_option_failed;
++
++ mxcfbi = (struct mxcfb_info *)fbi->par;
++ mxcfbi->ipu_int_clk = plat_data->int_clk;
++ mxcfbi->late_init = plat_data->late_init;
++ mxcfbi->first_set_par = true;
++ ret = mxcfb_dispdrv_init(pdev, fbi);
++ if (ret < 0)
++ goto init_dispdrv_failed;
++
++ ret = ipu_test_set_usage(mxcfbi->ipu_id, mxcfbi->ipu_di);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "ipu%d-di%d already in use\n",
++ mxcfbi->ipu_id, mxcfbi->ipu_di);
++ goto ipu_in_busy;
++ }
++
++ if (mxcfbi->dispdrv->drv->post_init) {
++ ret = mxcfbi->dispdrv->drv->post_init(mxcfbi->dispdrv,
++ mxcfbi->ipu_id,
++ mxcfbi->ipu_di);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "post init failed\n");
++ goto post_init_failed;
++ }
++ }
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (res && res->start && res->end) {
++ fbi->fix.smem_len = res->end - res->start + 1;
++ fbi->fix.smem_start = res->start;
++ fbi->screen_base = ioremap(fbi->fix.smem_start, fbi->fix.smem_len);
++ /* Do not clear the fb content drawn in bootloader. */
++ if (!mxcfbi->late_init)
++ memset(fbi->screen_base, 0, fbi->fix.smem_len);
++ }
++
++ mxcfbi->ipu = ipu_get_soc(mxcfbi->ipu_id);
++ if (IS_ERR(mxcfbi->ipu)) {
++ ret = -ENODEV;
++ goto get_ipu_failed;
++ }
++
++ /* first user uses DP with alpha feature */
++ if (!g_dp_in_use[mxcfbi->ipu_id]) {
++ mxcfbi->ipu_ch_irq = IPU_IRQ_BG_SYNC_EOF;
++ mxcfbi->ipu_ch_nf_irq = IPU_IRQ_BG_SYNC_NFACK;
++ mxcfbi->ipu_alp_ch_irq = IPU_IRQ_BG_ALPHA_SYNC_EOF;
++ mxcfbi->ipu_ch = MEM_BG_SYNC;
++ /* Unblank the primary fb only by default */
++ if (pdev->id == 0)
++ mxcfbi->cur_blank = mxcfbi->next_blank = FB_BLANK_UNBLANK;
++ else
++ mxcfbi->cur_blank = mxcfbi->next_blank = FB_BLANK_POWERDOWN;
++
++ ret = mxcfb_register(fbi);
++ if (ret < 0)
++ goto mxcfb_register_failed;
++
++ ipu_disp_set_global_alpha(mxcfbi->ipu, mxcfbi->ipu_ch,
++ true, 0x80);
++ ipu_disp_set_color_key(mxcfbi->ipu, mxcfbi->ipu_ch, false, 0);
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++ ret = mxcfb_setup_overlay(pdev, fbi, res);
++
++ if (ret < 0) {
++ mxcfb_unregister(fbi);
++ goto mxcfb_setupoverlay_failed;
++ }
++
++ g_dp_in_use[mxcfbi->ipu_id] = true;
++
++ ret = device_create_file(mxcfbi->ovfbi->dev,
++ &dev_attr_fsl_disp_property);
++ if (ret)
++ dev_err(mxcfbi->ovfbi->dev, "Error %d on creating "
++ "file for disp property\n",
++ ret);
++
++ ret = device_create_file(mxcfbi->ovfbi->dev,
++ &dev_attr_fsl_disp_dev_property);
++ if (ret)
++ dev_err(mxcfbi->ovfbi->dev, "Error %d on creating "
++ "file for disp device "
++				       "property\n", ret);
++ } else {
++ mxcfbi->ipu_ch_irq = IPU_IRQ_DC_SYNC_EOF;
++ mxcfbi->ipu_ch_nf_irq = IPU_IRQ_DC_SYNC_NFACK;
++ mxcfbi->ipu_alp_ch_irq = -1;
++ mxcfbi->ipu_ch = MEM_DC_SYNC;
++ mxcfbi->cur_blank = mxcfbi->next_blank = FB_BLANK_POWERDOWN;
++
++ ret = mxcfb_register(fbi);
++ if (ret < 0)
++ goto mxcfb_register_failed;
++ }
++
++ platform_set_drvdata(pdev, fbi);
++
++ ret = device_create_file(fbi->dev, &dev_attr_fsl_disp_property);
++ if (ret)
++ dev_err(&pdev->dev, "Error %d on creating file for disp "
++ "property\n", ret);
++
++ ret = device_create_file(fbi->dev, &dev_attr_fsl_disp_dev_property);
++ if (ret)
++ dev_err(&pdev->dev, "Error %d on creating file for disp "
++			"device property\n", ret);
++
++ return 0;
++
++mxcfb_setupoverlay_failed:
++mxcfb_register_failed:
++get_ipu_failed:
++post_init_failed:
++ ipu_clear_usage(mxcfbi->ipu_id, mxcfbi->ipu_di);
++ipu_in_busy:
++init_dispdrv_failed:
++ fb_dealloc_cmap(&fbi->cmap);
++ framebuffer_release(fbi);
++get_fb_option_failed:
++init_fbinfo_failed:
++ return ret;
++}
++
++static int mxcfb_remove(struct platform_device *pdev)
++{
++ struct fb_info *fbi = platform_get_drvdata(pdev);
++	struct mxcfb_info *mxc_fbi;
++
++	if (!fbi)
++		return 0;
++
++	mxc_fbi = fbi->par;
++
++ device_remove_file(fbi->dev, &dev_attr_fsl_disp_dev_property);
++ device_remove_file(fbi->dev, &dev_attr_fsl_disp_property);
++ mxcfb_blank(FB_BLANK_POWERDOWN, fbi);
++ mxcfb_unregister(fbi);
++ mxcfb_unmap_video_memory(fbi);
++
++ if (mxc_fbi->ovfbi) {
++ device_remove_file(mxc_fbi->ovfbi->dev,
++ &dev_attr_fsl_disp_dev_property);
++ device_remove_file(mxc_fbi->ovfbi->dev,
++ &dev_attr_fsl_disp_property);
++ mxcfb_blank(FB_BLANK_POWERDOWN, mxc_fbi->ovfbi);
++ mxcfb_unsetup_overlay(fbi);
++ mxcfb_unmap_video_memory(mxc_fbi->ovfbi);
++ }
++
++ ipu_clear_usage(mxc_fbi->ipu_id, mxc_fbi->ipu_di);
++	fb_dealloc_cmap(&fbi->cmap);
++ framebuffer_release(fbi);
++ return 0;
++}
++
++static const struct of_device_id imx_mxcfb_dt_ids[] = {
++ { .compatible = "fsl,mxc_sdc_fb"},
++ { /* sentinel */ }
++};
++
++/*!
++ * This structure contains pointers to the power management callback functions.
++ */
++static struct platform_driver mxcfb_driver = {
++ .driver = {
++ .name = MXCFB_NAME,
++ .of_match_table = imx_mxcfb_dt_ids,
++ },
++ .probe = mxcfb_probe,
++ .remove = mxcfb_remove,
++ .suspend = mxcfb_suspend,
++ .resume = mxcfb_resume,
++};
++
++/*!
++ * Main entry function for the framebuffer. The function registers the power
++ * management callback functions with the kernel and also registers the MXCFB
++ * callback functions with the core Linux framebuffer driver \b fbmem.c
++ *
++ * @return Error code indicating success or failure
++ */
++int __init mxcfb_init(void)
++{
++ return platform_driver_register(&mxcfb_driver);
++}
++
++void mxcfb_exit(void)
++{
++ platform_driver_unregister(&mxcfb_driver);
++}
++
++module_init(mxcfb_init);
++module_exit(mxcfb_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("MXC framebuffer driver");
++MODULE_LICENSE("GPL");
++MODULE_SUPPORTED_DEVICE("fb");
+diff -Nur linux-4.1.3/drivers/video/mxc/mxc_lcdif.c linux-xbian-imx6/drivers/video/mxc/mxc_lcdif.c
+--- linux-4.1.3/drivers/video/mxc/mxc_lcdif.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/drivers/video/mxc/mxc_lcdif.c 2015-07-27 23:13:08.753749907 +0200
+@@ -0,0 +1,241 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#include <linux/init.h>
++#include <linux/ipu.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mxcfb.h>
++#include <linux/of_device.h>
++#include <linux/pinctrl/consumer.h>
++#include <linux/platform_device.h>
++
++#include "mxc_dispdrv.h"
++
++struct mxc_lcd_platform_data {
++ u32 default_ifmt;
++ u32 ipu_id;
++ u32 disp_id;
++};
++
++struct mxc_lcdif_data {
++ struct platform_device *pdev;
++ struct mxc_dispdrv_handle *disp_lcdif;
++};
++
++#define DISPDRV_LCD "lcd"
++
++static struct fb_videomode lcdif_modedb[] = {
++ {
++ /* 800x480 @ 57 Hz , pixel clk @ 27MHz */
++ "CLAA-WVGA", 57, 800, 480, 37037, 40, 60, 10, 10, 20, 10,
++ FB_SYNC_CLK_LAT_FALL,
++ FB_VMODE_NONINTERLACED,
++ 0,},
++ {
++ /* 800x480 @ 60 Hz , pixel clk @ 32MHz */
++ "SEIKO-WVGA", 60, 800, 480, 29850, 89, 164, 23, 10, 10, 10,
++ FB_SYNC_CLK_LAT_FALL,
++ FB_VMODE_NONINTERLACED,
++ 0,},
++ {
++ /* 1920x1080i @ 50 Hz , pixel clk @ 74.5MHz */
++ "LCD-1080I50", 50, 1920, 1080, 13468, 528, 148, 4, 31, 44, 10,
++ FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++ FB_VMODE_INTERLACED,
++ 0,},
++};
++static int lcdif_modedb_sz = ARRAY_SIZE(lcdif_modedb);
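
For readability, the positional initializers above map onto struct fb_videomode fields as in this named rewrite of the first entry (timings identical, form only):

	static const struct fb_videomode claa_wvga = {
		.name		= "CLAA-WVGA",
		.refresh	= 57,
		.xres		= 800,
		.yres		= 480,
		.pixclock	= 37037,	/* picoseconds, ~27 MHz */
		.left_margin	= 40,
		.right_margin	= 60,
		.upper_margin	= 10,
		.lower_margin	= 10,
		.hsync_len	= 20,
		.vsync_len	= 10,
		.sync		= FB_SYNC_CLK_LAT_FALL,
		.vmode		= FB_VMODE_NONINTERLACED,
	};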
++
++static int lcdif_init(struct mxc_dispdrv_handle *disp,
++ struct mxc_dispdrv_setting *setting)
++{
++ int ret, i;
++ struct mxc_lcdif_data *lcdif = mxc_dispdrv_getdata(disp);
++ struct mxc_lcd_platform_data *plat_data
++ = lcdif->pdev->dev.platform_data;
++ struct fb_videomode *modedb = lcdif_modedb;
++ int modedb_sz = lcdif_modedb_sz;
++
++ /* use platform defined ipu/di */
++ setting->dev_id = plat_data->ipu_id;
++ setting->disp_id = plat_data->disp_id;
++
++ ret = fb_find_mode(&setting->fbi->var, setting->fbi, setting->dft_mode_str,
++ modedb, modedb_sz, NULL, setting->default_bpp);
++ if (!ret) {
++ fb_videomode_to_var(&setting->fbi->var, &modedb[0]);
++ setting->if_fmt = plat_data->default_ifmt;
++ }
++
++ INIT_LIST_HEAD(&setting->fbi->modelist);
++ for (i = 0; i < modedb_sz; i++) {
++ struct fb_videomode m;
++ fb_var_to_videomode(&m, &setting->fbi->var);
++ if (fb_mode_is_equal(&m, &modedb[i])) {
++ fb_add_videomode(&modedb[i],
++ &setting->fbi->modelist);
++ break;
++ }
++ }
++
++ return ret;
++}
++
++void lcdif_deinit(struct mxc_dispdrv_handle *disp)
++{
++ /*TODO*/
++}
++
++static struct mxc_dispdrv_driver lcdif_drv = {
++ .name = DISPDRV_LCD,
++ .init = lcdif_init,
++ .deinit = lcdif_deinit,
++};
++
++static int lcd_get_of_property(struct platform_device *pdev,
++ struct mxc_lcd_platform_data *plat_data)
++{
++ struct device_node *np = pdev->dev.of_node;
++ int err;
++ u32 ipu_id, disp_id;
++ const char *default_ifmt;
++
++ err = of_property_read_string(np, "default_ifmt", &default_ifmt);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property default_ifmt fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "ipu_id", &ipu_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property ipu_id fail\n");
++ return err;
++ }
++ err = of_property_read_u32(np, "disp_id", &disp_id);
++ if (err) {
++ dev_dbg(&pdev->dev, "get of property disp_id fail\n");
++ return err;
++ }
++
++ plat_data->ipu_id = ipu_id;
++ plat_data->disp_id = disp_id;
++ if (!strncmp(default_ifmt, "RGB24", 5))
++ plat_data->default_ifmt = IPU_PIX_FMT_RGB24;
++ else if (!strncmp(default_ifmt, "BGR24", 5))
++ plat_data->default_ifmt = IPU_PIX_FMT_BGR24;
++ else if (!strncmp(default_ifmt, "GBR24", 5))
++ plat_data->default_ifmt = IPU_PIX_FMT_GBR24;
++ else if (!strncmp(default_ifmt, "RGB565", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_RGB565;
++ else if (!strncmp(default_ifmt, "RGB666", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_RGB666;
++ else if (!strncmp(default_ifmt, "YUV444", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_YUV444;
++ else if (!strncmp(default_ifmt, "LVDS666", 7))
++ plat_data->default_ifmt = IPU_PIX_FMT_LVDS666;
++ else if (!strncmp(default_ifmt, "YUYV16", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_YUYV;
++ else if (!strncmp(default_ifmt, "UYVY16", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_UYVY;
++ else if (!strncmp(default_ifmt, "YVYU16", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_YVYU;
++ else if (!strncmp(default_ifmt, "VYUY16", 6))
++ plat_data->default_ifmt = IPU_PIX_FMT_VYUY;
++ else {
++ dev_err(&pdev->dev, "err default_ifmt!\n");
++ return -ENOENT;
++ }
++
++ return err;
++}
++
++static int mxc_lcdif_probe(struct platform_device *pdev)
++{
++ int ret;
++ struct pinctrl *pinctrl;
++ struct mxc_lcdif_data *lcdif;
++ struct mxc_lcd_platform_data *plat_data;
++
++ dev_dbg(&pdev->dev, "%s enter\n", __func__);
++ lcdif = devm_kzalloc(&pdev->dev, sizeof(struct mxc_lcdif_data),
++ GFP_KERNEL);
++ if (!lcdif)
++ return -ENOMEM;
++ plat_data = devm_kzalloc(&pdev->dev,
++ sizeof(struct mxc_lcd_platform_data),
++ GFP_KERNEL);
++ if (!plat_data)
++ return -ENOMEM;
++ pdev->dev.platform_data = plat_data;
++
++ ret = lcd_get_of_property(pdev, plat_data);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "get lcd of property fail\n");
++ return ret;
++ }
++
++ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
++ if (IS_ERR(pinctrl)) {
++ dev_err(&pdev->dev, "can't get/select pinctrl\n");
++ return PTR_ERR(pinctrl);
++ }
++
++ lcdif->pdev = pdev;
++ lcdif->disp_lcdif = mxc_dispdrv_register(&lcdif_drv);
++ mxc_dispdrv_setdata(lcdif->disp_lcdif, lcdif);
++
++ dev_set_drvdata(&pdev->dev, lcdif);
++ dev_dbg(&pdev->dev, "%s exit\n", __func__);
++
++ return ret;
++}
++
++static int mxc_lcdif_remove(struct platform_device *pdev)
++{
++ struct mxc_lcdif_data *lcdif = dev_get_drvdata(&pdev->dev);
++
++ mxc_dispdrv_puthandle(lcdif->disp_lcdif);
++ mxc_dispdrv_unregister(lcdif->disp_lcdif);
++ return 0;
++}
++
++static const struct of_device_id imx_lcd_dt_ids[] = {
++ { .compatible = "fsl,lcd"},
++ { /* sentinel */ }
++};
++static struct platform_driver mxc_lcdif_driver = {
++ .driver = {
++ .name = "mxc_lcdif",
++ .of_match_table = imx_lcd_dt_ids,
++ },
++ .probe = mxc_lcdif_probe,
++ .remove = mxc_lcdif_remove,
++};
++
++static int __init mxc_lcdif_init(void)
++{
++ return platform_driver_register(&mxc_lcdif_driver);
++}
++
++static void __exit mxc_lcdif_exit(void)
++{
++ platform_driver_unregister(&mxc_lcdif_driver);
++}
++
++module_init(mxc_lcdif_init);
++module_exit(mxc_lcdif_exit);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("i.MX ipuv3 LCD extern port driver");
++MODULE_LICENSE("GPL");
+diff -Nur linux-4.1.3/firmware/imx/sdma/sdma-imx6q.bin.ihex linux-xbian-imx6/firmware/imx/sdma/sdma-imx6q.bin.ihex
+--- linux-4.1.3/firmware/imx/sdma/sdma-imx6q.bin.ihex 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/firmware/imx/sdma/sdma-imx6q.bin.ihex 2015-07-27 23:13:08.841437049 +0200
+@@ -0,0 +1,116 @@
++:1000000053444D4101000000010000001C000000AD
++:1000100026000000B40000007A0600008202000002
++:10002000FFFFFFFF00000000FFFFFFFFFFFFFFFFDC
++:10003000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFD0
++:10004000FFFFFFFFFFFFFFFF6A1A0000FFFFFFFF38
++:10005000EB020000BB180000FFFFFFFF08040000D8
++:10006000FFFFFFFFC0030000FFFFFFFFFFFFFFFFD9
++:10007000FFFFFFFFAB020000FFFFFFFF7B0300005D
++:10008000FFFFFFFFFFFFFFFF4C0400006E040000B6
++:10009000FFFFFFFF00180000FFFFFFFFFFFFFFFF54
++:1000A000000000000018000062180000161A00008E
++:1000B000061B0000E3C1DB57E35FE357F352016A1D
++:1000C0008F00D500017D8D00A005EB5D7804037DD8
++:1000D00079042C7D367C79041F7CEE56000F600677
++:1000E000057D0965437E0A62417E20980A623E7E54
++:1000F00009653C7E12051205AD026007037DFB55C4
++:10010000D36D2B98FB55041DD36DC86A2F7F011F3B
++:1001100003200048E47C5398FB55D76D1500057803
++:100120000962C86A0962C86AD76D5298FB55D76DD3
++:100130001500150005780A62C86A0A62C86AD76D98
++:100140005298FB55D76D15001500150005780B6208
++:10015000C86A0B62C86AD76D097CDF6D077F000033
++:10016000EB55004D077DFAC1E35706980700CC68B0
++:100170000C6813C20AC20398D9C1E3C1DB57E35F1D
++:10018000E357F352216A8F00D500017D8D00A00551
++:10019000EB5DFB567804037D79042A7D317C79047C
++:1001A000207C700B1103EB53000F6003057D096584
++:1001B000377E0A62357E86980A62327E0965307E15
++:1001C00012051205AD026007027C065A8E98265A67
++:1001D000277F011F03200048E87C700B1103135395
++:1001E000AF98150004780962065A0962265AAE983B
++:1001F0001500150004780A62065A0A62265AAE985B
++:1002000015001500150004780B62065A0B62265A79
++:10021000077C0000EB55004D067DFAC1E357699855
++:1002200007000C6813C20AC26698700B11031353BF
++:100230006C07017CD9C1FB5E8A066B07017CD9C1C2
++:10024000F35EDB59D3588F0110010F398B003CC18D
++:100250002B7DC05AC85B4EC1277C88038906E35CAE
++:10026000FF0D1105FF1DBC053E07004D187D7008F0
++:1002700011007E07097D7D07027D2852E698F8521D
++:10028000DB54BC02CC02097C7C07027D2852EF982B
++:10029000F852D354BC02CC02097D0004DD988B00D7
++:1002A000C052C85359C1D67D0002CD98FF08BF0087
++:1002B0007F07157D8804D500017D8D00A005EB5DCD
++:1002C0008F0212021202FF3ADA05027C3E071899E9
++:1002D000A402DD02027D3E0718995E071899EB55CE
++:1002E0009805EB5DF352FB546A07267D6C07017D90
++:1002F00055996B07577C6907047D6807027D010EDD
++:100300002F999358D600017D8E009355A005935DDB
++:10031000A00602780255045D1D7C004E087C69072A
++:10032000037D0255177E3C99045D147F8906935026
++:100330000048017D2799A099150006780255045DB3
++:100340004F070255245D2F07017CA09917006F0706
++:10035000017C012093559D000700A7D9F598D36C27
++:100360006907047D6807027D010E64999358D600E1
++:10037000017D8E009355A005935DA006027802557D
++:10038000C86D0F7C004E087C6907037D0255097E0D
++:100390007199C86D067F890693500048017D5C996C
++:1003A000A0999A99C36A6907047D6807027D010EC6
++:1003B00087999358D600017D8E009355A005935DD3
++:1003C000A0060278C865045D0F7C004E087C6907B2
++:1003D000037DC865097E9499045D067F8906935064
++:1003E0000048017D7F99A09993559D000700FF6CFF
++:1003F000A7D9F5980000E354EB55004D017CF59822
++:10040000DD98E354EB55FF0A1102FF1A7F07027CC7
++:10041000A005B4999D008C05BA05A0051002BA0488
++:10042000AD0454040600E3C1DB57FB52C36AF35228
++:10043000056A8F00D500017D8D00A005EB5D780475
++:10044000037D79042B7D1E7C7904337CEE56000FEE
++:10045000FB556007027DC36DD599041DC36DC8624D
++:100460003B7E6006027D10021202096A357F12028D
++:10047000096A327F1202096A2F7F011F0320004898
++:10048000E77C099AFB55C76D150015001500057826
++:10049000C8620B6AC8620B6AC76D089AFB55C76DC4
++:1004A000150015000578C8620A6AC8620A6AC76D35
++:1004B000089AFB55C76D15000578C862096AC862BD
++:1004C000096AC76D097C286A077F0000EB55004D5B
++:1004D000057DFAC1DB57BF9977C254040AC2BA99A5
++:1004E000D9C1E3C1DB57F352056A8F00D500017D06
++:1004F0008D00A005FB567804037D7904297D1F7CBF
++:1005000079042E7CE35D700D1105ED55000F600739
++:10051000027D0652329A2652337E6005027D100219
++:100520001202096A2D7F1202096A2A7F1202096AE1
++:10053000277F011F03200048EA7CE3555D9A1500E0
++:1005400015001500047806520B6A26520B6A5C9A55
++:1005500015001500047806520A6A26520A6A5C9A47
++:10056000150004780652096A2652096A097C286A2D
++:10057000077F0000DB57004D057DFAC1DB571B9A52
++:1005800077C254040AC2189AE3C1DB57F352056AD2
++:10059000FB568E02941AC36AC8626902247D941EB7
++:1005A000C36ED36EC8624802C86A9426981EC36E92
++:1005B000D36EC8624C02C86A9826C36E981EC36E7A
++:1005C000C8629826C36E6002097CC8626E02247DF0
++:1005D000096A1E7F0125004D257D849A286A187FAF
++:1005E00004627AC2B89AE36E8F00D805017D8D004F
++:1005F000A005C8626E02107D096A0A7F0120F97C9D
++:10060000286A067F0000004D0D7DFAC1DB576E9A07
++:10061000070004620C6AB59A286AFA7F04627AC2FB
++:1006200058045404286AF47F0AC26B9AD9C1E3C102
++:10063000DB57F352056AFB568E02941A0252690286
++:100640001D7D941E06524802065A9426981E065294
++:100650004C02065A9826981E065260020A7C98267A
++:1006600006526E02237D096A1D7F0125004D247DFF
++:10067000D19A286A177F04627AC2029B8F00D8053C
++:10068000017D8D00A00506526E02107D096A0A7F69
++:100690000120F97C286A067F0000004D0D7DFAC11B
++:1006A000DB57C19A070004620C6AFF9A286AFA7F36
++:1006B00004627AC258045404286AF47F0AC2BE9ABB
++:1006C000016E0B612F7E0B622D7E0B632B7E0C0D5A
++:1006D0001704170417049D04081DCC05017C0C0D9C
++:1006E000D16A000F4207C86FDD6F1C7F8E009D002E
++:1006F00001680B67177ED56B04080278C86F120774
++:10070000117C0B670F7E04080278C86F12070A7C01
++:10071000DD6F087FD169010FC86FDD6F037F0101B5
++:0E0720000004129B0700FF680C680002129B89
++:00000001FF
+diff -Nur linux-4.1.3/firmware/Makefile linux-xbian-imx6/firmware/Makefile
+--- linux-4.1.3/firmware/Makefile 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/firmware/Makefile 2015-07-27 23:13:08.809550816 +0200
+@@ -62,6 +62,7 @@
+ radeon/RV730_pfp.bin radeon/RV730_me.bin \
+ radeon/RV710_pfp.bin radeon/RV710_me.bin
+ fw-shipped-$(CONFIG_DVB_AV7110) += av7110/bootcode.bin
++fw-shipped-$(CONFIG_IMX_SDMA) += imx/sdma/sdma-imx6q.bin
+ fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin
+ fw-shipped-$(CONFIG_E100) += e100/d101m_ucode.bin e100/d101s_ucode.bin \
+ e100/d102e_ucode.bin
+diff -Nur linux-4.1.3/fs/btrfs/compression.c linux-xbian-imx6/fs/btrfs/compression.c
+--- linux-4.1.3/fs/btrfs/compression.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/fs/btrfs/compression.c 2015-07-27 23:13:08.905209516 +0200
+@@ -753,6 +753,8 @@
+ static const struct btrfs_compress_op * const btrfs_compress_op[] = {
+ &btrfs_zlib_compress,
+ &btrfs_lzo_compress,
++ &btrfs_lz4_compress,
++ &btrfs_lz4hc_compress,
+ };
+
+ void __init btrfs_init_compress(void)
+diff -Nur linux-4.1.3/fs/btrfs/compression.h linux-xbian-imx6/fs/btrfs/compression.h
+--- linux-4.1.3/fs/btrfs/compression.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/fs/btrfs/compression.h 2015-07-27 23:13:08.905209516 +0200
+@@ -79,5 +79,7 @@
+
+ extern const struct btrfs_compress_op btrfs_zlib_compress;
+ extern const struct btrfs_compress_op btrfs_lzo_compress;
++extern const struct btrfs_compress_op btrfs_lz4_compress;
++extern const struct btrfs_compress_op btrfs_lz4hc_compress;
+
+ #endif
+diff -Nur linux-4.1.3/fs/btrfs/ctree.h linux-xbian-imx6/fs/btrfs/ctree.h
+--- linux-4.1.3/fs/btrfs/ctree.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/fs/btrfs/ctree.h 2015-07-27 23:13:08.909195295 +0200
+@@ -504,13 +504,7 @@
+ #define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1)
+ #define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS (1ULL << 2)
+ #define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO (1ULL << 3)
+-/*
+- * some patches floated around with a second compression method
+- * lets save that incompat here for when they do get in
+- * Note we don't actually support it, we're just reserving the
+- * number
+- */
+-#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZOv2 (1ULL << 4)
++#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZ4 (1ULL << 4)
+
+ /*
+ * older kernels tried to do bigger metadata blocks, but the
+@@ -539,6 +533,7 @@
+ BTRFS_FEATURE_INCOMPAT_RAID56 | \
+ BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \
+ BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \
++ BTRFS_FEATURE_INCOMPAT_COMPRESS_LZ4 | \
+ BTRFS_FEATURE_INCOMPAT_NO_HOLES)
+
+ #define BTRFS_FEATURE_INCOMPAT_SAFE_SET \
+@@ -709,8 +704,10 @@
+ BTRFS_COMPRESS_NONE = 0,
+ BTRFS_COMPRESS_ZLIB = 1,
+ BTRFS_COMPRESS_LZO = 2,
+- BTRFS_COMPRESS_TYPES = 2,
+- BTRFS_COMPRESS_LAST = 3,
++ BTRFS_COMPRESS_LZ4 = 3,
++ BTRFS_COMPRESS_LZ4HC = 4,
++ BTRFS_COMPRESS_TYPES = 4,
++ BTRFS_COMPRESS_LAST = 5,
+ };
+
+ struct btrfs_inode_item {
+diff -Nur linux-4.1.3/fs/btrfs/disk-io.c linux-xbian-imx6/fs/btrfs/disk-io.c
+--- linux-4.1.3/fs/btrfs/disk-io.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/fs/btrfs/disk-io.c 2015-07-27 23:13:08.909195295 +0200
+@@ -2703,6 +2703,10 @@
+ if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
+ features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
+
++ if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZ4 ||
++ tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZ4HC)
++ features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZ4;
++
+ if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
+ printk(KERN_INFO "BTRFS: has skinny extents\n");
+
+diff -Nur linux-4.1.3/fs/btrfs/ioctl.c linux-xbian-imx6/fs/btrfs/ioctl.c
+--- linux-4.1.3/fs/btrfs/ioctl.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/fs/btrfs/ioctl.c 2015-07-27 23:13:08.921152633 +0200
+@@ -1439,6 +1439,10 @@
+ if (range->compress_type == BTRFS_COMPRESS_LZO) {
+ btrfs_set_fs_incompat(root->fs_info, COMPRESS_LZO);
+ }
++ if (range->compress_type == BTRFS_COMPRESS_LZ4 ||
++ range->compress_type == BTRFS_COMPRESS_LZ4HC) {
++ btrfs_set_fs_incompat(root->fs_info, COMPRESS_LZ4);
++ }
+
+ ret = defrag_count;
+
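
With this hunk, userspace can request LZ4 recompression through the existing defrag ioctl; a sketch using the stock linux/btrfs.h UAPI (only the compress_type value 3, BTRFS_COMPRESS_LZ4 per the ctree.h hunk above, comes from this patch; the file path is hypothetical):

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/btrfs.h>

	int main(void)
	{
		struct btrfs_ioctl_defrag_range_args range;
		int fd = open("/mnt/btrfs/file", O_RDWR);

		if (fd < 0)
			return 1;
		memset(&range, 0, sizeof(range));
		range.len = (__u64)-1;			/* whole file */
		range.flags = BTRFS_DEFRAG_RANGE_COMPRESS;
		range.compress_type = 3;		/* BTRFS_COMPRESS_LZ4 */
		ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range);
		close(fd);
		return 0;
	}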
+diff -Nur linux-4.1.3/fs/btrfs/lz4_wrapper.c linux-xbian-imx6/fs/btrfs/lz4_wrapper.c
+--- linux-4.1.3/fs/btrfs/lz4_wrapper.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/fs/btrfs/lz4_wrapper.c 2015-07-27 23:13:08.921152633 +0200
+@@ -0,0 +1,487 @@
++/*
++ * Copyright (C) 2008 Oracle. All rights reserved.
++ * Copyright (C) 2013 SUSE. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public
++ * License v2 as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public
++ * License along with this program; if not, write to the
++ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
++ * Boston, MA 021110-1307, USA.
++ */
++
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/init.h>
++#include <linux/err.h>
++#include <linux/sched.h>
++#include <linux/pagemap.h>
++#include <linux/bio.h>
++#include <linux/lz4.h>
++#include "compression.h"
++
++#define LZ4_LEN 4
++#define LZ4_CHUNK_SIZE (4096)
++#define LZ4_MAX_WORKBUF (2 * LZ4_CHUNK_SIZE)
++
++struct workspace {
++ void *mem; /* work memory for compression */
++	void *buf;	/* where decompressed data goes */
++	void *cbuf;	/* where compressed data goes */
++ struct list_head list;
++};
++
++static void lz4_free_workspace(struct list_head *ws)
++{
++ struct workspace *workspace = list_entry(ws, struct workspace, list);
++
++ vfree(workspace->buf);
++ vfree(workspace->cbuf);
++ vfree(workspace->mem);
++ kfree(workspace);
++}
++
++static struct list_head *lz4_alloc_workspace_generic(int hi)
++{
++ struct workspace *workspace;
++
++ workspace = kzalloc(sizeof(*workspace), GFP_NOFS);
++ if (!workspace)
++ return ERR_PTR(-ENOMEM);
++
++ if (hi)
++ workspace->mem = vmalloc(LZ4HC_MEM_COMPRESS);
++ else
++ workspace->mem = vmalloc(LZ4_MEM_COMPRESS);
++ workspace->buf = vmalloc(LZ4_MAX_WORKBUF);
++ workspace->cbuf = vmalloc(LZ4_MAX_WORKBUF);
++ if (!workspace->mem || !workspace->buf || !workspace->cbuf)
++ goto fail;
++
++ INIT_LIST_HEAD(&workspace->list);
++
++ return &workspace->list;
++fail:
++ lz4_free_workspace(&workspace->list);
++ return ERR_PTR(-ENOMEM);
++}
++
++static struct list_head *lz4_alloc_workspace(void)
++{
++ return lz4_alloc_workspace_generic(0);
++}
++
++static struct list_head *lz4hc_alloc_workspace(void)
++{
++ return lz4_alloc_workspace_generic(1);
++}
++
++static inline void write_compress_length(char *buf, size_t len)
++{
++ __le32 dlen;
++
++ dlen = cpu_to_le32(len);
++ memcpy(buf, &dlen, LZ4_LEN);
++}
++
++static inline size_t read_compress_length(char *buf)
++{
++ __le32 dlen;
++
++ memcpy(&dlen, buf, LZ4_LEN);
++ return le32_to_cpu(dlen);
++}
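
Taken together, these helpers define the on-disk framing that the compress path below emits; sketched as a layout comment (derived from the code in this file, not from separate format documentation):

	/*
	 *   [__le32 tot_out]                       total compressed bytes, headers included
	 *   [__le32 len0][len0 bytes of LZ4 data]  one record per <= 4 KiB input chunk
	 *   [__le32 len1][len1 bytes of LZ4 data]
	 *   ...
	 *
	 * When fewer than LZ4_LEN bytes remain in a page, they are zero-padded
	 * and the next length field starts on the following page.
	 */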
++
++static int lz4_compress_pages_generic(struct list_head *ws,
++ struct address_space *mapping,
++ u64 start, unsigned long len,
++ struct page **pages,
++ unsigned long nr_dest_pages,
++ unsigned long *out_pages,
++ unsigned long *total_in,
++ unsigned long *total_out,
++ unsigned long max_out, int hi)
++{
++ struct workspace *workspace = list_entry(ws, struct workspace, list);
++ int ret = 0;
++ char *data_in;
++ char *cpage_out;
++ int nr_pages = 0;
++ struct page *in_page = NULL;
++ struct page *out_page = NULL;
++ unsigned long bytes_left;
++
++ size_t in_len;
++ size_t out_len;
++ char *buf;
++ unsigned long tot_in = 0;
++ unsigned long tot_out = 0;
++ unsigned long pg_bytes_left;
++ unsigned long out_offset;
++ unsigned long bytes;
++
++ *out_pages = 0;
++ *total_out = 0;
++ *total_in = 0;
++
++ in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
++ data_in = kmap(in_page);
++
++ /*
++ * store the size of all chunks of compressed data in
++ * the first 4 bytes
++ */
++ out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
++ if (out_page == NULL) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ cpage_out = kmap(out_page);
++ out_offset = LZ4_LEN;
++ tot_out = LZ4_LEN;
++ pages[0] = out_page;
++ nr_pages = 1;
++ pg_bytes_left = PAGE_CACHE_SIZE - LZ4_LEN;
++
++ /* compress at most one page of data each time */
++ in_len = min(len, PAGE_CACHE_SIZE);
++ while (tot_in < len) {
++ if (hi)
++ ret = lz4hc_compress(data_in, in_len, workspace->cbuf,
++ &out_len, workspace->mem);
++ else
++ ret = lz4_compress(data_in, in_len, workspace->cbuf, &out_len,
++ workspace->mem);
++ if (ret < 0) {
++ printk(KERN_DEBUG
++ "btrfs: lz4 compress in loop returned %d\n",
++ ret);
++ ret = -1;
++ goto out;
++ }
++
++ /* store the size of this chunk of compressed data */
++ write_compress_length(cpage_out + out_offset, out_len);
++ tot_out += LZ4_LEN;
++ out_offset += LZ4_LEN;
++ pg_bytes_left -= LZ4_LEN;
++
++ tot_in += in_len;
++ tot_out += out_len;
++
++ /* copy bytes from the working buffer into the pages */
++ buf = workspace->cbuf;
++ while (out_len) {
++ bytes = min_t(unsigned long, pg_bytes_left, out_len);
++
++ memcpy(cpage_out + out_offset, buf, bytes);
++
++ out_len -= bytes;
++ pg_bytes_left -= bytes;
++ buf += bytes;
++ out_offset += bytes;
++
++ /*
++ * we need another page for writing out.
++ *
++ * Note if there's less than 4 bytes left, we just
++ * skip to a new page.
++ */
++ if ((out_len == 0 && pg_bytes_left < LZ4_LEN) ||
++ pg_bytes_left == 0) {
++ if (pg_bytes_left) {
++ memset(cpage_out + out_offset, 0,
++ pg_bytes_left);
++ tot_out += pg_bytes_left;
++ }
++
++ /* we're done, don't allocate new page */
++ if (out_len == 0 && tot_in >= len)
++ break;
++
++ kunmap(out_page);
++ if (nr_pages == nr_dest_pages) {
++ out_page = NULL;
++ ret = -1;
++ goto out;
++ }
++
++ out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
++ if (out_page == NULL) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ cpage_out = kmap(out_page);
++ pages[nr_pages++] = out_page;
++
++ pg_bytes_left = PAGE_CACHE_SIZE;
++ out_offset = 0;
++ }
++ }
++
++ /* we're making it bigger, give up */
++ if (tot_in > 8192 && tot_in < tot_out)
++ goto out;
++
++ /* we're all done */
++ if (tot_in >= len)
++ break;
++
++ if (tot_out > max_out)
++ break;
++
++ bytes_left = len - tot_in;
++ kunmap(in_page);
++ page_cache_release(in_page);
++
++ start += PAGE_CACHE_SIZE;
++ in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
++ data_in = kmap(in_page);
++ in_len = min(bytes_left, PAGE_CACHE_SIZE);
++ }
++
++ if (tot_out > tot_in)
++ goto out;
++
++ /* store the size of all chunks of compressed data */
++ cpage_out = kmap(pages[0]);
++ write_compress_length(cpage_out, tot_out);
++
++ kunmap(pages[0]);
++
++ ret = 0;
++ *total_out = tot_out;
++ *total_in = tot_in;
++out:
++ *out_pages = nr_pages;
++ if (out_page)
++ kunmap(out_page);
++
++ if (in_page) {
++ kunmap(in_page);
++ page_cache_release(in_page);
++ }
++
++ return ret;
++}
++
++static int lz4_compress_pages(struct list_head *ws,
++ struct address_space *mapping,
++ u64 start, unsigned long len,
++ struct page **pages,
++ unsigned long nr_dest_pages,
++ unsigned long *out_pages,
++ unsigned long *total_in,
++ unsigned long *total_out,
++ unsigned long max_out)
++{
++ return lz4_compress_pages_generic(ws, mapping, start, len, pages,
++ nr_dest_pages, out_pages, total_in, total_out,
++ max_out, 0);
++}
++
++static int lz4hc_compress_pages(struct list_head *ws,
++ struct address_space *mapping,
++ u64 start, unsigned long len,
++ struct page **pages,
++ unsigned long nr_dest_pages,
++ unsigned long *out_pages,
++ unsigned long *total_in,
++ unsigned long *total_out,
++ unsigned long max_out)
++{
++ return lz4_compress_pages_generic(ws, mapping, start, len, pages,
++ nr_dest_pages, out_pages, total_in, total_out,
++ max_out, 1);
++}
++
++static int lz4_decompress_biovec(struct list_head *ws,
++ struct page **pages_in,
++ u64 disk_start,
++ struct bio_vec *bvec,
++ int vcnt,
++ size_t srclen)
++{
++ struct workspace *workspace = list_entry(ws, struct workspace, list);
++ int ret = 0, ret2;
++ char *data_in;
++ unsigned long page_in_index = 0;
++ unsigned long page_out_index = 0;
++ unsigned long total_pages_in = (srclen + PAGE_CACHE_SIZE - 1) /
++ PAGE_CACHE_SIZE;
++ unsigned long buf_start;
++ unsigned long buf_offset = 0;
++ unsigned long bytes;
++ unsigned long working_bytes;
++ unsigned long pg_offset;
++
++ size_t in_len;
++ size_t out_len;
++ unsigned long in_offset;
++ unsigned long in_page_bytes_left;
++ unsigned long tot_in;
++ unsigned long tot_out;
++ unsigned long tot_len;
++ char *buf;
++ bool may_late_unmap, need_unmap;
++
++ data_in = kmap(pages_in[0]);
++ tot_len = read_compress_length(data_in);
++
++ tot_in = LZ4_LEN;
++ in_offset = LZ4_LEN;
++ tot_len = min_t(size_t, srclen, tot_len);
++ in_page_bytes_left = PAGE_CACHE_SIZE - LZ4_LEN;
++
++ tot_out = 0;
++ pg_offset = 0;
++
++ while (tot_in < tot_len) {
++ in_len = read_compress_length(data_in + in_offset);
++ in_page_bytes_left -= LZ4_LEN;
++ in_offset += LZ4_LEN;
++ tot_in += LZ4_LEN;
++
++ tot_in += in_len;
++ working_bytes = in_len;
++ may_late_unmap = need_unmap = false;
++
++ /* fast path: avoid using the working buffer */
++ if (in_page_bytes_left >= in_len) {
++ buf = data_in + in_offset;
++ bytes = in_len;
++ may_late_unmap = true;
++ goto cont;
++ }
++
++ /* copy bytes from the pages into the working buffer */
++ buf = workspace->cbuf;
++ buf_offset = 0;
++ while (working_bytes) {
++ bytes = min(working_bytes, in_page_bytes_left);
++
++ memcpy(buf + buf_offset, data_in + in_offset, bytes);
++ buf_offset += bytes;
++cont:
++ working_bytes -= bytes;
++ in_page_bytes_left -= bytes;
++ in_offset += bytes;
++
++ /* check if we need to pick another page */
++ if ((working_bytes == 0 && in_page_bytes_left < LZ4_LEN)
++ || in_page_bytes_left == 0) {
++ tot_in += in_page_bytes_left;
++
++ if (working_bytes == 0 && tot_in >= tot_len)
++ break;
++
++ if (page_in_index + 1 >= total_pages_in) {
++ ret = -1;
++ goto done;
++ }
++
++ if (may_late_unmap)
++ need_unmap = true;
++ else
++ kunmap(pages_in[page_in_index]);
++
++ data_in = kmap(pages_in[++page_in_index]);
++
++ in_page_bytes_left = PAGE_CACHE_SIZE;
++ in_offset = 0;
++ }
++ }
++
++ out_len = LZ4_CHUNK_SIZE;
++ ret = lz4_decompress_unknownoutputsize(buf, in_len, workspace->buf,
++ &out_len);
++ if (need_unmap)
++ kunmap(pages_in[page_in_index - 1]);
++ if (ret < 0) {
++ printk(KERN_WARNING "btrfs: lz4 decompress failed\n");
++ ret = -1;
++ break;
++ }
++
++ buf_start = tot_out;
++ tot_out += out_len;
++
++ ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
++ tot_out, disk_start,
++ bvec, vcnt,
++ &page_out_index, &pg_offset);
++ if (ret2 == 0)
++ break;
++ }
++done:
++ kunmap(pages_in[page_in_index]);
++ return ret;
++}
++
++static int lz4_decompress_wrapper(struct list_head *ws, unsigned char *data_in,
++ struct page *dest_page,
++ unsigned long start_byte,
++ size_t srclen, size_t destlen)
++{
++ struct workspace *workspace = list_entry(ws, struct workspace, list);
++ size_t in_len;
++ size_t out_len;
++ size_t tot_len;
++ int ret = 0;
++ char *kaddr;
++ unsigned long bytes;
++
++ BUG_ON(srclen < LZ4_LEN);
++
++ tot_len = read_compress_length(data_in);
++ data_in += LZ4_LEN;
++
++ in_len = read_compress_length(data_in);
++ data_in += LZ4_LEN;
++
++ out_len = LZ4_CHUNK_SIZE;
++ ret = lz4_decompress_unknownoutputsize(data_in, in_len, workspace->buf,
++ &out_len);
++ if (ret < 0) {
++ printk(KERN_WARNING "btrfs: lz4 decompress failed\n");
++ ret = -1;
++ goto out;
++ }
++
++ if (out_len < start_byte) {
++ ret = -1;
++ goto out;
++ }
++
++ bytes = min_t(unsigned long, destlen, out_len - start_byte);
++
++ kaddr = kmap_atomic(dest_page);
++ memcpy(kaddr, workspace->buf + start_byte, bytes);
++ kunmap_atomic(kaddr);
++out:
++ return ret;
++}
++
++const struct btrfs_compress_op btrfs_lz4_compress = {
++ .alloc_workspace = lz4_alloc_workspace,
++ .free_workspace = lz4_free_workspace,
++ .compress_pages = lz4_compress_pages,
++ .decompress_biovec = lz4_decompress_biovec,
++ .decompress = lz4_decompress_wrapper,
++};
++
++const struct btrfs_compress_op btrfs_lz4hc_compress = {
++ .alloc_workspace = lz4hc_alloc_workspace,
++ .free_workspace = lz4_free_workspace,
++ .compress_pages = lz4hc_compress_pages,
++ .decompress_biovec = lz4_decompress_biovec,
++ .decompress = lz4_decompress_wrapper,
++};
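
For reference: the compress loop above prefixes each compressed chunk with a 4-byte length and reserves the first 4 bytes of page 0 for the total stream length. The framing helpers it calls are presumably defined earlier in lz4_wrapper.c along the lines of their fs/btrfs/lzo.c counterparts; a minimal sketch, assuming LZ4_LEN is 4 and lengths are stored little-endian:

static inline void write_compress_length(char *buf, size_t len)
{
	__le32 dlen = cpu_to_le32(len);

	memcpy(buf, &dlen, LZ4_LEN);
}

static inline size_t read_compress_length(char *buf)
{
	__le32 dlen;

	memcpy(&dlen, buf, LZ4_LEN);
	return le32_to_cpu(dlen);
}
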
+diff -Nur linux-4.1.3/fs/btrfs/Makefile linux-xbian-imx6/fs/btrfs/Makefile
+--- linux-4.1.3/fs/btrfs/Makefile 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/fs/btrfs/Makefile 2015-07-27 23:13:08.901223737 +0200
+@@ -9,7 +9,7 @@
+ export.o tree-log.o free-space-cache.o zlib.o lzo.o \
+ compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
+ reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
+- uuid-tree.o props.o hash.o
++ uuid-tree.o props.o hash.o lz4_wrapper.o
+
+ btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
+ btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
+diff -Nur linux-4.1.3/fs/btrfs/super.c linux-xbian-imx6/fs/btrfs/super.c
+--- linux-4.1.3/fs/btrfs/super.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/fs/btrfs/super.c 2015-07-27 23:13:08.961010425 +0200
+@@ -492,6 +492,20 @@
+ btrfs_clear_opt(info->mount_opt, COMPRESS);
+ btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
+ compress_force = false;
++ } else if (strcmp(args[0].from, "lz4") == 0) {
++ compress_type = "lz4";
++ info->compress_type = BTRFS_COMPRESS_LZ4;
++ btrfs_set_opt(info->mount_opt, COMPRESS);
++ btrfs_clear_opt(info->mount_opt, NODATACOW);
++ btrfs_clear_opt(info->mount_opt, NODATASUM);
++ btrfs_set_fs_incompat(info, COMPRESS_LZ4);
++ } else if (strcmp(args[0].from, "lz4hc") == 0) {
++ compress_type = "lz4hc";
++ info->compress_type = BTRFS_COMPRESS_LZ4HC;
++ btrfs_set_opt(info->mount_opt, COMPRESS);
++ btrfs_clear_opt(info->mount_opt, NODATACOW);
++ btrfs_clear_opt(info->mount_opt, NODATASUM);
++ btrfs_set_fs_incompat(info, COMPRESS_LZ4);
+ } else {
+ ret = -EINVAL;
+ goto out;
+@@ -1051,8 +1065,14 @@
+ if (btrfs_test_opt(root, COMPRESS)) {
+ if (info->compress_type == BTRFS_COMPRESS_ZLIB)
+ compress_type = "zlib";
++ else if (info->compress_type == BTRFS_COMPRESS_LZ4)
++ compress_type = "lz4";
++ else if (info->compress_type == BTRFS_COMPRESS_LZ4HC)
++ compress_type = "lz4hc";
++ else if (info->compress_type == BTRFS_COMPRESS_LZO)
++ compress_type = "lzo";
+ else
+- compress_type = "lzo";
++ compress_type = "none";
+ if (btrfs_test_opt(root, FORCE_COMPRESS))
+ seq_printf(seq, ",compress-force=%s", compress_type);
+ else
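
With the super.c hooks above, lz4 is selected at mount time, e.g. mount -o compress=lz4 /dev/sda1 /mnt (the device path is just an example). Note that both lz4 and lz4hc set the same COMPRESS_LZ4 incompat flag, so a filesystem written this way is no longer readable on kernels without this patch; the show_options change also reports "none" rather than defaulting to "lzo" when the compress type is unrecognized.
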
+diff -Nur linux-4.1.3/include/dt-bindings/clock/imx6qdl-clock.h linux-xbian-imx6/include/dt-bindings/clock/imx6qdl-clock.h
+--- linux-4.1.3/include/dt-bindings/clock/imx6qdl-clock.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/include/dt-bindings/clock/imx6qdl-clock.h 2015-07-27 23:13:09.467204382 +0200
+@@ -86,8 +86,6 @@
+ #define IMX6QDL_CLK_GPU3D_SHADER 74
+ #define IMX6QDL_CLK_IPU1_PODF 75
+ #define IMX6QDL_CLK_IPU2_PODF 76
+-#define IMX6QDL_CLK_LDB_DI0_PODF 77
+-#define IMX6QDL_CLK_LDB_DI1_PODF 78
+ #define IMX6QDL_CLK_IPU1_DI0_PRE 79
+ #define IMX6QDL_CLK_IPU1_DI1_PRE 80
+ #define IMX6QDL_CLK_IPU2_DI0_PRE 81
+@@ -251,6 +249,13 @@
+ #define IMX6QDL_CLK_VIDEO_27M 238
+ #define IMX6QDL_CLK_MIPI_CORE_CFG 239
+ #define IMX6QDL_CLK_MIPI_IPG 240
+-#define IMX6QDL_CLK_END 241
++#define IMX6QDL_CLK_LDB_DI0_DIV_7 241
++#define IMX6QDL_CLK_LDB_DI1_DIV_7 242
++#define IMX6QDL_CLK_LDB_DI0_DIV_SEL 243
++#define IMX6QDL_CLK_LDB_DI1_DIV_SEL 244
++#define IMX6QDL_CLK_DCIC1 245
++#define IMX6QDL_CLK_DCIC2 246
++#define IMX6QDL_CLK_SPDIF_GCLK 247
++#define IMX6QDL_CLK_END 248
+
+ #endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
+diff -Nur linux-4.1.3/include/linux/busfreq-imx6.h linux-xbian-imx6/include/linux/busfreq-imx6.h
+--- linux-4.1.3/include/linux/busfreq-imx6.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/linux/busfreq-imx6.h 2015-07-27 23:13:09.479161721 +0200
+@@ -0,0 +1,23 @@
++/*
++ * Copyright 2012-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __ASM_ARCH_MXC_BUSFREQ_H__
++#define __ASM_ARCH_MXC_BUSFREQ_H__
++
++/*
++ * This enumerates busfreq mode.
++ */
++enum bus_freq_mode {
++ BUS_FREQ_HIGH,
++ BUS_FREQ_MED,
++ BUS_FREQ_AUDIO,
++ BUS_FREQ_LOW,
++};
++void request_bus_freq(enum bus_freq_mode mode);
++void release_bus_freq(enum bus_freq_mode mode);
++#endif
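
A hypothetical consumer sketch for the busfreq API above (not part of the patch): a driver votes for a bus-frequency mode around latency-sensitive work and drops the vote afterwards. The calls are assumed to be refcounted per mode.

#include <linux/busfreq-imx6.h>

static void example_start_burst(void)
{
	/* vote to keep DDR/AHB at full speed while DMA is active */
	request_bus_freq(BUS_FREQ_HIGH);
	/* ... start DMA ... */
}

static void example_finish_burst(void)
{
	/* ... DMA complete ... */
	release_bus_freq(BUS_FREQ_HIGH);	/* drop our vote */
}
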
+diff -Nur linux-4.1.3/include/linux/cgroup_subsys.h linux-xbian-imx6/include/linux/cgroup_subsys.h
+--- linux-4.1.3/include/linux/cgroup_subsys.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/include/linux/cgroup_subsys.h 2015-07-27 23:13:09.483147501 +0200
+@@ -35,6 +35,10 @@
+ SUBSYS(net_cls)
+ #endif
+
++#if IS_ENABLED(CONFIG_CGROUP_BFQIO)
++SUBSYS(bfqio)
++#endif
++
+ #if IS_ENABLED(CONFIG_CGROUP_PERF)
+ SUBSYS(perf_event)
+ #endif
+diff -Nur linux-4.1.3/include/linux/device_cooling.h linux-xbian-imx6/include/linux/device_cooling.h
+--- linux-4.1.3/include/linux/device_cooling.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/linux/device_cooling.h 2015-07-27 23:13:09.491119057 +0200
+@@ -0,0 +1,45 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ */
++
++#ifndef __DEVICE_THERMAL_H__
++#define __DEVICE_THERMAL_H__
++
++#include <linux/thermal.h>
++
++#ifdef CONFIG_DEVICE_THERMAL
++int register_devfreq_cooling_notifier(struct notifier_block *nb);
++int unregister_devfreq_cooling_notifier(struct notifier_block *nb);
++struct thermal_cooling_device *devfreq_cooling_register(void);
++void devfreq_cooling_unregister(struct thermal_cooling_device *cdev);
++#else
++static inline
++int register_devfreq_cooling_notifier(struct notifier_block *nb)
++{
++ return 0;
++}
++
++static inline
++int unregister_devfreq_cooling_notifier(struct notifier_block *nb)
++{
++ return 0;
++}
++
++static inline
++struct thermal_cooling_device *devfreq_cooling_register(void)
++{
++ return NULL;
++}
++
++static inline
++void devfreq_cooling_unregister(struct thermal_cooling_device *cdev)
++{
++ return;
++}
++#endif
++#endif /* __DEVICE_THERMAL_H__ */
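
A hypothetical consumer of the device-cooling header above: register a notifier and throttle when the cooling state changes. The meaning of the notifier argument is an assumption inferred from the API shape.

#include <linux/device_cooling.h>
#include <linux/notifier.h>

static int example_cooling_notify(struct notifier_block *nb,
				  unsigned long state, void *data)
{
	/* assumed: a non-zero state means "reduce load" */
	return NOTIFY_OK;
}

static struct notifier_block example_cooling_nb = {
	.notifier_call = example_cooling_notify,
};

/* probe:  register_devfreq_cooling_notifier(&example_cooling_nb);   */
/* remove: unregister_devfreq_cooling_notifier(&example_cooling_nb); */
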
+diff -Nur linux-4.1.3/include/linux/fb.h linux-xbian-imx6/include/linux/fb.h
+--- linux-4.1.3/include/linux/fb.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/include/linux/fb.h 2015-07-27 23:13:09.495104834 +0200
+@@ -702,7 +702,8 @@
+ #define FB_MODE_IS_VESA 4
+ #define FB_MODE_IS_CALCULATED 8
+ #define FB_MODE_IS_FIRST 16
+-#define FB_MODE_IS_FROM_VAR 32
++#define FB_MODE_IS_FROM_VAR 32
++#define FB_MODE_IS_3D 64
+
+ extern int fbmon_dpms(const struct fb_info *fb_info);
+ extern int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var,
+diff -Nur linux-4.1.3/include/linux/ipu.h linux-xbian-imx6/include/linux/ipu.h
+--- linux-4.1.3/include/linux/ipu.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/linux/ipu.h 2015-07-27 23:13:09.542934188 +0200
+@@ -0,0 +1,38 @@
++/*
++ * Copyright 2005-2013 Freescale Semiconductor, Inc.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU Lesser General
++ * Public License. You may obtain a copy of the GNU Lesser General
++ * Public License Version 2.1 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/lgpl-license.html
++ * http://www.gnu.org/copyleft/lgpl.html
++ */
++
++/*!
++ * @defgroup IPU MXC Image Processing Unit (IPU) Driver
++ */
++/*!
++ * @file linux/ipu.h
++ *
++ * @brief This file contains the IPU driver API declarations.
++ *
++ * @ingroup IPU
++ */
++
++#ifndef __LINUX_IPU_H__
++#define __LINUX_IPU_H__
++
++#include <linux/interrupt.h>
++#include <uapi/linux/ipu.h>
++
++unsigned int fmt_to_bpp(unsigned int pixelformat);
++cs_t colorspaceofpixel(int fmt);
++int need_csc(int ifmt, int ofmt);
++
++int ipu_queue_task(struct ipu_task *task);
++int ipu_check_task(struct ipu_task *task);
++
++#endif
+diff -Nur linux-4.1.3/include/linux/ipu-v3.h linux-xbian-imx6/include/linux/ipu-v3.h
+--- linux-4.1.3/include/linux/ipu-v3.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/linux/ipu-v3.h 2015-07-27 23:13:09.542934188 +0200
+@@ -0,0 +1,752 @@
++/*
++ * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++ * for more details.
++ */
++
++#ifndef __LINUX_IPU_V3_H_
++#define __LINUX_IPU_V3_H_
++
++#include <linux/ipu.h>
++
++/* IPU Driver channels definitions. */
++/* Note these are different from IDMA channels */
++#define IPU_MAX_CH 32
++#define _MAKE_CHAN(num, v_in, g_in, a_in, out) \
++ ((num << 24) | (v_in << 18) | (g_in << 12) | (a_in << 6) | out)
++#define _MAKE_ALT_CHAN(ch) (ch | (IPU_MAX_CH << 24))
++#define IPU_CHAN_ID(ch) (ch >> 24)
++#define IPU_CHAN_ALT(ch) (ch & 0x02000000)
++#define IPU_CHAN_ALPHA_IN_DMA(ch) ((uint32_t) (ch >> 6) & 0x3F)
++#define IPU_CHAN_GRAPH_IN_DMA(ch) ((uint32_t) (ch >> 12) & 0x3F)
++#define IPU_CHAN_VIDEO_IN_DMA(ch) ((uint32_t) (ch >> 18) & 0x3F)
++#define IPU_CHAN_OUT_DMA(ch) ((uint32_t) (ch & 0x3F))
++#define NO_DMA 0x3F
++#define ALT 1
++/*!
++ * Enumeration of IPU logical channels. An IPU logical channel is defined as a
++ * combination of an input (memory to IPU), output (IPU to memory), and/or
++ * secondary input IDMA channels and in some cases an Image Converter task.
++ * Some channels consist of only an input or output.
++ */
++typedef enum {
++ CHAN_NONE = -1,
++ MEM_ROT_ENC_MEM = _MAKE_CHAN(1, 45, NO_DMA, NO_DMA, 48),
++ MEM_ROT_VF_MEM = _MAKE_CHAN(2, 46, NO_DMA, NO_DMA, 49),
++ MEM_ROT_PP_MEM = _MAKE_CHAN(3, 47, NO_DMA, NO_DMA, 50),
++
++ MEM_PRP_ENC_MEM = _MAKE_CHAN(4, 12, 14, 17, 20),
++ MEM_PRP_VF_MEM = _MAKE_CHAN(5, 12, 14, 17, 21),
++ MEM_PP_MEM = _MAKE_CHAN(6, 11, 15, 18, 22),
++
++ MEM_DC_SYNC = _MAKE_CHAN(7, 28, NO_DMA, NO_DMA, NO_DMA),
++ MEM_DC_ASYNC = _MAKE_CHAN(8, 41, NO_DMA, NO_DMA, NO_DMA),
++ MEM_BG_SYNC = _MAKE_CHAN(9, 23, NO_DMA, 51, NO_DMA),
++ MEM_FG_SYNC = _MAKE_CHAN(10, 27, NO_DMA, 31, NO_DMA),
++
++ MEM_BG_ASYNC0 = _MAKE_CHAN(11, 24, NO_DMA, 52, NO_DMA),
++ MEM_FG_ASYNC0 = _MAKE_CHAN(12, 29, NO_DMA, 33, NO_DMA),
++ MEM_BG_ASYNC1 = _MAKE_ALT_CHAN(MEM_BG_ASYNC0),
++ MEM_FG_ASYNC1 = _MAKE_ALT_CHAN(MEM_FG_ASYNC0),
++
++ DIRECT_ASYNC0 = _MAKE_CHAN(13, NO_DMA, NO_DMA, NO_DMA, NO_DMA),
++ DIRECT_ASYNC1 = _MAKE_CHAN(14, NO_DMA, NO_DMA, NO_DMA, NO_DMA),
++
++ CSI_MEM0 = _MAKE_CHAN(15, NO_DMA, NO_DMA, NO_DMA, 0),
++ CSI_MEM1 = _MAKE_CHAN(16, NO_DMA, NO_DMA, NO_DMA, 1),
++ CSI_MEM2 = _MAKE_CHAN(17, NO_DMA, NO_DMA, NO_DMA, 2),
++ CSI_MEM3 = _MAKE_CHAN(18, NO_DMA, NO_DMA, NO_DMA, 3),
++
++ CSI_MEM = CSI_MEM0,
++
++ CSI_PRP_ENC_MEM = _MAKE_CHAN(19, NO_DMA, NO_DMA, NO_DMA, 20),
++ CSI_PRP_VF_MEM = _MAKE_CHAN(20, NO_DMA, NO_DMA, NO_DMA, 21),
++
++	/* for vdi mem->vdi->ic->mem, add graphics plane and alpha */
++ MEM_VDI_PRP_VF_MEM_P = _MAKE_CHAN(21, 8, 14, 17, 21),
++ MEM_VDI_PRP_VF_MEM = _MAKE_CHAN(22, 9, 14, 17, 21),
++ MEM_VDI_PRP_VF_MEM_N = _MAKE_CHAN(23, 10, 14, 17, 21),
++
++ /* for vdi mem->vdi->mem */
++ MEM_VDI_MEM_P = _MAKE_CHAN(24, 8, NO_DMA, NO_DMA, 5),
++ MEM_VDI_MEM = _MAKE_CHAN(25, 9, NO_DMA, NO_DMA, 5),
++ MEM_VDI_MEM_N = _MAKE_CHAN(26, 10, NO_DMA, NO_DMA, 5),
++
++ /* fake channel for vdoa to link with IPU */
++ MEM_VDOA_MEM = _MAKE_CHAN(27, NO_DMA, NO_DMA, NO_DMA, NO_DMA),
++
++ MEM_PP_ADC = CHAN_NONE,
++ ADC_SYS2 = CHAN_NONE,
++
++} ipu_channel_t;
++
++/*!
++ * Enumeration of types of buffers for a logical channel.
++ */
++typedef enum {
++ IPU_OUTPUT_BUFFER = 0, /*!< Buffer for output from IPU */
++ IPU_ALPHA_IN_BUFFER = 1, /*!< Buffer for input to IPU */
++ IPU_GRAPH_IN_BUFFER = 2, /*!< Buffer for input to IPU */
++ IPU_VIDEO_IN_BUFFER = 3, /*!< Buffer for input to IPU */
++ IPU_INPUT_BUFFER = IPU_VIDEO_IN_BUFFER,
++ IPU_SEC_INPUT_BUFFER = IPU_GRAPH_IN_BUFFER,
++} ipu_buffer_t;
++
++#define IPU_PANEL_SERIAL 1
++#define IPU_PANEL_PARALLEL 2
++
++/*!
++ * Enumeration of ADC channel operation mode.
++ */
++typedef enum {
++ Disable,
++ WriteTemplateNonSeq,
++ ReadTemplateNonSeq,
++ WriteTemplateUnCon,
++ ReadTemplateUnCon,
++ WriteDataWithRS,
++ WriteDataWoRS,
++ WriteCmd
++} mcu_mode_t;
++
++/*!
++ * Enumeration of ADC channel addressing mode.
++ */
++typedef enum {
++ FullWoBE,
++ FullWithBE,
++ XY
++} display_addressing_t;
++
++/*!
++ * Union of initialization parameters for a logical channel.
++ */
++typedef union {
++ struct {
++ uint32_t csi;
++ uint32_t mipi_id;
++ uint32_t mipi_vc;
++ bool mipi_en;
++ bool interlaced;
++ } csi_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ uint32_t outh_resize_ratio;
++ uint32_t outv_resize_ratio;
++ uint32_t csi;
++ uint32_t mipi_id;
++ uint32_t mipi_vc;
++ bool mipi_en;
++ } csi_prp_enc_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ uint32_t outh_resize_ratio;
++ uint32_t outv_resize_ratio;
++ } mem_prp_enc_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ } mem_rot_enc_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ uint32_t outh_resize_ratio;
++ uint32_t outv_resize_ratio;
++ bool graphics_combine_en;
++ bool global_alpha_en;
++ bool key_color_en;
++ uint32_t in_g_pixel_fmt;
++ uint8_t alpha;
++ uint32_t key_color;
++ bool alpha_chan_en;
++ ipu_motion_sel motion_sel;
++ enum v4l2_field field_fmt;
++ uint32_t csi;
++ uint32_t mipi_id;
++ uint32_t mipi_vc;
++ bool mipi_en;
++ } csi_prp_vf_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ bool graphics_combine_en;
++ bool global_alpha_en;
++ bool key_color_en;
++ display_port_t disp;
++ uint32_t out_left;
++ uint32_t out_top;
++ } csi_prp_vf_adc;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ uint32_t outh_resize_ratio;
++ uint32_t outv_resize_ratio;
++ bool graphics_combine_en;
++ bool global_alpha_en;
++ bool key_color_en;
++ uint32_t in_g_pixel_fmt;
++ uint8_t alpha;
++ uint32_t key_color;
++ bool alpha_chan_en;
++ ipu_motion_sel motion_sel;
++ enum v4l2_field field_fmt;
++ } mem_prp_vf_mem;
++ struct {
++ uint32_t temp;
++ } mem_prp_vf_adc;
++ struct {
++ uint32_t temp;
++ } mem_rot_vf_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ uint32_t outh_resize_ratio;
++ uint32_t outv_resize_ratio;
++ bool graphics_combine_en;
++ bool global_alpha_en;
++ bool key_color_en;
++ uint32_t in_g_pixel_fmt;
++ uint8_t alpha;
++ uint32_t key_color;
++ bool alpha_chan_en;
++ } mem_pp_mem;
++ struct {
++ uint32_t temp;
++ } mem_rot_mem;
++ struct {
++ uint32_t in_width;
++ uint32_t in_height;
++ uint32_t in_pixel_fmt;
++ uint32_t out_width;
++ uint32_t out_height;
++ uint32_t out_pixel_fmt;
++ bool graphics_combine_en;
++ bool global_alpha_en;
++ bool key_color_en;
++ display_port_t disp;
++ uint32_t out_left;
++ uint32_t out_top;
++ } mem_pp_adc;
++ struct {
++ uint32_t di;
++ bool interlaced;
++ uint32_t in_pixel_fmt;
++ uint32_t out_pixel_fmt;
++ } mem_dc_sync;
++ struct {
++ uint32_t temp;
++ } mem_sdc_fg;
++ struct {
++ uint32_t di;
++ bool interlaced;
++ uint32_t in_pixel_fmt;
++ uint32_t out_pixel_fmt;
++ bool alpha_chan_en;
++ } mem_dp_bg_sync;
++ struct {
++ uint32_t temp;
++ } mem_sdc_bg;
++ struct {
++ uint32_t di;
++ bool interlaced;
++ uint32_t in_pixel_fmt;
++ uint32_t out_pixel_fmt;
++ bool alpha_chan_en;
++ } mem_dp_fg_sync;
++ struct {
++ uint32_t di;
++ } direct_async;
++ struct {
++ display_port_t disp;
++ mcu_mode_t ch_mode;
++ uint32_t out_left;
++ uint32_t out_top;
++ } adc_sys1;
++ struct {
++ display_port_t disp;
++ mcu_mode_t ch_mode;
++ uint32_t out_left;
++ uint32_t out_top;
++ } adc_sys2;
++} ipu_channel_params_t;
++
++/*
++ * IPU_IRQF_ONESHOT - Interrupt is not re-enabled after the irq handler has finished.
++ */
++#define IPU_IRQF_NONE 0x00000000
++#define IPU_IRQF_ONESHOT 0x00000001
++
++/*!
++ * Enumeration of IPU interrupt sources.
++ */
++enum ipu_irq_line {
++ IPU_IRQ_CSI0_OUT_EOF = 0,
++ IPU_IRQ_CSI1_OUT_EOF = 1,
++ IPU_IRQ_CSI2_OUT_EOF = 2,
++ IPU_IRQ_CSI3_OUT_EOF = 3,
++ IPU_IRQ_VDIC_OUT_EOF = 5,
++ IPU_IRQ_VDI_P_IN_EOF = 8,
++ IPU_IRQ_VDI_C_IN_EOF = 9,
++ IPU_IRQ_VDI_N_IN_EOF = 10,
++ IPU_IRQ_PP_IN_EOF = 11,
++ IPU_IRQ_PRP_IN_EOF = 12,
++ IPU_IRQ_PRP_GRAPH_IN_EOF = 14,
++ IPU_IRQ_PP_GRAPH_IN_EOF = 15,
++ IPU_IRQ_PRP_ALPHA_IN_EOF = 17,
++ IPU_IRQ_PP_ALPHA_IN_EOF = 18,
++ IPU_IRQ_PRP_ENC_OUT_EOF = 20,
++ IPU_IRQ_PRP_VF_OUT_EOF = 21,
++ IPU_IRQ_PP_OUT_EOF = 22,
++ IPU_IRQ_BG_SYNC_EOF = 23,
++ IPU_IRQ_BG_ASYNC_EOF = 24,
++ IPU_IRQ_FG_SYNC_EOF = 27,
++ IPU_IRQ_DC_SYNC_EOF = 28,
++ IPU_IRQ_FG_ASYNC_EOF = 29,
++ IPU_IRQ_FG_ALPHA_SYNC_EOF = 31,
++
++ IPU_IRQ_FG_ALPHA_ASYNC_EOF = 33,
++ IPU_IRQ_DC_READ_EOF = 40,
++ IPU_IRQ_DC_ASYNC_EOF = 41,
++ IPU_IRQ_DC_CMD1_EOF = 42,
++ IPU_IRQ_DC_CMD2_EOF = 43,
++ IPU_IRQ_DC_MASK_EOF = 44,
++ IPU_IRQ_PRP_ENC_ROT_IN_EOF = 45,
++ IPU_IRQ_PRP_VF_ROT_IN_EOF = 46,
++ IPU_IRQ_PP_ROT_IN_EOF = 47,
++ IPU_IRQ_PRP_ENC_ROT_OUT_EOF = 48,
++ IPU_IRQ_PRP_VF_ROT_OUT_EOF = 49,
++ IPU_IRQ_PP_ROT_OUT_EOF = 50,
++ IPU_IRQ_BG_ALPHA_SYNC_EOF = 51,
++ IPU_IRQ_BG_ALPHA_ASYNC_EOF = 52,
++
++ IPU_IRQ_BG_SYNC_NFACK = 64 + 23,
++ IPU_IRQ_FG_SYNC_NFACK = 64 + 27,
++ IPU_IRQ_DC_SYNC_NFACK = 64 + 28,
++
++ IPU_IRQ_DP_SF_START = 448 + 2,
++ IPU_IRQ_DP_SF_END = 448 + 3,
++ IPU_IRQ_BG_SF_END = IPU_IRQ_DP_SF_END,
++ IPU_IRQ_DC_FC_0 = 448 + 8,
++ IPU_IRQ_DC_FC_1 = 448 + 9,
++ IPU_IRQ_DC_FC_2 = 448 + 10,
++ IPU_IRQ_DC_FC_3 = 448 + 11,
++ IPU_IRQ_DC_FC_4 = 448 + 12,
++ IPU_IRQ_DC_FC_6 = 448 + 13,
++ IPU_IRQ_VSYNC_PRE_0 = 448 + 14,
++ IPU_IRQ_VSYNC_PRE_1 = 448 + 15,
++
++ IPU_IRQ_COUNT
++};
++
++/*!
++ * Bitfield of Display Interface signal polarities.
++ */
++typedef struct {
++ unsigned datamask_en:1;
++ unsigned int_clk:1;
++ unsigned interlaced:1;
++ unsigned odd_field_first:1;
++ unsigned clksel_en:1;
++ unsigned clkidle_en:1;
++ unsigned data_pol:1; /* true = inverted */
++ unsigned clk_pol:1; /* true = rising edge */
++ unsigned enable_pol:1;
++ unsigned Hsync_pol:1; /* true = active high */
++ unsigned Vsync_pol:1;
++} ipu_di_signal_cfg_t;
++
++/*!
++ * Bitfield of CSI signal polarities and modes.
++ */
++
++typedef struct {
++ unsigned data_width:4;
++ unsigned clk_mode:3;
++ unsigned ext_vsync:1;
++ unsigned Vsync_pol:1;
++ unsigned Hsync_pol:1;
++ unsigned pixclk_pol:1;
++ unsigned data_pol:1;
++ unsigned sens_clksrc:1;
++ unsigned pack_tight:1;
++ unsigned force_eof:1;
++ unsigned data_en_pol:1;
++ unsigned data_fmt;
++ unsigned csi;
++ unsigned mclk;
++} ipu_csi_signal_cfg_t;
++
++/*!
++ * Enumeration of CSI data bus widths.
++ */
++enum {
++ IPU_CSI_DATA_WIDTH_4 = 0,
++ IPU_CSI_DATA_WIDTH_8 = 1,
++ IPU_CSI_DATA_WIDTH_10 = 3,
++ IPU_CSI_DATA_WIDTH_16 = 9,
++};
++
++/*!
++ * Enumeration of CSI clock modes.
++ */
++enum {
++ IPU_CSI_CLK_MODE_GATED_CLK,
++ IPU_CSI_CLK_MODE_NONGATED_CLK,
++ IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE,
++ IPU_CSI_CLK_MODE_CCIR656_INTERLACED,
++ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_DDR,
++ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR,
++ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_DDR,
++ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR,
++};
++
++enum {
++ IPU_CSI_MIPI_DI0,
++ IPU_CSI_MIPI_DI1,
++ IPU_CSI_MIPI_DI2,
++ IPU_CSI_MIPI_DI3,
++};
++
++typedef enum {
++ RGB,
++ YCbCr,
++ YUV
++} ipu_color_space_t;
++
++/*!
++ * Enumeration of ADC vertical sync mode.
++ */
++typedef enum {
++ VsyncNone,
++ VsyncInternal,
++ VsyncCSI,
++ VsyncExternal
++} vsync_t;
++
++typedef enum {
++ DAT,
++ CMD
++} cmddata_t;
++
++/*!
++ * Enumeration of ADC display update mode.
++ */
++typedef enum {
++ IPU_ADC_REFRESH_NONE,
++ IPU_ADC_AUTO_REFRESH,
++ IPU_ADC_AUTO_REFRESH_SNOOP,
++ IPU_ADC_SNOOPING,
++} ipu_adc_update_mode_t;
++
++/*!
++ * Enumeration of ADC display interface types (serial or parallel).
++ */
++enum {
++ IPU_ADC_IFC_MODE_SYS80_TYPE1,
++ IPU_ADC_IFC_MODE_SYS80_TYPE2,
++ IPU_ADC_IFC_MODE_SYS68K_TYPE1,
++ IPU_ADC_IFC_MODE_SYS68K_TYPE2,
++ IPU_ADC_IFC_MODE_3WIRE_SERIAL,
++ IPU_ADC_IFC_MODE_4WIRE_SERIAL,
++ IPU_ADC_IFC_MODE_5WIRE_SERIAL_CLK,
++ IPU_ADC_IFC_MODE_5WIRE_SERIAL_CS,
++};
++
++enum {
++ IPU_ADC_IFC_WIDTH_8,
++ IPU_ADC_IFC_WIDTH_16,
++};
++
++/*!
++ * Enumeration of ADC display interface burst mode.
++ */
++enum {
++ IPU_ADC_BURST_WCS,
++ IPU_ADC_BURST_WBLCK,
++ IPU_ADC_BURST_NONE,
++ IPU_ADC_BURST_SERIAL,
++};
++
++/*!
++ * Enumeration of ADC display interface RW signal timing modes.
++ */
++enum {
++ IPU_ADC_SER_NO_RW,
++ IPU_ADC_SER_RW_BEFORE_RS,
++ IPU_ADC_SER_RW_AFTER_RS,
++};
++
++/*!
++ * Bitfield of ADC signal polarities and modes.
++ */
++typedef struct {
++ unsigned data_pol:1;
++ unsigned clk_pol:1;
++ unsigned cs_pol:1;
++ unsigned rs_pol:1;
++ unsigned addr_pol:1;
++ unsigned read_pol:1;
++ unsigned write_pol:1;
++ unsigned Vsync_pol:1;
++ unsigned burst_pol:1;
++ unsigned burst_mode:2;
++ unsigned ifc_mode:3;
++ unsigned ifc_width:5;
++ unsigned ser_preamble_len:4;
++ unsigned ser_preamble:8;
++ unsigned ser_rw_mode:2;
++} ipu_adc_sig_cfg_t;
++
++/*!
++ * Enumeration of ADC template commands.
++ */
++enum {
++ RD_DATA,
++ RD_ACK,
++ RD_WAIT,
++ WR_XADDR,
++ WR_YADDR,
++ WR_ADDR,
++ WR_CMND,
++ WR_DATA,
++};
++
++/*!
++ * Enumeration of ADC template command flow control.
++ */
++enum {
++ SINGLE_STEP,
++ PAUSE,
++ STOP,
++};
++
++
++/* Define template constants */
++#define ATM_ADDR_RANGE 0x20 /* offset address of DISP */
++#define TEMPLATE_BUF_SIZE 0x20 /* size of template */
++
++/*!
++ * Define to create ADC template command entry.
++ */
++#define ipu_adc_template_gen(oc, rs, fc, dat) (((rs) << 29) | ((fc) << 27) | \
++ ((oc) << 24) | (dat))
++
++typedef struct {
++ u32 reg;
++ u32 value;
++} ipu_lpmc_reg_t;
++
++#define IPU_LPMC_REG_READ 0x80000000L
++
++#define CSI_MCLK_VF 1
++#define CSI_MCLK_ENC 2
++#define CSI_MCLK_RAW 4
++#define CSI_MCLK_I2C 8
++
++struct ipu_soc;
++/* Common IPU API */
++struct ipu_soc *ipu_get_soc(int id);
++int32_t ipu_init_channel(struct ipu_soc *ipu, ipu_channel_t channel, ipu_channel_params_t *params);
++void ipu_uninit_channel(struct ipu_soc *ipu, ipu_channel_t channel);
++void ipu_disable_hsp_clk(struct ipu_soc *ipu);
++
++static inline bool ipu_can_rotate_in_place(ipu_rotate_mode_t rot)
++{
++#ifdef CONFIG_MXC_IPU_V3D
++ return (rot < IPU_ROTATE_HORIZ_FLIP);
++#else
++ return (rot < IPU_ROTATE_90_RIGHT);
++#endif
++}
++
++int32_t ipu_init_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t pixel_fmt,
++ uint16_t width, uint16_t height,
++ uint32_t stride,
++ ipu_rotate_mode_t rot_mode,
++ dma_addr_t phyaddr_0, dma_addr_t phyaddr_1,
++ dma_addr_t phyaddr_2,
++ uint32_t u_offset, uint32_t v_offset);
++
++int32_t ipu_update_channel_buffer(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t bufNum, dma_addr_t phyaddr);
++
++int32_t ipu_update_channel_offset(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t pixel_fmt,
++ uint16_t width, uint16_t height,
++ uint32_t stride,
++ uint32_t u, uint32_t v,
++ uint32_t vertical_offset, uint32_t horizontal_offset);
++
++int32_t ipu_select_buffer(struct ipu_soc *ipu, ipu_channel_t channel,
++ ipu_buffer_t type, uint32_t bufNum);
++int32_t ipu_select_multi_vdi_buffer(struct ipu_soc *ipu, uint32_t bufNum);
++
++int32_t ipu_link_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch);
++int32_t ipu_unlink_channels(struct ipu_soc *ipu, ipu_channel_t src_ch, ipu_channel_t dest_ch);
++
++int32_t ipu_is_channel_busy(struct ipu_soc *ipu, ipu_channel_t channel);
++int32_t ipu_check_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t bufNum);
++void ipu_clear_buffer_ready(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type,
++ uint32_t bufNum);
++uint32_t ipu_get_cur_buffer_idx(struct ipu_soc *ipu, ipu_channel_t channel, ipu_buffer_t type);
++int32_t ipu_enable_channel(struct ipu_soc *ipu, ipu_channel_t channel);
++int32_t ipu_disable_channel(struct ipu_soc *ipu, ipu_channel_t channel, bool wait_for_stop);
++int32_t ipu_swap_channel(struct ipu_soc *ipu, ipu_channel_t from_ch, ipu_channel_t to_ch);
++uint32_t ipu_channel_status(struct ipu_soc *ipu, ipu_channel_t channel);
++
++int32_t ipu_enable_csi(struct ipu_soc *ipu, uint32_t csi);
++int32_t ipu_disable_csi(struct ipu_soc *ipu, uint32_t csi);
++
++int ipu_lowpwr_display_enable(void);
++int ipu_lowpwr_display_disable(void);
++
++int ipu_enable_irq(struct ipu_soc *ipu, uint32_t irq);
++void ipu_disable_irq(struct ipu_soc *ipu, uint32_t irq);
++void ipu_clear_irq(struct ipu_soc *ipu, uint32_t irq);
++int ipu_request_irq(struct ipu_soc *ipu, uint32_t irq,
++ irqreturn_t(*handler) (int, void *),
++ uint32_t irq_flags, const char *devname, void *dev_id);
++void ipu_free_irq(struct ipu_soc *ipu, uint32_t irq, void *dev_id);
++bool ipu_get_irq_status(struct ipu_soc *ipu, uint32_t irq);
++void ipu_set_csc_coefficients(struct ipu_soc *ipu, ipu_channel_t channel, int32_t param[][3]);
++int32_t ipu_set_channel_bandmode(struct ipu_soc *ipu, ipu_channel_t channel,
++ ipu_buffer_t type, uint32_t band_height);
++
++/* two stripe calculations */
++struct stripe_param{
++ unsigned int input_width; /* width of the input stripe */
++ unsigned int output_width; /* width of the output stripe */
++ unsigned int input_column; /* the first column on the input stripe */
++ unsigned int output_column; /* the first column on the output stripe */
++ unsigned int idr;
++	/* inverse downsizing ratio parameter; expressed as a power of 2 */
++ unsigned int irr;
++ /* inverse resizing ratio parameter; expressed as a multiple of 2^-13 */
++};
++int ipu_calc_stripes_sizes(const unsigned int input_frame_width,
++ unsigned int output_frame_width,
++ const unsigned int maximal_stripe_width,
++ const unsigned long long cirr,
++ const unsigned int equal_stripes,
++ u32 input_pixelformat,
++ u32 output_pixelformat,
++ struct stripe_param *left,
++ struct stripe_param *right);
++
++/* SDC API */
++int32_t ipu_init_sync_panel(struct ipu_soc *ipu, int disp,
++ uint32_t pixel_clk,
++ uint16_t width, uint16_t height,
++ uint32_t pixel_fmt,
++ uint16_t h_start_width, uint16_t h_sync_width,
++ uint16_t h_end_width, uint16_t v_start_width,
++ uint16_t v_sync_width, uint16_t v_end_width,
++ uint32_t v_to_h_sync, ipu_di_signal_cfg_t sig);
++
++void ipu_uninit_sync_panel(struct ipu_soc *ipu, int disp);
++
++int32_t ipu_disp_set_window_pos(struct ipu_soc *ipu, ipu_channel_t channel, int16_t x_pos,
++ int16_t y_pos);
++int32_t ipu_disp_get_window_pos(struct ipu_soc *ipu, ipu_channel_t channel, int16_t *x_pos,
++ int16_t *y_pos);
++int32_t ipu_disp_set_global_alpha(struct ipu_soc *ipu, ipu_channel_t channel, bool enable,
++ uint8_t alpha);
++int32_t ipu_disp_set_color_key(struct ipu_soc *ipu, ipu_channel_t channel, bool enable,
++ uint32_t colorKey);
++int32_t ipu_disp_set_gamma_correction(struct ipu_soc *ipu, ipu_channel_t channel, bool enable,
++ int constk[], int slopek[]);
++
++int ipu_init_async_panel(struct ipu_soc *ipu, int disp, int type, uint32_t cycle_time,
++ uint32_t pixel_fmt, ipu_adc_sig_cfg_t sig);
++void ipu_disp_direct_write(struct ipu_soc *ipu, ipu_channel_t channel, u32 value, u32 offset);
++void ipu_reset_disp_panel(struct ipu_soc *ipu);
++
++/* CMOS Sensor Interface API */
++int32_t ipu_csi_init_interface(struct ipu_soc *ipu, uint16_t width, uint16_t height,
++ uint32_t pixel_fmt, ipu_csi_signal_cfg_t sig);
++
++int32_t ipu_csi_get_sensor_protocol(struct ipu_soc *ipu, uint32_t csi);
++
++int32_t ipu_csi_enable_mclk(struct ipu_soc *ipu, int src, bool flag, bool wait);
++
++static inline int32_t ipu_csi_enable_mclk_if(struct ipu_soc *ipu, int src, uint32_t csi,
++ bool flag, bool wait)
++{
++ return ipu_csi_enable_mclk(ipu, csi, flag, wait);
++}
++
++int ipu_csi_read_mclk_flag(void);
++
++void ipu_csi_flash_strobe(bool flag);
++
++void ipu_csi_get_window_size(struct ipu_soc *ipu, uint32_t *width, uint32_t *height, uint32_t csi);
++
++void ipu_csi_set_window_size(struct ipu_soc *ipu, uint32_t width, uint32_t height, uint32_t csi);
++
++void ipu_csi_set_window_pos(struct ipu_soc *ipu, uint32_t left, uint32_t top, uint32_t csi);
++
++uint32_t bytes_per_pixel(uint32_t fmt);
++
++bool ipu_ch_param_bad_alpha_pos(uint32_t fmt);
++
++struct ipuv3_fb_platform_data {
++ char disp_dev[32];
++ u32 interface_pix_fmt;
++ char *mode_str;
++ int default_bpp;
++ bool int_clk;
++
++ /* reserved mem */
++ resource_size_t res_base[2];
++ resource_size_t res_size[2];
++
++	/*
++	 * Late init to avoid the display channel being
++	 * re-initialized, as the bootloader has probably
++	 * already set up the channel.
++	 */
++ bool late_init;
++};
++
++#endif /* __LINUX_IPU_V3_H_ */
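
A hypothetical bring-up sketch for the channel API declared above, in the order the declarations suggest (init channel, init buffers, enable, select). IPU_ROTATE_NONE and the pixel-format values come from uapi/linux/ipu.h, which is not shown here; the 640x480/16bpp numbers are placeholders.

#include <linux/string.h>
#include <linux/ipu-v3.h>

static int example_pp_channel(struct ipu_soc *ipu, dma_addr_t in_buf,
			      dma_addr_t out_buf, uint32_t fmt)
{
	ipu_channel_params_t params;
	int ret;

	memset(&params, 0, sizeof(params));
	params.mem_pp_mem.in_width = 640;
	params.mem_pp_mem.in_height = 480;
	params.mem_pp_mem.in_pixel_fmt = fmt;
	params.mem_pp_mem.out_width = 640;
	params.mem_pp_mem.out_height = 480;
	params.mem_pp_mem.out_pixel_fmt = fmt;

	ret = ipu_init_channel(ipu, MEM_PP_MEM, &params);
	if (ret)
		return ret;

	/* stride of 640 * 2 assumes a 16bpp format */
	ret = ipu_init_channel_buffer(ipu, MEM_PP_MEM, IPU_INPUT_BUFFER,
				      fmt, 640, 480, 640 * 2,
				      IPU_ROTATE_NONE, in_buf, 0, 0, 0, 0);
	if (!ret)
		ret = ipu_init_channel_buffer(ipu, MEM_PP_MEM,
					      IPU_OUTPUT_BUFFER, fmt,
					      640, 480, 640 * 2,
					      IPU_ROTATE_NONE, out_buf,
					      0, 0, 0, 0);
	if (!ret)
		ret = ipu_enable_channel(ipu, MEM_PP_MEM);
	if (!ret)
		ret = ipu_select_buffer(ipu, MEM_PP_MEM, IPU_INPUT_BUFFER, 0);
	return ret;
}
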
+diff -Nur linux-4.1.3/include/linux/mfd/mxc-hdmi-core.h linux-xbian-imx6/include/linux/mfd/mxc-hdmi-core.h
+--- linux-4.1.3/include/linux/mfd/mxc-hdmi-core.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/linux/mfd/mxc-hdmi-core.h 2015-07-27 23:13:09.586777757 +0200
+@@ -0,0 +1,68 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#ifndef __LINUX_MXC_HDMI_CORE_H_
++#define __LINUX_MXC_HDMI_CORE_H_
++
++#include <video/mxc_edid.h>
++
++#include <sound/core.h>
++#include <sound/pcm.h>
++#include <sound/pcm_params.h>
++#include <sound/soc.h>
++
++#define IRQ_DISABLE_SUCCEED 0
++#define IRQ_DISABLE_FAIL 1
++
++bool hdmi_check_overflow(void);
++
++u8 hdmi_readb(unsigned int reg);
++void hdmi_writeb(u8 value, unsigned int reg);
++void hdmi_mask_writeb(u8 data, unsigned int addr, u8 shift, u8 mask);
++unsigned int hdmi_read4(unsigned int reg);
++void hdmi_write4(unsigned int value, unsigned int reg);
++
++void hdmi_irq_init(void);
++void hdmi_irq_enable(int irq);
++unsigned int hdmi_irq_disable(int irq);
++
++void hdmi_set_sample_rate(unsigned int rate);
++void hdmi_set_dma_mode(unsigned int dma_running);
++void hdmi_init_clk_regenerator(void);
++void hdmi_clk_regenerator_update_pixel_clock(u32 pixclock);
++
++void hdmi_set_edid_cfg(int edid_status, struct mxc_edid_cfg *cfg);
++int hdmi_get_edid_cfg(struct mxc_edid_cfg *cfg);
++
++extern int mxc_hdmi_ipu_id;
++extern int mxc_hdmi_disp_id;
++
++void hdmi_set_registered(int registered);
++int hdmi_get_registered(void);
++int mxc_hdmi_abort_stream(void);
++int mxc_hdmi_register_audio(struct snd_pcm_substream *substream);
++void mxc_hdmi_unregister_audio(struct snd_pcm_substream *substream);
++void hdmi_set_dvi_mode(unsigned int state);
++unsigned int hdmi_set_cable_state(unsigned int state);
++unsigned int hdmi_set_blank_state(unsigned int state);
++int check_hdmi_state(void);
++
++void hdmi_cec_start_device(void);
++void hdmi_cec_stop_device(void);
++
++#endif
+diff -Nur linux-4.1.3/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h linux-xbian-imx6/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+--- linux-4.1.3/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h 2015-07-27 23:13:09.590763539 +0200
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (C) 2012 Freescale Semiconductor, Inc.
++ * Copyright (C) 2012-2014 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+@@ -123,6 +123,9 @@
+ #define IMX6Q_GPR1_USB_OTG_ID_SEL_ENET_RX_ER 0x0
+ #define IMX6Q_GPR1_USB_OTG_ID_SEL_GPIO_1 BIT(13)
+ #define IMX6Q_GPR1_GINT BIT(12)
++#define IMX6Q_GPR1_GINT_MASK BIT(12)
++#define IMX6Q_GPR1_GINT_CLEAR 0x0
++#define IMX6Q_GPR1_GINT_ASSERT BIT(12)
+ #define IMX6Q_GPR1_ADDRS3_MASK (0x3 << 10)
+ #define IMX6Q_GPR1_ADDRS3_32MB (0x0 << 10)
+ #define IMX6Q_GPR1_ADDRS3_64MB (0x1 << 10)
+@@ -285,15 +288,15 @@
+ #define IMX6Q_GPR10_OCRAM_TZ_ADDR_MASK (0x3f << 5)
+ #define IMX6Q_GPR10_OCRAM_TZ_EN_MASK BIT(4)
+ #define IMX6Q_GPR10_DCIC2_MUX_CTL_MASK (0x3 << 2)
+-#define IMX6Q_GPR10_DCIC2_MUX_CTL_IPU1_DI0 (0x0 << 2)
+-#define IMX6Q_GPR10_DCIC2_MUX_CTL_IPU1_DI1 (0x1 << 2)
+-#define IMX6Q_GPR10_DCIC2_MUX_CTL_IPU2_DI0 (0x2 << 2)
+-#define IMX6Q_GPR10_DCIC2_MUX_CTL_IPU2_DI1 (0x3 << 2)
++#define IMX6Q_GPR10_DCIC2_MUX_CTL_IPU1_DI1 (0x0 << 2)
++#define IMX6Q_GPR10_DCIC2_MUX_CTL_LVDS0 (0x1 << 2)
++#define IMX6Q_GPR10_DCIC2_MUX_CTL_LVDS1 (0x2 << 2)
++#define IMX6Q_GPR10_DCIC2_MUX_CTL_MIPI (0x3 << 2)
+ #define IMX6Q_GPR10_DCIC1_MUX_CTL_MASK (0x3 << 0)
+ #define IMX6Q_GPR10_DCIC1_MUX_CTL_IPU1_DI0 (0x0 << 0)
+-#define IMX6Q_GPR10_DCIC1_MUX_CTL_IPU1_DI1 (0x1 << 0)
+-#define IMX6Q_GPR10_DCIC1_MUX_CTL_IPU2_DI0 (0x2 << 0)
+-#define IMX6Q_GPR10_DCIC1_MUX_CTL_IPU2_DI1 (0x3 << 0)
++#define IMX6Q_GPR10_DCIC1_MUX_CTL_LVDS0 (0x1 << 0)
++#define IMX6Q_GPR10_DCIC1_MUX_CTL_LVDS1 (0x2 << 0)
++#define IMX6Q_GPR10_DCIC1_MUX_CTL_HDMI (0x3 << 0)
+
+ #define IMX6Q_GPR12_ARMP_IPG_CLK_EN BIT(27)
+ #define IMX6Q_GPR12_ARMP_AHB_CLK_EN BIT(26)
+@@ -302,6 +305,7 @@
+ #define IMX6Q_GPR12_DEVICE_TYPE (0xf << 12)
+ #define IMX6Q_GPR12_PCIE_CTL_2 BIT(10)
+ #define IMX6Q_GPR12_LOS_LEVEL (0x1f << 4)
++#define IMX6Q_GPR12_LOS_LEVEL_9 (0x9 << 4)
+
+ #define IMX6Q_GPR13_SDMA_STOP_REQ BIT(30)
+ #define IMX6Q_GPR13_CAN2_STOP_REQ BIT(29)
+@@ -392,6 +396,16 @@
+ #define IMX6Q_GPR13_SATA_MPLL_CLK_EN BIT(1)
+ #define IMX6Q_GPR13_SATA_TX_EDGE_RATE BIT(0)
+
++/* For imx6dl iomux gpr register field definitions */
++#define IMX6DL_GPR3_LVDS1_MUX_CTL_MASK (0x3 << 8)
++#define IMX6DL_GPR3_LVDS1_MUX_CTL_IPU1_DI0 (0x0 << 8)
++#define IMX6DL_GPR3_LVDS1_MUX_CTL_IPU1_DI1 (0x1 << 8)
++#define IMX6DL_GPR3_LVDS1_MUX_CTL_LCDIF (0x2 << 8)
++#define IMX6DL_GPR3_LVDS0_MUX_CTL_MASK (0x3 << 6)
++#define IMX6DL_GPR3_LVDS0_MUX_CTL_IPU1_DI0 (0x0 << 6)
++#define IMX6DL_GPR3_LVDS0_MUX_CTL_IPU1_DI1 (0x1 << 6)
++#define IMX6DL_GPR3_LVDS0_MUX_CTL_LCDIF (0x2 << 6)
++
+ /* For imx6sl iomux gpr register field define */
+ #define IMX6SL_GPR1_FEC_CLOCK_MUX1_SEL_MASK (0x3 << 17)
+ #define IMX6SL_GPR1_FEC_CLOCK_MUX2_SEL_MASK (0x1 << 14)
+@@ -407,6 +421,15 @@
+ #define IMX6SX_GPR1_FEC_CLOCK_PAD_DIR_MASK (0x3 << 17)
+ #define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_EXT (0x3 << 13)
+
++#define IMX6SX_GPR2_MQS_OVERSAMPLE_MASK (0x1 << 26)
++#define IMX6SX_GPR2_MQS_OVERSAMPLE_SHIFT (26)
++#define IMX6SX_GPR2_MQS_EN_MASK (0x1 << 25)
++#define IMX6SX_GPR2_MQS_EN_SHIFT (25)
++#define IMX6SX_GPR2_MQS_SW_RST_MASK (0x1 << 24)
++#define IMX6SX_GPR2_MQS_SW_RST_SHIFT (24)
++#define IMX6SX_GPR2_MQS_CLK_DIV_MASK (0xFF << 16)
++#define IMX6SX_GPR2_MQS_CLK_DIV_SHIFT (16)
++
+ #define IMX6SX_GPR4_FEC_ENET1_STOP_REQ (0x1 << 3)
+ #define IMX6SX_GPR4_FEC_ENET2_STOP_REQ (0x1 << 4)
+
+@@ -435,4 +458,11 @@
+ #define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1)
+ #define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1)
+
++#define IMX6SX_GPR5_PCIE_BTNRST BIT(19)
++#define IMX6SX_GPR5_PCIE_PERST BIT(18)
++
++#define IMX6SX_GPR12_PCIE_PM_TURN_OFF BIT(16)
++#define IMX6SX_GPR12_PCIE_TEST_PD BIT(30)
++#define IMX6SX_GPR12_RX_EQ_MASK (0x7 << 0)
++#define IMX6SX_GPR12_RX_EQ_2 (0x2 << 0)
+ #endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
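
A hypothetical sketch of how the renamed DCIC1 mux values above would be used through the GPR syscon regmap; syscon_regmap_lookup_by_compatible() and regmap_update_bits() are standard kernel APIs, and IOMUXC_GPR10 is defined earlier in this header.

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/regmap.h>

static int example_route_dcic1_to_hdmi(void)
{
	struct regmap *gpr;

	gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (IS_ERR(gpr))
		return PTR_ERR(gpr);

	/* route the HDMI path to DCIC1, per the new mux encoding */
	return regmap_update_bits(gpr, IOMUXC_GPR10,
				  IMX6Q_GPR10_DCIC1_MUX_CTL_MASK,
				  IMX6Q_GPR10_DCIC1_MUX_CTL_HDMI);
}
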
+diff -Nur linux-4.1.3/include/linux/mipi_csi2.h linux-xbian-imx6/include/linux/mipi_csi2.h
+--- linux-4.1.3/include/linux/mipi_csi2.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/linux/mipi_csi2.h 2015-07-27 23:13:09.598735098 +0200
+@@ -0,0 +1,93 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __INCLUDE_MIPI_CSI2_H
++#define __INCLUDE_MIPI_CSI2_H
++
++/* MIPI CSI2 registers */
++#define MIPI_CSI2_REG(offset) (offset)
++
++#define MIPI_CSI2_VERSION MIPI_CSI2_REG(0x000)
++#define MIPI_CSI2_N_LANES MIPI_CSI2_REG(0x004)
++#define MIPI_CSI2_PHY_SHUTDOWNZ MIPI_CSI2_REG(0x008)
++#define MIPI_CSI2_DPHY_RSTZ MIPI_CSI2_REG(0x00c)
++#define MIPI_CSI2_CSI2_RESETN MIPI_CSI2_REG(0x010)
++#define MIPI_CSI2_PHY_STATE MIPI_CSI2_REG(0x014)
++#define MIPI_CSI2_DATA_IDS_1 MIPI_CSI2_REG(0x018)
++#define MIPI_CSI2_DATA_IDS_2 MIPI_CSI2_REG(0x01c)
++#define MIPI_CSI2_ERR1 MIPI_CSI2_REG(0x020)
++#define MIPI_CSI2_ERR2 MIPI_CSI2_REG(0x024)
++#define MIPI_CSI2_MASK1 MIPI_CSI2_REG(0x028)
++#define MIPI_CSI2_MASK2 MIPI_CSI2_REG(0x02c)
++#define MIPI_CSI2_PHY_TST_CTRL0 MIPI_CSI2_REG(0x030)
++#define MIPI_CSI2_PHY_TST_CTRL1 MIPI_CSI2_REG(0x034)
++#define MIPI_CSI2_SFT_RESET MIPI_CSI2_REG(0xf00)
++
++/* mipi data type */
++#define MIPI_DT_YUV420 0x18 /* YYY.../UYVY.... */
++#define MIPI_DT_YUV420_LEGACY 0x1a /* UYY.../VYY... */
++#define MIPI_DT_YUV422 0x1e /* UYVY... */
++#define MIPI_DT_RGB444 0x20
++#define MIPI_DT_RGB555 0x21
++#define MIPI_DT_RGB565 0x22
++#define MIPI_DT_RGB666 0x23
++#define MIPI_DT_RGB888 0x24
++#define MIPI_DT_RAW6 0x28
++#define MIPI_DT_RAW7 0x29
++#define MIPI_DT_RAW8 0x2a
++#define MIPI_DT_RAW10 0x2b
++#define MIPI_DT_RAW12 0x2c
++#define MIPI_DT_RAW14 0x2d
++
++
++struct mipi_csi2_info;
++/* mipi csi2 API */
++struct mipi_csi2_info *mipi_csi2_get_info(void);
++
++bool mipi_csi2_enable(struct mipi_csi2_info *info);
++
++bool mipi_csi2_disable(struct mipi_csi2_info *info);
++
++bool mipi_csi2_get_status(struct mipi_csi2_info *info);
++
++int mipi_csi2_get_bind_ipu(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_get_bind_csi(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_get_virtual_channel(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_set_lanes(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_set_datatype(struct mipi_csi2_info *info,
++ unsigned int datatype);
++
++unsigned int mipi_csi2_get_datatype(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_dphy_status(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_get_error1(struct mipi_csi2_info *info);
++
++unsigned int mipi_csi2_get_error2(struct mipi_csi2_info *info);
++
++int mipi_csi2_pixelclk_enable(struct mipi_csi2_info *info);
++
++void mipi_csi2_pixelclk_disable(struct mipi_csi2_info *info);
++
++int mipi_csi2_reset(struct mipi_csi2_info *info);
++
++#endif
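
A hypothetical bring-up order for the MIPI CSI2 API above (the sequence is an assumption inferred from the declarations, with the lane count presumably taken from platform data inside mipi_csi2_set_lanes()):

#include <linux/errno.h>
#include <linux/mipi_csi2.h>

static int example_csi2_bringup(void)
{
	struct mipi_csi2_info *info = mipi_csi2_get_info();

	if (!info)
		return -ENODEV;

	if (!mipi_csi2_enable(info))
		return -EIO;

	mipi_csi2_set_lanes(info);
	mipi_csi2_set_datatype(info, MIPI_DT_YUV422);	/* UYVY stream */

	return 0;
}
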
+diff -Nur linux-4.1.3/include/linux/mipi_dsi.h linux-xbian-imx6/include/linux/mipi_dsi.h
+--- linux-4.1.3/include/linux/mipi_dsi.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/linux/mipi_dsi.h 2015-07-27 23:13:09.598735098 +0200
+@@ -0,0 +1,171 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __INCLUDE_MIPI_DSI_H
++#define __INCLUDE_MIPI_DSI_H
++
++#define MIPI_DSI_VERSION (0x000)
++#define MIPI_DSI_PWR_UP (0x004)
++#define MIPI_DSI_CLKMGR_CFG (0x008)
++#define MIPI_DSI_DPI_CFG (0x00c)
++#define MIPI_DSI_DBI_CFG (0x010)
++#define MIPI_DSI_DBIS_CMDSIZE (0x014)
++#define MIPI_DSI_PCKHDL_CFG (0x018)
++#define MIPI_DSI_VID_MODE_CFG (0x01c)
++#define MIPI_DSI_VID_PKT_CFG (0x020)
++#define MIPI_DSI_CMD_MODE_CFG (0x024)
++#define MIPI_DSI_TMR_LINE_CFG (0x028)
++#define MIPI_DSI_VTIMING_CFG (0x02c)
++#define MIPI_DSI_PHY_TMR_CFG (0x030)
++#define MIPI_DSI_GEN_HDR (0x034)
++#define MIPI_DSI_GEN_PLD_DATA (0x038)
++#define MIPI_DSI_CMD_PKT_STATUS (0x03c)
++#define MIPI_DSI_TO_CNT_CFG (0x040)
++#define MIPI_DSI_ERROR_ST0 (0x044)
++#define MIPI_DSI_ERROR_ST1 (0x048)
++#define MIPI_DSI_ERROR_MSK0 (0x04c)
++#define MIPI_DSI_ERROR_MSK1 (0x050)
++#define MIPI_DSI_PHY_RSTZ (0x054)
++#define MIPI_DSI_PHY_IF_CFG (0x058)
++#define MIPI_DSI_PHY_IF_CTRL (0x05c)
++#define MIPI_DSI_PHY_STATUS (0x060)
++#define MIPI_DSI_PHY_TST_CTRL0 (0x064)
++#define MIPI_DSI_PHY_TST_CTRL1 (0x068)
++
++#define DSI_PWRUP_RESET (0x0 << 0)
++#define DSI_PWRUP_POWERUP (0x1 << 0)
++
++#define DSI_DPI_CFG_VID_SHIFT (0)
++#define DSI_DPI_CFG_VID_MASK (0x3)
++#define DSI_DPI_CFG_COLORCODE_SHIFT (2)
++#define DSI_DPI_CFG_COLORCODE_MASK (0x7)
++#define DSI_DPI_CFG_DATAEN_ACT_LOW (0x1 << 5)
++#define DSI_DPI_CFG_DATAEN_ACT_HIGH (0x0 << 5)
++#define DSI_DPI_CFG_VSYNC_ACT_LOW (0x1 << 6)
++#define DSI_DPI_CFG_VSYNC_ACT_HIGH (0x0 << 6)
++#define DSI_DPI_CFG_HSYNC_ACT_LOW (0x1 << 7)
++#define DSI_DPI_CFG_HSYNC_ACT_HIGH (0x0 << 7)
++#define DSI_DPI_CFG_SHUTD_ACT_LOW (0x1 << 8)
++#define DSI_DPI_CFG_SHUTD_ACT_HIGH (0x0 << 8)
++#define DSI_DPI_CFG_COLORMODE_ACT_LOW (0x1 << 9)
++#define DSI_DPI_CFG_COLORMODE_ACT_HIGH (0x0 << 9)
++#define DSI_DPI_CFG_EN18LOOSELY (0x1 << 10)
++
++#define DSI_PCKHDL_CFG_EN_EOTP_TX (0x1 << 0)
++#define DSI_PCKHDL_CFG_EN_EOTP_RX (0x1 << 1)
++#define DSI_PCKHDL_CFG_EN_BTA (0x1 << 2)
++#define DSI_PCKHDL_CFG_EN_ECC_RX (0x1 << 3)
++#define DSI_PCKHDL_CFG_EN_CRC_RX (0x1 << 4)
++#define DSI_PCKHDL_CFG_GEN_VID_RX_MASK (0x3)
++#define DSI_PCKHDL_CFG_GEN_VID_RX_SHIFT (5)
++
++#define DSI_VID_MODE_CFG_EN (0x1 << 0)
++#define DSI_VID_MODE_CFG_EN_BURSTMODE (0x3 << 1)
++#define DSI_VID_MODE_CFG_TYPE_MASK (0x3)
++#define DSI_VID_MODE_CFG_TYPE_SHIFT (1)
++#define DSI_VID_MODE_CFG_EN_LP_VSA (0x1 << 3)
++#define DSI_VID_MODE_CFG_EN_LP_VBP (0x1 << 4)
++#define DSI_VID_MODE_CFG_EN_LP_VFP (0x1 << 5)
++#define DSI_VID_MODE_CFG_EN_LP_VACT (0x1 << 6)
++#define DSI_VID_MODE_CFG_EN_LP_HBP (0x1 << 7)
++#define DSI_VID_MODE_CFG_EN_LP_HFP (0x1 << 8)
++#define DSI_VID_MODE_CFG_EN_MULTI_PKT (0x1 << 9)
++#define DSI_VID_MODE_CFG_EN_NULL_PKT (0x1 << 10)
++#define DSI_VID_MODE_CFG_EN_FRAME_ACK (0x1 << 11)
++#define DSI_VID_MODE_CFG_EN_LP_MODE (DSI_VID_MODE_CFG_EN_LP_VSA | \
++ DSI_VID_MODE_CFG_EN_LP_VBP | \
++ DSI_VID_MODE_CFG_EN_LP_VFP | \
++ DSI_VID_MODE_CFG_EN_LP_HFP | \
++ DSI_VID_MODE_CFG_EN_LP_HBP | \
++ DSI_VID_MODE_CFG_EN_LP_VACT)
++
++
++
++#define DSI_VID_PKT_CFG_VID_PKT_SZ_MASK (0x7ff)
++#define DSI_VID_PKT_CFG_VID_PKT_SZ_SHIFT (0)
++#define DSI_VID_PKT_CFG_NUM_CHUNKS_MASK (0x3ff)
++#define DSI_VID_PKT_CFG_NUM_CHUNKS_SHIFT (11)
++#define DSI_VID_PKT_CFG_NULL_PKT_SZ_MASK (0x3ff)
++#define DSI_VID_PKT_CFG_NULL_PKT_SZ_SHIFT (21)
++
++#define MIPI_DSI_CMD_MODE_CFG_EN_LOWPOWER (0x1FFF)
++#define MIPI_DSI_CMD_MODE_CFG_EN_CMD_MODE (0x1 << 0)
++
++#define DSI_TME_LINE_CFG_HSA_TIME_MASK (0x1ff)
++#define DSI_TME_LINE_CFG_HSA_TIME_SHIFT (0)
++#define DSI_TME_LINE_CFG_HBP_TIME_MASK (0x1ff)
++#define DSI_TME_LINE_CFG_HBP_TIME_SHIFT (9)
++#define DSI_TME_LINE_CFG_HLINE_TIME_MASK (0x3fff)
++#define DSI_TME_LINE_CFG_HLINE_TIME_SHIFT (18)
++
++#define DSI_VTIMING_CFG_VSA_LINES_MASK (0xf)
++#define DSI_VTIMING_CFG_VSA_LINES_SHIFT (0)
++#define DSI_VTIMING_CFG_VBP_LINES_MASK (0x3f)
++#define DSI_VTIMING_CFG_VBP_LINES_SHIFT (4)
++#define DSI_VTIMING_CFG_VFP_LINES_MASK (0x3f)
++#define DSI_VTIMING_CFG_VFP_LINES_SHIFT (10)
++#define DSI_VTIMING_CFG_V_ACT_LINES_MASK (0x7ff)
++#define DSI_VTIMING_CFG_V_ACT_LINES_SHIFT (16)
++
++#define DSI_PHY_TMR_CFG_BTA_TIME_MASK (0xfff)
++#define DSI_PHY_TMR_CFG_BTA_TIME_SHIFT (0)
++#define DSI_PHY_TMR_CFG_LP2HS_TIME_MASK (0xff)
++#define DSI_PHY_TMR_CFG_LP2HS_TIME_SHIFT (12)
++#define DSI_PHY_TMR_CFG_HS2LP_TIME_MASK (0xff)
++#define DSI_PHY_TMR_CFG_HS2LP_TIME_SHIFT (20)
++
++#define DSI_PHY_IF_CFG_N_LANES_MASK (0x3)
++#define DSI_PHY_IF_CFG_N_LANES_SHIFT (0)
++#define DSI_PHY_IF_CFG_WAIT_TIME_MASK (0xff)
++#define DSI_PHY_IF_CFG_WAIT_TIME_SHIFT (2)
++
++#define DSI_PHY_RSTZ_EN_CLK (0x1 << 2)
++#define DSI_PHY_RSTZ_DISABLE_RST (0x1 << 1)
++#define DSI_PHY_RSTZ_DISABLE_SHUTDOWN (0x1 << 0)
++#define DSI_PHY_RSTZ_RST (0x0)
++
++#define DSI_PHY_STATUS_LOCK (0x1 << 0)
++#define DSI_PHY_STATUS_STOPSTATE_CLK_LANE (0x1 << 2)
++
++#define DSI_GEN_HDR_TYPE_MASK (0xff)
++#define DSI_GEN_HDR_TYPE_SHIFT (0)
++#define DSI_GEN_HDR_DATA_MASK (0xffff)
++#define DSI_GEN_HDR_DATA_SHIFT (8)
++
++#define DSI_CMD_PKT_STATUS_GEN_CMD_EMPTY (0x1 << 0)
++#define DSI_CMD_PKT_STATUS_GEN_CMD_FULL (0x1 << 1)
++#define DSI_CMD_PKT_STATUS_GEN_PLD_W_EMPTY (0x1 << 2)
++#define DSI_CMD_PKT_STATUS_GEN_PLD_W_FULL (0x1 << 3)
++#define DSI_CMD_PKT_STATUS_GEN_PLD_R_EMPTY (0x1 << 4)
++#define DSI_CMD_PKT_STATUS_GEN_RD_CMD_BUSY (0x1 << 6)
++
++#define DSI_ERROR_MSK0_ALL_MASK (0x1fffff)
++#define DSI_ERROR_MSK1_ALL_MASK (0x3ffff)
++
++#define DSI_PHY_IF_CTRL_RESET (0x0)
++#define DSI_PHY_IF_CTRL_TX_REQ_CLK_HS (0x1 << 0)
++#define DSI_PHY_IF_CTRL_TX_REQ_CLK_ULPS (0x1 << 1)
++#define DSI_PHY_IF_CTRL_TX_EXIT_CLK_ULPS (0x1 << 2)
++#define DSI_PHY_IF_CTRL_TX_REQ_DATA_ULPS (0x1 << 3)
++#define DSI_PHY_IF_CTRL_TX_EXIT_DATA_ULPS (0x1 << 4)
++#define DSI_PHY_IF_CTRL_TX_TRIG_MASK (0xF)
++#define DSI_PHY_IF_CTRL_TX_TRIG_SHIFT (5)
++
++#define DSI_PHY_CLK_INIT_COMMAND (0x44)
++#define DSI_GEN_PLD_DATA_BUF_SIZE (0x4)
++#endif
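
The GEN_HDR register takes the packet type in bits 7:0 and the short-packet payload in bits 23:8; a hypothetical helper packing a value from the DSI_GEN_HDR_* fields above:

#include <linux/types.h>

static inline u32 example_dsi_gen_hdr(u8 type, u16 data)
{
	return ((type & DSI_GEN_HDR_TYPE_MASK) << DSI_GEN_HDR_TYPE_SHIFT) |
	       ((data & DSI_GEN_HDR_DATA_MASK) << DSI_GEN_HDR_DATA_SHIFT);
}
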
+diff -Nur linux-4.1.3/include/linux/mmc/host.h linux-xbian-imx6/include/linux/mmc/host.h
+--- linux-4.1.3/include/linux/mmc/host.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/include/linux/mmc/host.h 2015-07-27 23:13:09.602720875 +0200
+@@ -300,6 +300,11 @@
+ unsigned long clkgate_delay;
+ #endif
+
++ /* card specific properties to deal with power and reset */
++ struct regulator *card_regulator; /* External VCC needed by the card */
++ struct gpio_desc *card_reset_gpios[2]; /* External resets, active low */
++ struct clk *card_clk; /* External clock needed by the card */
++
+ /* host specific block data */
+ unsigned int max_seg_size; /* see blk_queue_max_segment_size */
+ unsigned short max_segs; /* see blk_queue_max_segments */
+diff -Nur linux-4.1.3/include/linux/mm_types.h linux-xbian-imx6/include/linux/mm_types.h
+--- linux-4.1.3/include/linux/mm_types.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/include/linux/mm_types.h 2015-07-27 23:13:09.602720875 +0200
+@@ -23,7 +23,7 @@
+ struct address_space;
+ struct mem_cgroup;
+
+-#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
++#define USE_SPLIT_PTE_PTLOCKS (0)
+ #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
+ IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
+ #define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
+diff -Nur linux-4.1.3/include/linux/mxc_asrc.h linux-xbian-imx6/include/linux/mxc_asrc.h
+--- linux-4.1.3/include/linux/mxc_asrc.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/linux/mxc_asrc.h 2015-07-27 23:13:09.606706653 +0200
+@@ -0,0 +1,389 @@
++/*
++ * Copyright 2008-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ *
++ * @file mxc_asrc.h
++ *
++ * @brief i.MX Asynchronous Sample Rate Converter
++ *
++ * @ingroup Audio
++ */
++
++#ifndef __MXC_ASRC_H__
++#define __MXC_ASRC_H__
++
++#include <uapi/linux/mxc_asrc.h>
++#include <linux/scatterlist.h>
++
++#define ASRC_DMA_BUFFER_NUM 2
++#define ASRC_INPUTFIFO_THRESHOLD 32
++#define ASRC_OUTPUTFIFO_THRESHOLD 32
++#define ASRC_FIFO_THRESHOLD_MIN 0
++#define ASRC_FIFO_THRESHOLD_MAX 63
++#define ASRC_DMA_BUFFER_SIZE (1024 * 48 * 4)
++#define ASRC_MAX_BUFFER_SIZE (1024 * 48)
++#define ASRC_OUTPUT_LAST_SAMPLE_DEFAULT_MAX 32
++#define ASRC_OUTPUT_LAST_SAMPLE_DEFAULT 16
++
++
++/* Ideal Ratio mode doesn't care about the outclk frequency, so it is fixed */
++#define ASRC_PRESCALER_IDEAL_RATIO 5
++/* SPDIF rxclk pulse rate is 128 * samplerate, so 2 ^ 7 */
++#define ASRC_PRESCALER_SPDIF_RX 7
++/* SPDIF txclk pulse rate is 64 * samplerate, so 2 ^ 6 */
++#define ASRC_PRESCALER_SPDIF_TX 6
++/* I2S bclk is 16 * 2 = 32, so 2 ^ 5 */
++#define ASRC_PRESCALER_I2S_16BIT 5
++/* I2S bclk is 24 * 2 = 48 -> 64, so 2 ^ 6 */
++#define ASRC_PRESCALER_I2S_24BIT 6
++
++
++#define REG_ASRCTR 0x00
++#define REG_ASRIER 0x04
++#define REG_ASRCNCR 0x0C
++#define REG_ASRCFG 0x10
++#define REG_ASRCSR 0x14
++
++#define REG_ASRCDR1 0x18
++#define REG_ASRCDR2 0x1C
++#define REG_ASRCDR(x) ((x < 2) ? REG_ASRCDR1 : REG_ASRCDR2)
++
++#define REG_ASRSTR 0x20
++#define REG_ASRRA 0x24
++#define REG_ASRRB 0x28
++#define REG_ASRRC 0x2C
++#define REG_ASRPM1 0x40
++#define REG_ASRPM2 0x44
++#define REG_ASRPM3 0x48
++#define REG_ASRPM4 0x4C
++#define REG_ASRPM5 0x50
++#define REG_ASRTFR1 0x54
++#define REG_ASRCCR 0x5C
++
++#define REG_ASRDIA 0x60
++#define REG_ASRDOA 0x64
++#define REG_ASRDIB 0x68
++#define REG_ASRDOB 0x6C
++#define REG_ASRDIC 0x70
++#define REG_ASRDOC 0x74
++#define REG_ASRDI(x) (REG_ASRDIA + (x << 3))
++#define REG_ASRDO(x) (REG_ASRDOA + (x << 3))
++
++#define REG_ASRIDRHA 0x80
++#define REG_ASRIDRLA 0x84
++#define REG_ASRIDRHB 0x88
++#define REG_ASRIDRLB 0x8C
++#define REG_ASRIDRHC 0x90
++#define REG_ASRIDRLC 0x94
++#define REG_ASRIDRH(x) (REG_ASRIDRHA + (x << 3))
++#define REG_ASRIDRL(x) (REG_ASRIDRLA + (x << 3))
++
++#define REG_ASR76K 0x98
++#define REG_ASR56K 0x9C
++
++#define REG_ASRMCRA 0xA0
++#define REG_ASRFSTA 0xA4
++#define REG_ASRMCRB 0xA8
++#define REG_ASRFSTB 0xAC
++#define REG_ASRMCRC 0xB0
++#define REG_ASRFSTC 0xB4
++#define REG_ASRMCR(x) (REG_ASRMCRA + (x << 3))
++#define REG_ASRFST(x) (REG_ASRFSTA + (x << 3))
++
++#define REG_ASRMCR1A 0xC0
++#define REG_ASRMCR1B 0xC4
++#define REG_ASRMCR1C 0xC8
++#define REG_ASRMCR1(x) (REG_ASRMCR1A + (x << 2))
++
++
++/* REG0 0x00 REG_ASRCTR */
++#define ASRCTR_ATSx_SHIFT(x) (20 + x)
++#define ASRCTR_ATSx_MASK(x) (1 << ASRCTR_ATSx_SHIFT(x))
++#define ASRCTR_ATS(x) (1 << ASRCTR_ATSx_SHIFT(x))
++#define ASRCTR_USRx_SHIFT(x) (14 + (x << 1))
++#define ASRCTR_USRx_MASK(x) (1 << ASRCTR_USRx_SHIFT(x))
++#define ASRCTR_USR(x) (1 << ASRCTR_USRx_SHIFT(x))
++#define ASRCTR_IDRx_SHIFT(x) (13 + (x << 1))
++#define ASRCTR_IDRx_MASK(x) (1 << ASRCTR_IDRx_SHIFT(x))
++#define ASRCTR_IDR(x) (1 << ASRCTR_IDRx_SHIFT(x))
++#define ASRCTR_SRST_SHIFT 4
++#define ASRCTR_SRST_MASK (1 << ASRCTR_SRST_SHIFT)
++#define ASRCTR_SRST (1 << ASRCTR_SRST_SHIFT)
++#define ASRCTR_ASRCEx_SHIFT(x) (1 + x)
++#define ASRCTR_ASRCEx_MASK(x) (1 << ASRCTR_ASRCEx_SHIFT(x))
++#define ASRCTR_ASRCE(x) (1 << ASRCTR_ASRCEx_SHIFT(x))
++#define ASRCTR_ASRCEx_ALL_MASK (0x7 << ASRCTR_ASRCEx_SHIFT(0))
++#define ASRCTR_ASRCEN_SHIFT 0
++#define ASRCTR_ASRCEN_MASK (1 << ASRCTR_ASRCEN_SHIFT)
++#define ASRCTR_ASRCEN (1 << ASRCTR_ASRCEN_SHIFT)
++
++/* REG1 0x04 REG_ASRIER */
++#define ASRIER_AFPWE_SHIFT 7
++#define ASRIER_AFPWE_MASK (1 << ASRIER_AFPWE_SHIFT)
++#define ASRIER_AFPWE (1 << ASRIER_AFPWE_SHIFT)
++#define ASRIER_AOLIE_SHIFT 6
++#define ASRIER_AOLIE_MASK (1 << ASRIER_AOLIE_SHIFT)
++#define ASRIER_AOLIE (1 << ASRIER_AOLIE_SHIFT)
++#define ASRIER_ADOEx_SHIFT(x) (3 + x)
++#define ASRIER_ADOEx_MASK(x) (1 << ASRIER_ADOEx_SHIFT(x))
++#define ASRIER_ADOE(x) (1 << ASRIER_ADOEx_SHIFT(x))
++#define ASRIER_ADIEx_SHIFT(x) (0 + x)
++#define ASRIER_ADIEx_MASK(x) (1 << ASRIER_ADIEx_SHIFT(x))
++#define ASRIER_ADIE(x) (1 << ASRIER_ADIEx_SHIFT(x))
++
++/* REG2 0x0C REG_ASRCNCR */
++#define ASRCNCR_ANCx_SHIFT(x, b) (b * x)
++#define ASRCNCR_ANCx_MASK(x, b) (((1 << b) - 1) << ASRCNCR_ANCx_SHIFT(x, b))
++#define ASRCNCR_ANCx_get(x, v, b) ((v & ASRCNCR_ANCx_MASK(x, b)) >> ASRCNCR_ANCx_SHIFT(x, b))
++#define ASRCNCR_ANCx_set(x, v, b) ((v << ASRCNCR_ANCx_SHIFT(x, b)) & ASRCNCR_ANCx_MASK(x, b))
++
++/* REG3 0x10 REG_ASRCFG */
++#define ASRCFG_INIRQx_SHIFT(x) (21 + x)
++#define ASRCFG_INIRQx_MASK(x) (1 << ASRCFG_INIRQx_SHIFT(x))
++#define ASRCFG_INIRQx(x)		(1 << ASRCFG_INIRQx_SHIFT(x))
++#define ASRCFG_NDPRx_SHIFT(x) (18 + x)
++#define ASRCFG_NDPRx_MASK(x) (1 << ASRCFG_NDPRx_SHIFT(x))
++#define ASRCFG_NDPRx(x)			(1 << ASRCFG_NDPRx_SHIFT(x))
++#define ASRCFG_POSTMODx_SHIFT(x) (8 + (x << 2))
++#define ASRCFG_POSTMODx_WIDTH 2
++#define ASRCFG_POSTMODx_MASK(x) (((1 << ASRCFG_POSTMODx_WIDTH) - 1) << ASRCFG_POSTMODx_SHIFT(x))
++#define ASRCFG_POSTMOD(x, v) ((v) << ASRCFG_POSTMODx_SHIFT(x))
++#define ASRCFG_POSTMODx_UP(x) (0 << ASRCFG_POSTMODx_SHIFT(x))
++#define ASRCFG_POSTMODx_DCON(x) (1 << ASRCFG_POSTMODx_SHIFT(x))
++#define ASRCFG_POSTMODx_DOWN(x) (2 << ASRCFG_POSTMODx_SHIFT(x))
++#define ASRCFG_PREMODx_SHIFT(x) (6 + (x << 2))
++#define ASRCFG_PREMODx_WIDTH 2
++#define ASRCFG_PREMODx_MASK(x) (((1 << ASRCFG_PREMODx_WIDTH) - 1) << ASRCFG_PREMODx_SHIFT(x))
++#define ASRCFG_PREMOD(x, v) ((v) << ASRCFG_PREMODx_SHIFT(x))
++#define ASRCFG_PREMODx_UP(x) (0 << ASRCFG_PREMODx_SHIFT(x))
++#define ASRCFG_PREMODx_DCON(x) (1 << ASRCFG_PREMODx_SHIFT(x))
++#define ASRCFG_PREMODx_DOWN(x) (2 << ASRCFG_PREMODx_SHIFT(x))
++#define ASRCFG_PREMODx_BYPASS(x) (3 << ASRCFG_PREMODx_SHIFT(x))
++
++/* REG4 0x14 REG_ASRCSR */
++#define ASRCSR_AxCSx_WIDTH 4
++#define ASRCSR_AxCSx_MASK ((1 << ASRCSR_AxCSx_WIDTH) - 1)
++#define ASRCSR_AOCSx_SHIFT(x) (12 + (x << 2))
++#define ASRCSR_AOCSx_MASK(x) (((1 << ASRCSR_AxCSx_WIDTH) - 1) << ASRCSR_AOCSx_SHIFT(x))
++#define ASRCSR_AOCS(x, v) ((v) << ASRCSR_AOCSx_SHIFT(x))
++#define ASRCSR_AICSx_SHIFT(x) (x << 2)
++#define ASRCSR_AICSx_MASK(x) (((1 << ASRCSR_AxCSx_WIDTH) - 1) << ASRCSR_AICSx_SHIFT(x))
++#define ASRCSR_AICS(x, v) ((v) << ASRCSR_AICSx_SHIFT(x))
++
++/* REG5&6 0x18 & 0x1C REG_ASRCDR1 & ASRCDR2 */
++#define ASRCDRx_AxCPx_WIDTH 3
++#define ASRCDRx_AICPx_SHIFT(x) (0 + (x % 2) * 6)
++#define ASRCDRx_AICPx_MASK(x) (((1 << ASRCDRx_AxCPx_WIDTH) - 1) << ASRCDRx_AICPx_SHIFT(x))
++#define ASRCDRx_AICP(x, v) ((v) << ASRCDRx_AICPx_SHIFT(x))
++#define ASRCDRx_AICDx_SHIFT(x) (3 + (x % 2) * 6)
++#define ASRCDRx_AICDx_MASK(x) (((1 << ASRCDRx_AxCPx_WIDTH) - 1) << ASRCDRx_AICDx_SHIFT(x))
++#define ASRCDRx_AICD(x, v) ((v) << ASRCDRx_AICDx_SHIFT(x))
++#define ASRCDRx_AOCPx_SHIFT(x) ((x < 2) ? 12 + x * 6 : 6)
++#define ASRCDRx_AOCPx_MASK(x) (((1 << ASRCDRx_AxCPx_WIDTH) - 1) << ASRCDRx_AOCPx_SHIFT(x))
++#define ASRCDRx_AOCP(x, v) ((v) << ASRCDRx_AOCPx_SHIFT(x))
++#define ASRCDRx_AOCDx_SHIFT(x) ((x < 2) ? 15 + x * 6 : 9)
++#define ASRCDRx_AOCDx_MASK(x) (((1 << ASRCDRx_AxCPx_WIDTH) - 1) << ASRCDRx_AOCDx_SHIFT(x))
++#define ASRCDRx_AOCD(x, v) ((v) << ASRCDRx_AOCDx_SHIFT(x))
++
++/* REG7 0x20 REG_ASRSTR */
++#define ASRSTR_DSLCNT_SHIFT 21
++#define ASRSTR_DSLCNT_MASK (1 << ASRSTR_DSLCNT_SHIFT)
++#define ASRSTR_DSLCNT (1 << ASRSTR_DSLCNT_SHIFT)
++#define ASRSTR_ATQOL_SHIFT 20
++#define ASRSTR_ATQOL_MASK (1 << ASRSTR_ATQOL_SHIFT)
++#define ASRSTR_ATQOL (1 << ASRSTR_ATQOL_SHIFT)
++#define ASRSTR_AOOLx_SHIFT(x) (17 + x)
++#define ASRSTR_AOOLx_MASK(x) (1 << ASRSTR_AOOLx_SHIFT(x))
++#define ASRSTR_AOOL(x) (1 << ASRSTR_AOOLx_SHIFT(x))
++#define ASRSTR_AIOLx_SHIFT(x) (14 + x)
++#define ASRSTR_AIOLx_MASK(x) (1 << ASRSTR_AIOLx_SHIFT(x))
++#define ASRSTR_AIOL(x) (1 << ASRSTR_AIOLx_SHIFT(x))
++#define ASRSTR_AODOx_SHIFT(x) (11 + x)
++#define ASRSTR_AODOx_MASK(x) (1 << ASRSTR_AODOx_SHIFT(x))
++#define ASRSTR_AODO(x) (1 << ASRSTR_AODOx_SHIFT(x))
++#define ASRSTR_AIDUx_SHIFT(x) (8 + x)
++#define ASRSTR_AIDUx_MASK(x) (1 << ASRSTR_AIDUx_SHIFT(x))
++#define ASRSTR_AIDU(x) (1 << ASRSTR_AIDUx_SHIFT(x))
++#define ASRSTR_FPWT_SHIFT 7
++#define ASRSTR_FPWT_MASK (1 << ASRSTR_FPWT_SHIFT)
++#define ASRSTR_FPWT (1 << ASRSTR_FPWT_SHIFT)
++#define ASRSTR_AOLE_SHIFT 6
++#define ASRSTR_AOLE_MASK (1 << ASRSTR_AOLE_SHIFT)
++#define ASRSTR_AOLE (1 << ASRSTR_AOLE_SHIFT)
++#define ASRSTR_AODEx_SHIFT(x) (3 + x)
++#define ASRSTR_AODFx_MASK(x) (1 << ASRSTR_AODEx_SHIFT(x))
++#define ASRSTR_AODF(x) (1 << ASRSTR_AODEx_SHIFT(x))
++#define ASRSTR_AIDEx_SHIFT(x) (0 + x)
++#define ASRSTR_AIDEx_MASK(x) (1 << ASRSTR_AIDEx_SHIFT(x))
++#define ASRSTR_AIDE(x) (1 << ASRSTR_AIDEx_SHIFT(x))
++
++/* REG10 0x54 REG_ASRTFR1 */
++#define ASRTFR1_TF_BASE_WIDTH 7
++#define ASRTFR1_TF_BASE_SHIFT 6
++#define ASRTFR1_TF_BASE_MASK (((1 << ASRTFR1_TF_BASE_WIDTH) - 1) << ASRTFR1_TF_BASE_SHIFT)
++#define ASRTFR1_TF_BASE(x) ((x) << ASRTFR1_TF_BASE_SHIFT)
++
++/*
++ * REG22 0xA0 REG_ASRMCRA
++ * REG24 0xA8 REG_ASRMCRB
++ * REG26 0xB0 REG_ASRMCRC
++ */
++#define ASRMCRx_ZEROBUFx_SHIFT 23
++#define ASRMCRx_ZEROBUFxCLR_MASK (1 << ASRMCRx_ZEROBUFx_SHIFT)
++#define ASRMCRx_ZEROBUFxCLR (1 << ASRMCRx_ZEROBUFx_SHIFT)
++#define ASRMCRx_EXTTHRSHx_SHIFT 22
++#define ASRMCRx_EXTTHRSHx_MASK (1 << ASRMCRx_EXTTHRSHx_SHIFT)
++#define ASRMCRx_EXTTHRSHx (1 << ASRMCRx_EXTTHRSHx_SHIFT)
++#define ASRMCRx_BUFSTALLx_SHIFT 21
++#define ASRMCRx_BUFSTALLx_MASK (1 << ASRMCRx_BUFSTALLx_SHIFT)
++#define ASRMCRx_BUFSTALLx (1 << ASRMCRx_BUFSTALLx_SHIFT)
++#define ASRMCRx_BYPASSPOLYx_SHIFT 20
++#define ASRMCRx_BYPASSPOLYx_MASK (1 << ASRMCRx_BYPASSPOLYx_SHIFT)
++#define ASRMCRx_BYPASSPOLYx (1 << ASRMCRx_BYPASSPOLYx_SHIFT)
++#define ASRMCRx_OUTFIFO_THRESHOLD_WIDTH 6
++#define ASRMCRx_OUTFIFO_THRESHOLD_SHIFT 12
++#define ASRMCRx_OUTFIFO_THRESHOLD_MASK (((1 << ASRMCRx_OUTFIFO_THRESHOLD_WIDTH) - 1) << ASRMCRx_OUTFIFO_THRESHOLD_SHIFT)
++#define ASRMCRx_OUTFIFO_THRESHOLD(v) (((v) << ASRMCRx_OUTFIFO_THRESHOLD_SHIFT) & ASRMCRx_OUTFIFO_THRESHOLD_MASK)
++#define ASRMCRx_RSYNIFx_SHIFT 11
++#define ASRMCRx_RSYNIFx_MASK (1 << ASRMCRx_RSYNIFx_SHIFT)
++#define ASRMCRx_RSYNIFx (1 << ASRMCRx_RSYNIFx_SHIFT)
++#define ASRMCRx_RSYNOFx_SHIFT 10
++#define ASRMCRx_RSYNOFx_MASK (1 << ASRMCRx_RSYNOFx_SHIFT)
++#define ASRMCRx_RSYNOFx (1 << ASRMCRx_RSYNOFx_SHIFT)
++#define ASRMCRx_INFIFO_THRESHOLD_WIDTH 6
++#define ASRMCRx_INFIFO_THRESHOLD_SHIFT 0
++#define ASRMCRx_INFIFO_THRESHOLD_MASK (((1 << ASRMCRx_INFIFO_THRESHOLD_WIDTH) - 1) << ASRMCRx_INFIFO_THRESHOLD_SHIFT)
++#define ASRMCRx_INFIFO_THRESHOLD(v) (((v) << ASRMCRx_INFIFO_THRESHOLD_SHIFT) & ASRMCRx_INFIFO_THRESHOLD_MASK)
++
++/*
++ * REG23 0xA4 REG_ASRFSTA
++ * REG25 0xAC REG_ASRFSTB
++ * REG27 0xB4 REG_ASRFSTC
++ */
++#define ASRFSTx_OAFx_SHIFT 23
++#define ASRFSTx_OAFx_MASK (1 << ASRFSTx_OAFx_SHIFT)
++#define ASRFSTx_OAFx (1 << ASRFSTx_OAFx_SHIFT)
++#define ASRFSTx_OUTPUT_FIFO_WIDTH 7
++#define ASRFSTx_OUTPUT_FIFO_SHIFT 12
++#define ASRFSTx_OUTPUT_FIFO_MASK (((1 << ASRFSTx_OUTPUT_FIFO_WIDTH) - 1) << ASRFSTx_OUTPUT_FIFO_SHIFT)
++#define ASRFSTx_IAEx_SHIFT 11
++#define ASRFSTx_IAEx_MASK		(1 << ASRFSTx_IAEx_SHIFT)
++#define ASRFSTx_IAEx			(1 << ASRFSTx_IAEx_SHIFT)
++#define ASRFSTx_INPUT_FIFO_WIDTH 7
++#define ASRFSTx_INPUT_FIFO_SHIFT 0
++#define ASRFSTx_INPUT_FIFO_MASK ((1 << ASRFSTx_INPUT_FIFO_WIDTH) - 1)
++
++/* REG28 0xC0 & 0xC4 & 0xC8 REG_ASRMCR1x */
++#define ASRMCR1x_IWD_WIDTH 3
++#define ASRMCR1x_IWD_SHIFT 9
++#define ASRMCR1x_IWD_MASK (((1 << ASRMCR1x_IWD_WIDTH) - 1) << ASRMCR1x_IWD_SHIFT)
++#define ASRMCR1x_IWD(v) ((v) << ASRMCR1x_IWD_SHIFT)
++#define ASRMCR1x_IMSB_SHIFT 8
++#define ASRMCR1x_IMSB_MASK (1 << ASRMCR1x_IMSB_SHIFT)
++#define ASRMCR1x_IMSB_MSB (1 << ASRMCR1x_IMSB_SHIFT)
++#define ASRMCR1x_IMSB_LSB (0 << ASRMCR1x_IMSB_SHIFT)
++#define ASRMCR1x_OMSB_SHIFT 2
++#define ASRMCR1x_OMSB_MASK (1 << ASRMCR1x_OMSB_SHIFT)
++#define ASRMCR1x_OMSB_MSB (1 << ASRMCR1x_OMSB_SHIFT)
++#define ASRMCR1x_OMSB_LSB (0 << ASRMCR1x_OMSB_SHIFT)
++#define ASRMCR1x_OSGN_SHIFT 1
++#define ASRMCR1x_OSGN_MASK (1 << ASRMCR1x_OSGN_SHIFT)
++#define ASRMCR1x_OSGN (1 << ASRMCR1x_OSGN_SHIFT)
++#define ASRMCR1x_OW16_SHIFT 0
++#define ASRMCR1x_OW16_MASK (1 << ASRMCR1x_OW16_SHIFT)
++#define ASRMCR1x_OW16(v) ((v) << ASRMCR1x_OW16_SHIFT)
++
++
++struct dma_block {
++ unsigned int index;
++ unsigned int length;
++ void *dma_vaddr;
++ dma_addr_t dma_paddr;
++ struct list_head queue;
++};
++
++struct asrc_p2p_params {
++ u32 p2p_rate; /* ASRC output rate for p2p */
++ enum asrc_word_width p2p_width; /* ASRC output wordwidth for p2p */
++};
++
++struct asrc_pair_params {
++ enum asrc_pair_index index;
++ struct completion input_complete;
++ struct completion output_complete;
++ struct dma_chan *input_dma_channel;
++ struct dma_chan *output_dma_channel;
++ unsigned int input_buffer_size;
++ unsigned int output_buffer_size;
++ unsigned int buffer_num;
++ unsigned int pair_hold;
++ unsigned int asrc_active;
++ unsigned int channel_nums;
++ struct dma_block input_dma_total;
++ struct dma_block input_dma[ASRC_DMA_BUFFER_NUM];
++ struct dma_block output_dma_total;
++ struct dma_block output_dma[ASRC_DMA_BUFFER_NUM];
++ struct dma_block output_last_period;
++ struct dma_async_tx_descriptor *desc_in;
++ struct dma_async_tx_descriptor *desc_out;
++ unsigned int input_sg_nodes;
++ unsigned int output_sg_nodes;
++ struct scatterlist input_sg[4], output_sg[4];
++ enum asrc_word_width input_word_width;
++ enum asrc_word_width output_word_width;
++ u32 input_sample_rate;
++ u32 output_sample_rate;
++ u32 input_wm;
++ u32 output_wm;
++ unsigned int last_period_sample;
++};
++
++struct asrc_data {
++ struct asrc_pair_params *params[ASRC_PAIR_MAX_NUM];
++ struct asrc_pair asrc_pair[ASRC_PAIR_MAX_NUM];
++ struct proc_dir_entry *proc_asrc;
++ struct class *asrc_class;
++ struct regmap *regmap;
++ struct clk *mem_clk;
++ struct clk *ipg_clk;
++ struct clk *asrck_clk;
++ struct clk *dma_clk;
++ unsigned long paddr;
++ unsigned int channel_bits;
++ int asrc_major;
++ int irq;
++ struct device *dev;
++};
++
++struct asrc_p2p_ops {
++ void (*asrc_p2p_start_conv)(enum asrc_pair_index);
++ void (*asrc_p2p_stop_conv)(enum asrc_pair_index);
++ int (*asrc_p2p_get_dma_request)(enum asrc_pair_index, bool);
++ u32 (*asrc_p2p_per_addr)(enum asrc_pair_index, bool);
++ int (*asrc_p2p_req_pair)(int, enum asrc_pair_index *index);
++ int (*asrc_p2p_config_pair)(struct asrc_config *config);
++ void (*asrc_p2p_release_pair)(enum asrc_pair_index);
++ void (*asrc_p2p_finish_conv)(enum asrc_pair_index);
++};
++
++extern void asrc_p2p_hook(struct asrc_p2p_ops *asrc_p2p_ct);
++
++extern int asrc_req_pair(int chn_num, enum asrc_pair_index *index);
++extern void asrc_release_pair(enum asrc_pair_index index);
++extern int asrc_config_pair(struct asrc_config *config);
++extern void asrc_get_status(struct asrc_status_flags *flags);
++extern void asrc_start_conv(enum asrc_pair_index index);
++extern void asrc_stop_conv(enum asrc_pair_index index);
++extern u32 asrc_get_per_addr(enum asrc_pair_index index, bool i);
++extern int asrc_get_dma_request(enum asrc_pair_index index, bool i);
++extern void asrc_finish_conv(enum asrc_pair_index index);
++extern int asrc_set_watermark(enum asrc_pair_index index,
++ u32 in_wm, u32 out_wm);
++
++#endif/* __MXC_ASRC_H__ */
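A stand-alone sketch of how the indexed data-register macros above resolve per pair; the macro bodies are copied from the header (with the argument parenthesized for hygiene) so the snippet compiles on its own, and pair A/B/C map to x = 0/1/2 per enum asrc_pair_index:

#include <stdio.h>

/* Mirrors REG_ASRDI()/REG_ASRDO() from mxc_asrc.h above. */
#define REG_ASRDIA	0x60
#define REG_ASRDOA	0x64
#define REG_ASRDI(x)	(REG_ASRDIA + ((x) << 3))
#define REG_ASRDO(x)	(REG_ASRDOA + ((x) << 3))

int main(void)
{
	int pair;

	/* Expected: DI = 0x60/0x68/0x70 and DO = 0x64/0x6C/0x74,
	 * matching REG_ASRDIA..REG_ASRDOC in the header. */
	for (pair = 0; pair < 3; pair++)
		printf("pair %c: DI=0x%02X DO=0x%02X\n",
		       'A' + pair, REG_ASRDI(pair), REG_ASRDO(pair));
	return 0;
}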
+diff -Nur linux-4.1.3/include/linux/mxc_dcic.h linux-xbian-imx6/include/linux/mxc_dcic.h
+--- linux-4.1.3/include/linux/mxc_dcic.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/linux/mxc_dcic.h 2015-07-27 23:13:09.606706653 +0200
+@@ -0,0 +1,139 @@
++/*
++ * Copyright (C) 2014 Freescale Semiconductor, Inc. All Rights Reserved
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++/*!
++ * @file linux/mxc_dcic.h
++ *
++ * @brief Global header file for the MXC DCIC driver
++ *
++ * @ingroup MXC DCIC
++ */
++
++#ifndef __LINUX_DCIC_H__
++#define __LINUX_DCIC_H__
++
++#include <uapi/linux/mxc_dcic.h>
++
++#define DCICC_IC_ENABLE 0x1
++#define DCICC_IC_DISABLE 0x0
++#define DCICC_IC_MASK 0x1
++#define DCICC_DE_ACTIVE_HIGH 0
++#define DCICC_DE_ACTIVE_LOW (0x1 << 4)
++#define DCICC_DE_ACTIVE_MASK (0x1 << 4)
++#define DCICC_HSYNC_POL_ACTIVE_HIGH 0
++#define DCICC_HSYNC_POL_ACTIVE_LOW (0x1 << 5)
++#define DCICC_HSYNC_POL_ACTIVE_MASK (0x1 << 5)
++#define DCICC_VSYNC_POL_ACTIVE_HIGH 0
++#define DCICC_VSYNC_POL_ACTIVE_LOW (0x1 << 6)
++#define DCICC_VSYNC_POL_ACTIVE_MASK (0x1 << 6)
++#define DCICC_CLK_POL_NO_INVERTED 0
++#define DCICC_CLK_POL_INVERTED (0x1 << 7)
++#define DCICC_CLK_POL_INVERTED_MASK (0x1 << 7)
++
++#define DCICIC_ERROR_INT_DISABLE 1
++#define DCICIC_ERROR_INT_ENABLE 0
++#define DCICIC_ERROR_INT_MASK_MASK 1
++#define DCICIC_FUN_INT_DISABLE (0x1 << 1)
++#define DCICIC_FUN_INT_ENABLE 0
++#define DCICIC_FUN_INT_MASK (0x1 << 1)
++#define DCICIC_FREEZE_MASK_CHANGED 0
++#define DCICIC_FREEZE_MASK_FORZEN (0x1 << 3)
++#define DCICIC_FREEZE_MASK_MASK (0x1 << 3)
++#define DCICIC_EXT_SIG_EX_DISABLE 0
++#define DCICIC_EXT_SIG_EN_ENABLE (0x1 << 16)
++#define DCICIC_EXT_SIG_EN_MASK (0x1 << 16)
++
++#define DCICS_ROI_MATCH_STAT_MASK 0xFFFF
++#define DCICS_EI_STAT_PENDING (0x1 << 16)
++#define DCICS_EI_STAT_NO_PENDING 0
++#define DCICS_FI_STAT_PENDING (0x1 << 17)
++#define DCICS_FI_STAT_NO_PENDING 0
++
++#define DCICRC_ROI_START_OFFSET_X_MASK 0x1FFF
++#define DCICRC_ROI_START_OFFSET_X_SHIFT 0
++#define DCICRC_ROI_START_OFFSET_Y_MASK (0xFFF << 16)
++#define DCICRC_ROI_START_OFFSET_Y_SHIFT 16
++#define DCICRC_ROI_CHANGED 0
++#define DCICRC_ROI_FROZEN (0x1 << 30)
++#define DCICRC_ROI_ENABLE (0x1 << 31)
++#define DCICRC_ROI_DISABLE 0
++
++#define DCICRS_ROI_END_OFFSET_X_MASK 0x1FFF
++#define DCICRS_ROI_END_OFFSET_X_SHIFT 0
++#define DCICRS_ROI_END_OFFSET_Y_MASK (0xFFF << 16)
++#define DCICRS_ROI_END_OFFSET_Y_SHIFT 16
++
++struct roi_regs {
++ u32 dcicrc;
++ u32 dcicrs;
++ u32 dcicrrs;
++ u32 dcicrcs;
++};
++
++struct dcic_regs {
++ u32 dcicc;
++ u32 dcicic;
++ u32 dcics;
++ u32 dcic_reserved;
++ struct roi_regs ROI[16];
++};
++
++struct dcic_mux {
++ char dcic[16];
++ u32 val;
++};
++
++struct bus_mux {
++ char name[16];
++ int reg;
++ int shift;
++ int mask;
++ int dcic_mux_num;
++ const struct dcic_mux *dcics;
++};
++
++struct dcic_info {
++ int bus_mux_num;
++ const struct bus_mux *buses;
++};
++
++struct dcic_data {
++ struct regmap *regmap;
++ struct device *dev;
++ struct dcic_regs *regs;
++ const struct bus_mux *buses;
++ u32 bus_n;
++ u32 mux_n;
++ struct clk *disp_axi_clk;
++ struct clk *dcic_clk;
++ struct mutex lock;
++ struct completion roi_crc_comp;
++ struct class *class;
++ int major;
++ struct cdev cdev; /* Char device structure */
++ dev_t devt;
++ unsigned int result;
++};
++
++struct dcic_private {
++ struct dcic_data *dcic;
++ u16 client_id;
++};
++#endif
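For reference, a minimal sketch of packing an ROI start coordinate into a DCICRC register value using the field macros above; the defines are repeated so the snippet is self-contained, and the coordinates are made up:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the DCICRC field macros from mxc_dcic.h above. */
#define DCICRC_ROI_START_OFFSET_X_SHIFT	0
#define DCICRC_ROI_START_OFFSET_X_MASK	0x1FFF
#define DCICRC_ROI_START_OFFSET_Y_SHIFT	16
#define DCICRC_ROI_START_OFFSET_Y_MASK	(0xFFF << 16)
#define DCICRC_ROI_ENABLE		(0x1u << 31)

/* Pack an ROI start coordinate (x is 13 bits, y is 12 bits) and set
 * the enable bit, as a DCIC driver would before writing dcicrc. */
static uint32_t dcicrc_pack(uint32_t x, uint32_t y)
{
	uint32_t val = 0;

	val |= (x << DCICRC_ROI_START_OFFSET_X_SHIFT) & DCICRC_ROI_START_OFFSET_X_MASK;
	val |= (y << DCICRC_ROI_START_OFFSET_Y_SHIFT) & DCICRC_ROI_START_OFFSET_Y_MASK;
	return val | DCICRC_ROI_ENABLE;
}

int main(void)
{
	printf("DCICRC = 0x%08X\n", dcicrc_pack(100, 50)); /* 0x80320064 */
	return 0;
}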
+diff -Nur linux-4.1.3/include/linux/mxc_vpu.h linux-xbian-imx6/include/linux/mxc_vpu.h
+--- linux-4.1.3/include/linux/mxc_vpu.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/linux/mxc_vpu.h 2015-07-27 23:13:09.606706653 +0200
+@@ -0,0 +1,112 @@
++/*
++ * Copyright 2004-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU Lesser General
++ * Public License. You may obtain a copy of the GNU Lesser General
++ * Public License Version 2.1 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/lgpl-license.html
++ * http://www.gnu.org/copyleft/lgpl.html
++ */
++
++/*!
++ * @defgroup VPU Video Processor Unit Driver
++ */
++
++/*!
++ * @file linux/mxc_vpu.h
++ *
++ * @brief VPU system initialization and file operation definition
++ *
++ * @ingroup VPU
++ */
++
++#ifndef __LINUX_MXC_VPU_H__
++#define __LINUX_MXC_VPU_H__
++
++#include <linux/fs.h>
++
++struct mxc_vpu_platform_data {
++ bool iram_enable;
++ int iram_size;
++ void (*reset) (void);
++ void (*pg) (int);
++};
++
++struct vpu_mem_desc {
++ u32 size;
++ dma_addr_t phy_addr;
++ u32 cpu_addr; /* cpu address to free the dma mem */
++ u32 virt_uaddr; /* virtual user space address */
++};
++
++#define VPU_IOC_MAGIC 'V'
++
++#define VPU_IOC_PHYMEM_ALLOC _IO(VPU_IOC_MAGIC, 0)
++#define VPU_IOC_PHYMEM_FREE _IO(VPU_IOC_MAGIC, 1)
++#define VPU_IOC_WAIT4INT _IO(VPU_IOC_MAGIC, 2)
++#define VPU_IOC_PHYMEM_DUMP _IO(VPU_IOC_MAGIC, 3)
++#define VPU_IOC_REG_DUMP _IO(VPU_IOC_MAGIC, 4)
++#define VPU_IOC_IRAM_SETTING _IO(VPU_IOC_MAGIC, 6)
++#define VPU_IOC_CLKGATE_SETTING _IO(VPU_IOC_MAGIC, 7)
++#define VPU_IOC_GET_WORK_ADDR _IO(VPU_IOC_MAGIC, 8)
++#define VPU_IOC_REQ_VSHARE_MEM _IO(VPU_IOC_MAGIC, 9)
++#define VPU_IOC_SYS_SW_RESET _IO(VPU_IOC_MAGIC, 11)
++#define VPU_IOC_GET_SHARE_MEM _IO(VPU_IOC_MAGIC, 12)
++#define VPU_IOC_QUERY_BITWORK_MEM _IO(VPU_IOC_MAGIC, 13)
++#define VPU_IOC_SET_BITWORK_MEM _IO(VPU_IOC_MAGIC, 14)
++#define VPU_IOC_PHYMEM_CHECK _IO(VPU_IOC_MAGIC, 15)
++#define VPU_IOC_LOCK_DEV _IO(VPU_IOC_MAGIC, 16)
++
++#define BIT_CODE_RUN 0x000
++#define BIT_CODE_DOWN 0x004
++#define BIT_INT_CLEAR 0x00C
++#define BIT_INT_STATUS 0x010
++#define BIT_CUR_PC 0x018
++#define BIT_INT_REASON 0x174
++
++#define MJPEG_PIC_STATUS_REG 0x3004
++#define MBC_SET_SUBBLK_EN 0x4A0
++
++#define BIT_WORK_CTRL_BUF_BASE 0x100
++#define BIT_WORK_CTRL_BUF_REG(i) (BIT_WORK_CTRL_BUF_BASE + i * 4)
++#define BIT_CODE_BUF_ADDR BIT_WORK_CTRL_BUF_REG(0)
++#define BIT_WORK_BUF_ADDR BIT_WORK_CTRL_BUF_REG(1)
++#define BIT_PARA_BUF_ADDR BIT_WORK_CTRL_BUF_REG(2)
++#define BIT_BIT_STREAM_CTRL BIT_WORK_CTRL_BUF_REG(3)
++#define BIT_FRAME_MEM_CTRL BIT_WORK_CTRL_BUF_REG(4)
++#define BIT_BIT_STREAM_PARAM BIT_WORK_CTRL_BUF_REG(5)
++
++#ifndef CONFIG_SOC_IMX6Q
++#define BIT_RESET_CTRL 0x11C
++#else
++#define BIT_RESET_CTRL 0x128
++#endif
++
++/* i could be 0, 1, 2, 3 */
++#define BIT_RD_PTR_BASE 0x120
++#define BIT_RD_PTR_REG(i) (BIT_RD_PTR_BASE + i * 8)
++#define BIT_WR_PTR_REG(i) (BIT_RD_PTR_BASE + i * 8 + 4)
++
++/* i could be 0, 1, 2, 3 */
++#define BIT_FRM_DIS_FLG_BASE (cpu_is_mx51() ? 0x150 : 0x140)
++#define BIT_FRM_DIS_FLG_REG(i) (BIT_FRM_DIS_FLG_BASE + i * 4)
++
++#define BIT_BUSY_FLAG 0x160
++#define BIT_RUN_COMMAND 0x164
++#define BIT_INT_ENABLE 0x170
++
++#define BITVAL_PIC_RUN 8
++
++#define VPU_SLEEP_REG_VALUE 10
++#define VPU_WAKE_REG_VALUE 11
++
++int vl2cc_init(u32 vl2cc_hw_base);
++void vl2cc_enable(void);
++void vl2cc_flush(void);
++void vl2cc_disable(void);
++void vl2cc_cleanup(void);
++
++#endif
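Userspace reaches this driver through plain ioctls on a character device. A rough sketch follows; both the /dev/mxc_vpu node name and the pass-by-value timeout convention for VPU_IOC_WAIT4INT are assumptions about the driver, not guaranteed by this header:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define VPU_IOC_MAGIC	'V'
#define VPU_IOC_WAIT4INT _IO(VPU_IOC_MAGIC, 2)

int main(void)
{
	int fd = open("/dev/mxc_vpu", O_RDWR); /* node name is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Block until the VPU raises an interrupt or ~1s elapses; the
	 * timeout argument convention is assumed here. */
	if (ioctl(fd, VPU_IOC_WAIT4INT, 1000) < 0)
		perror("VPU_IOC_WAIT4INT");

	close(fd);
	return 0;
}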
+diff -Nur linux-4.1.3/include/linux/platform_data/dma-imx.h linux-xbian-imx6/include/linux/platform_data/dma-imx.h
+--- linux-4.1.3/include/linux/platform_data/dma-imx.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/include/linux/platform_data/dma-imx.h 2015-07-27 23:13:09.622649772 +0200
+@@ -17,31 +17,30 @@
+ * This enumerates peripheral types. Used for SDMA.
+ */
+ enum sdma_peripheral_type {
+- IMX_DMATYPE_SSI, /* MCU domain SSI */
+- IMX_DMATYPE_SSI_SP, /* Shared SSI */
+- IMX_DMATYPE_MMC, /* MMC */
+- IMX_DMATYPE_SDHC, /* SDHC */
+- IMX_DMATYPE_UART, /* MCU domain UART */
+- IMX_DMATYPE_UART_SP, /* Shared UART */
+- IMX_DMATYPE_FIRI, /* FIRI */
+- IMX_DMATYPE_CSPI, /* MCU domain CSPI */
+- IMX_DMATYPE_CSPI_SP, /* Shared CSPI */
+- IMX_DMATYPE_SIM, /* SIM */
+- IMX_DMATYPE_ATA, /* ATA */
+- IMX_DMATYPE_CCM, /* CCM */
+- IMX_DMATYPE_EXT, /* External peripheral */
+- IMX_DMATYPE_MSHC, /* Memory Stick Host Controller */
+- IMX_DMATYPE_MSHC_SP, /* Shared Memory Stick Host Controller */
+- IMX_DMATYPE_DSP, /* DSP */
+- IMX_DMATYPE_MEMORY, /* Memory */
+- IMX_DMATYPE_FIFO_MEMORY,/* FIFO type Memory */
+- IMX_DMATYPE_SPDIF, /* SPDIF */
+- IMX_DMATYPE_IPU_MEMORY, /* IPU Memory */
+- IMX_DMATYPE_ASRC, /* ASRC */
+- IMX_DMATYPE_ESAI, /* ESAI */
+- IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */
+- IMX_DMATYPE_ASRC_SP, /* Shared ASRC */
+- IMX_DMATYPE_SAI, /* SAI */
++ IMX_DMATYPE_SSI, /* MCU domain SSI */
++ IMX_DMATYPE_SSI_SP, /* Shared SSI */
++ IMX_DMATYPE_MMC, /* MMC */
++ IMX_DMATYPE_SDHC, /* SDHC */
++ IMX_DMATYPE_UART, /* MCU domain UART */
++ IMX_DMATYPE_UART_SP, /* Shared UART */
++ IMX_DMATYPE_FIRI, /* FIRI */
++ IMX_DMATYPE_CSPI, /* MCU domain CSPI */
++ IMX_DMATYPE_CSPI_SP, /* Shared CSPI */
++ IMX_DMATYPE_SIM, /* SIM */
++ IMX_DMATYPE_ATA, /* ATA */
++ IMX_DMATYPE_CCM, /* CCM */
++ IMX_DMATYPE_EXT, /* External peripheral */
++ IMX_DMATYPE_MSHC, /* Memory Stick Host Controller */
++ IMX_DMATYPE_MSHC_SP, /* Shared Memory Stick Host Controller */
++ IMX_DMATYPE_DSP, /* DSP */
++ IMX_DMATYPE_MEMORY, /* Memory */
++ IMX_DMATYPE_FIFO_MEMORY,/* FIFO type Memory */
++ IMX_DMATYPE_SPDIF, /* SPDIF */
++ IMX_DMATYPE_IPU_MEMORY, /* IPU Memory */
++ IMX_DMATYPE_ASRC, /* ASRC */
++ IMX_DMATYPE_ESAI, /* ESAI */
++ IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */
++ IMX_DMATYPE_HDMI, /* HDMI Audio */
+ };
+
+ enum imx_dma_prio {
+@@ -55,6 +54,7 @@
+ int dma_request2; /* secondary DMA request line */
+ enum sdma_peripheral_type peripheral_type;
+ int priority;
++ void *data_addr1, *data_addr2;
+ };
+
+ static inline int imx_dma_is_ipu(struct dma_chan *chan)
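This struct is normally consumed through a dmaengine filter function. A hedged sketch of the standard i.MX SDMA pattern, using the IMX_DMATYPE_HDMI type this hunk adds; the event id and the request_hdmi_chan helper name are made up for illustration:

#include <linux/dmaengine.h>
#include <linux/platform_data/dma-imx.h>

static bool sdma_filter(struct dma_chan *chan, void *param)
{
	/* Only general purpose SDMA channels qualify; hand the channel
	 * our imx_dma_data via chan->private, as i.MX drivers do. */
	if (!imx_dma_is_general_purpose(chan))
		return false;
	chan->private = param;
	return true;
}

static struct dma_chan *request_hdmi_chan(int event_id)
{
	static struct imx_dma_data data; /* must outlive the channel */
	dma_cap_mask_t mask;

	data.dma_request = event_id;
	data.peripheral_type = IMX_DMATYPE_HDMI; /* added by this patch */
	data.priority = DMA_PRIO_HIGH;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, sdma_filter, &data);
}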
+diff -Nur linux-4.1.3/include/linux/pxp_device.h linux-xbian-imx6/include/linux/pxp_device.h
+--- linux-4.1.3/include/linux/pxp_device.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/linux/pxp_device.h 2015-07-27 23:13:09.634607108 +0200
+@@ -0,0 +1,68 @@
++/*
++ * Copyright (C) 2013-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#ifndef _PXP_DEVICE
++#define _PXP_DEVICE
++
++#include <linux/idr.h>
++#include <linux/hash.h>
++#include <uapi/linux/pxp_device.h>
++
++struct pxp_irq_info {
++ wait_queue_head_t waitq;
++ atomic_t irq_pending;
++ int hist_status;
++};
++
++struct pxp_buffer_hash {
++ struct hlist_head *hash_table;
++ u32 order;
++ spinlock_t hash_lock;
++};
++
++struct pxp_buf_obj {
++ uint32_t handle;
++
++ uint32_t size;
++ uint32_t mem_type;
++
++ unsigned long offset;
++ void *virtual;
++
++ struct hlist_node item;
++};
++
++struct pxp_chan_obj {
++ uint32_t handle;
++ struct dma_chan *chan;
++};
++
++/* File private data */
++struct pxp_file {
++ struct file *filp;
++
++ /* record allocated dma buffer */
++ struct idr buffer_idr;
++ spinlock_t buffer_lock;
++
++ /* record allocated dma channel */
++ struct idr channel_idr;
++ spinlock_t channel_lock;
++};
++
++#endif
+diff -Nur linux-4.1.3/include/linux/pxp_dma.h linux-xbian-imx6/include/linux/pxp_dma.h
+--- linux-4.1.3/include/linux/pxp_dma.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/linux/pxp_dma.h 2015-07-27 23:13:09.634607108 +0200
+@@ -0,0 +1,72 @@
++/*
++ * Copyright (C) 2010-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#ifndef _PXP_DMA
++#define _PXP_DMA
++
++#include <uapi/linux/pxp_dma.h>
++
++struct pxp_tx_desc {
++ struct dma_async_tx_descriptor txd;
++ struct list_head tx_list;
++ struct list_head list;
++ int len;
++ union {
++ struct pxp_layer_param s0_param;
++ struct pxp_layer_param out_param;
++ struct pxp_layer_param ol_param;
++ } layer_param;
++ struct pxp_proc_data proc_data;
++
++ u32 hist_status; /* Histogram output status */
++
++ struct pxp_tx_desc *next;
++};
++
++struct pxp_channel {
++ struct dma_chan dma_chan;
++ dma_cookie_t completed; /* last completed cookie */
++ enum pxp_channel_status status;
++ void *client; /* Only one client per channel */
++ unsigned int n_tx_desc;
++ struct pxp_tx_desc *desc; /* allocated tx-descriptors */
++ struct list_head queue; /* queued tx-descriptors */
++ struct list_head list; /* track queued channel number */
++ spinlock_t lock; /* protects sg[0,1], queue,
++ * status, cookie, free_list
++ */
++ int active_buffer;
++ unsigned int eof_irq;
++ char eof_name[16]; /* EOF IRQ name for request_irq() */
++};
++
++#define to_tx_desc(tx) container_of(tx, struct pxp_tx_desc, txd)
++#define to_pxp_channel(d) container_of(d, struct pxp_channel, dma_chan)
++
++void pxp_txd_ack(struct dma_async_tx_descriptor *txd,
++ struct pxp_channel *pxp_chan);
++
++#ifdef CONFIG_MXC_PXP_CLIENT_DEVICE
++int register_pxp_device(void);
++void unregister_pxp_device(void);
++#else
++static inline int register_pxp_device(void) { return 0; }
++static inline void unregister_pxp_device(void) {}
++#endif
++
++#endif
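The to_pxp_channel()/to_tx_desc() helpers above are plain container_of(). A stand-alone illustration of the pattern with stub types (the real structs live in the header above):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_chan { int id; };
struct pxp_channel { int status; struct dma_chan dma_chan; };

#define to_pxp_channel(d) container_of(d, struct pxp_channel, dma_chan)

int main(void)
{
	struct pxp_channel pc = { .status = 7, .dma_chan = { .id = 3 } };
	struct dma_chan *d = &pc.dma_chan;

	/* Recover the enclosing pxp_channel from its embedded dma_chan,
	 * which is exactly what the PxP driver's DMA callbacks do. */
	printf("status = %d\n", to_pxp_channel(d)->status); /* prints 7 */
	return 0;
}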
+diff -Nur linux-4.1.3/include/mmc-mxcmmc.h linux-xbian-imx6/include/mmc-mxcmmc.h
+--- linux-4.1.3/include/mmc-mxcmmc.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/mmc-mxcmmc.h 2015-07-27 23:13:09.770123601 +0200
+@@ -0,0 +1,39 @@
++#ifndef ASMARM_ARCH_MMC_H
++#define ASMARM_ARCH_MMC_H
++
++#include <linux/mmc/host.h>
++
++struct device;
++
++/* board specific SDHC data, optional.
++ * If not present, a writable card with 3,3V is assumed.
++ */
++struct imxmmc_platform_data {
++ /* Return values for the get_ro callback should be:
++ * 0 for a read/write card
++ * 1 for a read-only card
++ * -ENOSYS when not supported (equal to NULL callback)
++ * or a negative errno value when something bad happened
++ */
++ int (*get_ro)(struct device *);
++
++ /* board specific hook to (de)initialize the SD slot.
++ * The board code can call 'handler' on a card detection
++ * change giving data as argument.
++ */
++ int (*init)(struct device *dev, irq_handler_t handler, void *data);
++ void (*exit)(struct device *dev, void *data);
++
++ /* available voltages. If not given, assume
++ * MMC_VDD_32_33 | MMC_VDD_33_34
++ */
++ unsigned int ocr_avail;
++
++ /* adjust slot voltage */
++ void (*setpower)(struct device *, unsigned int vdd);
++
++ /* enable card detect using DAT3 */
++ int dat3_card_detect;
++};
++
++#endif
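A sketch of how board code might fill this struct; the callback and symbol names are invented for illustration, and the include path follows where this patch installs the header:

#include <linux/mmc/host.h>
#include "mmc-mxcmmc.h"	/* the header added above */

/* Hypothetical board: slot is always read/write, detected via DAT3. */
static int board_sdhc_get_ro(struct device *dev)
{
	return 0;
}

static struct imxmmc_platform_data board_sdhc_pdata = {
	.get_ro		= board_sdhc_get_ro,
	.ocr_avail	= MMC_VDD_32_33 | MMC_VDD_33_34,
	.dat3_card_detect = 1,
};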
+diff -Nur linux-4.1.3/include/mtd-mxc_nand.h linux-xbian-imx6/include/mtd-mxc_nand.h
+--- linux-4.1.3/include/mtd-mxc_nand.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/mtd-mxc_nand.h 2015-07-27 23:13:09.770123601 +0200
+@@ -0,0 +1,32 @@
++/*
++ * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
++ * Copyright 2008 Sascha Hauer, kernel@pengutronix.de
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version 2
++ * of the License, or (at your option) any later version.
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
++ * MA 02110-1301, USA.
++ */
++
++#ifndef __ASM_ARCH_NAND_H
++#define __ASM_ARCH_NAND_H
++
++#include <linux/mtd/partitions.h>
++
++struct mxc_nand_platform_data {
++ unsigned int width; /* data bus width in bytes */
++	unsigned int hw_ecc:1;		/* 0 to suppress hardware ECC */
++ unsigned int flash_bbt:1; /* set to 1 to use a flash based bbt */
++ struct mtd_partition *parts; /* partition table */
++ int nr_parts; /* size of parts */
++};
++#endif /* __ASM_ARCH_NAND_H */
+diff -Nur linux-4.1.3/include/sound/soc.h linux-xbian-imx6/include/sound/soc.h
+--- linux-4.1.3/include/sound/soc.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/include/sound/soc.h 2015-07-27 23:13:09.837881847 +0200
+@@ -28,6 +28,18 @@
+ #include <sound/control.h>
+ #include <sound/ac97_codec.h>
+
++/*enum snd_soc_control_type {
++ SND_SOC_CUSTOM,
++ SND_SOC_I2C,
++ SND_SOC_SPI,
++};*/
++
++enum snd_soc_control_type {
++ SND_SOC_I2C = 1,
++ SND_SOC_SPI,
++ SND_SOC_REGMAP,
++};
++
+ /*
+ * Convenience kcontrol builders
+ */
+@@ -805,16 +817,24 @@
+
+ struct list_head list;
+ struct list_head card_list;
++ int (*volatile_register)(struct snd_soc_codec *, unsigned int);
+
+ /* runtime */
+ unsigned int cache_bypass:1; /* Suppress access to the cache */
+ unsigned int suspended:1; /* Codec is in suspend PM state */
+ unsigned int cache_init:1; /* codec cache has been initialized */
++ unsigned int using_regmap:1; /* using regmap access */
++ u32 cache_only; /* Suppress writes to hardware */
++ u32 cache_sync; /* Cache needs to be synced to hardware */
+
+ /* codec IO */
+ void *control_data; /* codec control (i2c/3wire) data */
+ hw_write_t hw_write;
++ unsigned int (*read)(struct snd_soc_codec *, unsigned int);
++ int (*write)(struct snd_soc_codec *, unsigned int, unsigned int);
+ void *reg_cache;
++ struct mutex cache_rw_mutex;
++ int val_bytes;
+
+ /* component */
+ struct snd_soc_component component;
+diff -Nur linux-4.1.3/include/sync.h linux-xbian-imx6/include/sync.h
+--- linux-4.1.3/include/sync.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/sync.h 2015-07-27 23:13:09.837881847 +0200
+@@ -0,0 +1,356 @@
++/*
++ * include/linux/sync.h
++ *
++ * Copyright (C) 2012 Google, Inc.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ */
++
++#ifndef _LINUX_SYNC_H
++#define _LINUX_SYNC_H
++
++#include <linux/types.h>
++#include <linux/kref.h>
++#include <linux/ktime.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/wait.h>
++#include <linux/fence.h>
++
++#include "uapi/sync.h"
++
++struct sync_timeline;
++struct sync_pt;
++struct sync_fence;
++
++/**
++ * struct sync_timeline_ops - sync object implementation ops
++ * @driver_name: name of the implementation
++ * @dup: duplicate a sync_pt
++ * @has_signaled: returns:
++ * 1 if pt has signaled
++ * 0 if pt has not signaled
++ * <0 on error
++ * @compare: returns:
++ * 1 if b will signal before a
++ * 0 if a and b will signal at the same time
++ * -1 if a will signal before b
++ * @free_pt: called before sync_pt is freed
++ * @release_obj: called before sync_timeline is freed
++ * @fill_driver_data: write implementation specific driver data to data.
++ * should return an error if there is not enough room
++ * as specified by size. This information is returned
++ * to userspace by SYNC_IOC_FENCE_INFO.
++ * @timeline_value_str: fill str with the value of the sync_timeline's counter
++ * @pt_value_str: fill str with the value of the sync_pt
++ */
++struct sync_timeline_ops {
++ const char *driver_name;
++
++ /* required */
++ struct sync_pt * (*dup)(struct sync_pt *pt);
++
++ /* required */
++ int (*has_signaled)(struct sync_pt *pt);
++
++ /* required */
++ int (*compare)(struct sync_pt *a, struct sync_pt *b);
++
++ /* optional */
++ void (*free_pt)(struct sync_pt *sync_pt);
++
++ /* optional */
++ void (*release_obj)(struct sync_timeline *sync_timeline);
++
++ /* optional */
++ int (*fill_driver_data)(struct sync_pt *syncpt, void *data, int size);
++
++ /* optional */
++ void (*timeline_value_str)(struct sync_timeline *timeline, char *str,
++ int size);
++
++ /* optional */
++ void (*pt_value_str)(struct sync_pt *pt, char *str, int size);
++};
++
++/**
++ * struct sync_timeline - sync object
++ * @kref: reference count on fence.
++ * @ops: ops that define the implementation of the sync_timeline
++ * @name: name of the sync_timeline. Useful for debugging
++ * @destroyed: set when sync_timeline is destroyed
++ * @child_list_head: list of children sync_pts for this sync_timeline
++ * @child_list_lock: lock protecting @child_list_head, destroyed, and
++ * sync_pt.status
++ * @active_list_head: list of active (unsignaled/errored) sync_pts
++ * @sync_timeline_list: membership in global sync_timeline_list
++ */
++struct sync_timeline {
++ struct kref kref;
++ const struct sync_timeline_ops *ops;
++ char name[32];
++
++ /* protected by child_list_lock */
++ bool destroyed;
++ int context, value;
++
++ struct list_head child_list_head;
++ spinlock_t child_list_lock;
++
++ struct list_head active_list_head;
++
++#ifdef CONFIG_DEBUG_FS
++ struct list_head sync_timeline_list;
++#endif
++};
++
++/**
++ * struct sync_pt - sync point
++ * @fence: base fence class
++ * @child_list: membership in sync_timeline.child_list_head
++ * @active_list: membership in sync_timeline.active_list_head
++ * @signaled_list: membership in temporary signaled_list on stack
++ * @fence: sync_fence to which the sync_pt belongs
++ * @pt_list: membership in sync_fence.pt_list_head
++ * @status: 1: signaled, 0:active, <0: error
++ * @timestamp: time which sync_pt status transitioned from active to
++ * signaled or error.
++ */
++struct sync_pt {
++ struct fence base;
++
++ struct list_head child_list;
++ struct list_head active_list;
++};
++
++static inline struct sync_timeline *sync_pt_parent(struct sync_pt *pt)
++{
++ return container_of(pt->base.lock, struct sync_timeline,
++ child_list_lock);
++}
++
++struct sync_fence_cb {
++ struct fence_cb cb;
++ struct fence *sync_pt;
++ struct sync_fence *fence;
++};
++
++/**
++ * struct sync_fence - sync fence
++ * @file: file representing this fence
++ * @kref: reference count on fence.
++ * @name: name of sync_fence. Useful for debugging
++ * @pt_list_head: list of sync_pts in the fence. immutable once fence
++ * is created
++ * @status: 0: signaled, >0:active, <0: error
++ *
++ * @wq: wait queue for fence signaling
++ * @sync_fence_list: membership in global fence list
++ */
++struct sync_fence {
++ struct file *file;
++ struct kref kref;
++ char name[32];
++#ifdef CONFIG_DEBUG_FS
++ struct list_head sync_fence_list;
++#endif
++ int num_fences;
++
++ wait_queue_head_t wq;
++ atomic_t status;
++
++ struct sync_fence_cb cbs[];
++};
++
++struct sync_fence_waiter;
++typedef void (*sync_callback_t)(struct sync_fence *fence,
++ struct sync_fence_waiter *waiter);
++
++/**
++ * struct sync_fence_waiter - metadata for asynchronous waiter on a fence
++ * @waiter_list: membership in sync_fence.waiter_list_head
++ * @callback: function pointer to call when fence signals
++ * @callback_data: pointer to pass to @callback
++ */
++struct sync_fence_waiter {
++ wait_queue_t work;
++ sync_callback_t callback;
++};
++
++static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter,
++ sync_callback_t callback)
++{
++ INIT_LIST_HEAD(&waiter->work.task_list);
++ waiter->callback = callback;
++}
++
++/*
++ * API for sync_timeline implementers
++ */
++
++/**
++ * sync_timeline_create() - creates a sync object
++ * @ops: specifies the implementation ops for the object
++ * @size: size to allocate for this obj
++ * @name: sync_timeline name
++ *
++ * Creates a new sync_timeline which will use the implementation specified by
++ * @ops. @size bytes will be allocated allowing for implementation specific
++ * data to be kept after the generic sync_timeline struct.
++ */
++struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
++ int size, const char *name);
++
++/**
++ * sync_timeline_destroy() - destroys a sync object
++ * @obj: sync_timeline to destroy
++ *
++ * A sync implementation should call this when the @obj is going away
++ * (i.e. module unload.) @obj won't actually be freed until all its children
++ * sync_pts are freed.
++ */
++void sync_timeline_destroy(struct sync_timeline *obj);
++
++/**
++ * sync_timeline_signal() - signal a status change on a sync_timeline
++ * @obj: sync_timeline to signal
++ *
++ * A sync implementation should call this any time one of its sync_pts
++ * has signaled or has an error condition.
++ */
++void sync_timeline_signal(struct sync_timeline *obj);
++
++/**
++ * sync_pt_create() - creates a sync pt
++ * @parent: sync_pt's parent sync_timeline
++ * @size: size to allocate for this pt
++ *
++ * Creates a new sync_pt as a child of @parent. @size bytes will be
++ * allocated allowing for implementation specific data to be kept after
++ * the generic sync_timeline struct.
++ */
++struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size);
++
++/**
++ * sync_pt_free() - frees a sync pt
++ * @pt: sync_pt to free
++ *
++ * This should only be called on sync_pts which have been created but
++ * not added to a fence.
++ */
++void sync_pt_free(struct sync_pt *pt);
++
++/**
++ * sync_fence_create() - creates a sync fence
++ * @name: name of fence to create
++ * @pt: sync_pt to add to the fence
++ *
++ * Creates a fence containing @pt. Once this is called, the fence takes
++ * ownership of @pt.
++ */
++struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt);
++
++/*
++ * API for sync_fence consumers
++ */
++
++/**
++ * sync_fence_merge() - merge two fences
++ * @name: name of new fence
++ * @a: fence a
++ * @b: fence b
++ *
++ * Creates a new fence which contains copies of all the sync_pts in both
++ * @a and @b. @a and @b remain valid, independent fences.
++ */
++struct sync_fence *sync_fence_merge(const char *name,
++ struct sync_fence *a, struct sync_fence *b);
++
++/**
++ * sync_fence_fdget() - get a fence from an fd
++ * @fd: fd referencing a fence
++ *
++ * Ensures @fd references a valid fence, increments the refcount of the backing
++ * file, and returns the fence.
++ */
++struct sync_fence *sync_fence_fdget(int fd);
++
++/**
++ * sync_fence_put() - puts a reference of a sync fence
++ * @fence: fence to put
++ *
++ * Puts a reference on @fence. If this is the last reference, the fence and
++ * all its sync_pts will be freed
++ */
++void sync_fence_put(struct sync_fence *fence);
++
++/**
++ * sync_fence_install() - installs a fence into a file descriptor
++ * @fence: fence to install
++ * @fd: file descriptor in which to install the fence
++ *
++ * Installs @fence into @fd. @fd's should be acquired through
++ * get_unused_fd_flags(O_CLOEXEC).
++ */
++void sync_fence_install(struct sync_fence *fence, int fd);
++
++/**
++ * sync_fence_wait_async() - registers and async wait on the fence
++ * @fence: fence to wait on
++ * @waiter: waiter callback struct
++ *
++ * Returns 1 if @fence has already signaled.
++ *
++ * Registers a callback to be called when @fence signals or has an error.
++ * @waiter should be initialized with sync_fence_waiter_init().
++ */
++int sync_fence_wait_async(struct sync_fence *fence,
++ struct sync_fence_waiter *waiter);
++
++/**
++ * sync_fence_cancel_async() - cancels an async wait
++ * @fence: fence to wait on
++ * @waiter: waiter callback struck
++ *
++ * returns 0 if waiter was removed from fence's async waiter list.
++ * returns -ENOENT if waiter was not found on fence's async waiter list.
++ *
++ * Cancels a previously registered async wait. Will fail gracefully if
++ * @waiter was never registered or if @fence has already signaled @waiter.
++ */
++int sync_fence_cancel_async(struct sync_fence *fence,
++ struct sync_fence_waiter *waiter);
++
++/**
++ * sync_fence_wait() - wait on fence
++ * @fence: fence to wait on
++ * @timeout: timeout in ms
++ *
++ * Wait for @fence to be signaled or have an error. Waits indefinitely
++ * if @timeout < 0
++ */
++int sync_fence_wait(struct sync_fence *fence, long timeout);
++
++#ifdef CONFIG_DEBUG_FS
++
++extern void sync_timeline_debug_add(struct sync_timeline *obj);
++extern void sync_timeline_debug_remove(struct sync_timeline *obj);
++extern void sync_fence_debug_add(struct sync_fence *fence);
++extern void sync_fence_debug_remove(struct sync_fence *fence);
++extern void sync_dump(void);
++
++#else
++# define sync_timeline_debug_add(obj)
++# define sync_timeline_debug_remove(obj)
++# define sync_fence_debug_add(fence)
++# define sync_fence_debug_remove(fence)
++# define sync_dump()
++#endif
++int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
++ int wake_flags, void *key);
++
++#endif /* _LINUX_SYNC_H */
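Taken together, the API above is used roughly like this from a driver that has already created a timeline with sync_timeline_create() and wants to hand userspace a fence fd; this is a sketch of the documented flow, not code from the patch:

#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include "sync.h"

static int example_emit_fence_fd(struct sync_timeline *tl)
{
	struct sync_pt *pt;
	struct sync_fence *fence;
	int fd;

	pt = sync_pt_create(tl, sizeof(*pt));
	if (!pt)
		return -ENOMEM;

	/* The fence owns pt from here on; on failure we may still free
	 * the pt ourselves because it was never added to a fence. */
	fence = sync_fence_create("example-fence", pt);
	if (!fence) {
		sync_pt_free(pt);
		return -ENOMEM;
	}

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0) {
		sync_fence_put(fence);	/* also frees pt */
		return fd;
	}

	sync_fence_install(fence, fd);
	return fd; /* a later sync_timeline_signal(tl) flips it signaled */
}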
+diff -Nur linux-4.1.3/include/uapi/linux/fb.h linux-xbian-imx6/include/uapi/linux/fb.h
+--- linux-4.1.3/include/uapi/linux/fb.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/include/uapi/linux/fb.h 2015-07-27 23:13:09.853824963 +0200
+@@ -217,14 +217,45 @@
+ #define FB_SYNC_ON_GREEN 32 /* sync on green */
+
+ #define FB_VMODE_NONINTERLACED 0 /* non interlaced */
+-#define FB_VMODE_INTERLACED 1 /* interlaced */
+-#define FB_VMODE_DOUBLE 2 /* double scan */
+-#define FB_VMODE_ODD_FLD_FIRST 4 /* interlaced: top line first */
+-#define FB_VMODE_MASK 255
+-
+-#define FB_VMODE_YWRAP 256 /* ywrap instead of panning */
+-#define FB_VMODE_SMOOTH_XPAN 512 /* smooth xpan possible (internally used) */
+-#define FB_VMODE_CONUPDATE 512 /* don't update x/yoffset */
++#define FB_VMODE_INTERLACED BIT(1) /* interlaced */
++#define FB_VMODE_DOUBLE BIT(2) /* double scan */
++#define FB_VMODE_ODD_FLD_FIRST BIT(3) /* interlaced: top line first */
++#define FB_VMODE_MASK_SIMPLE (BIT(1) | \
++ BIT(2))
++/*
++ * mxc_edid is taking 16 and 32 for ASPECT_16_9/4_3
++ */
++#define FB_VMODE_3D_SBS_HALF BIT(4) /* HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF */
++#define FB_VMODE_3D_SBS_FULL BIT(5) /* HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL */
++#define FB_VMODE_3D_TOP_BOTTOM BIT(6) /* HDMI_3D_STRUCTURE_TOP_AND_BOTTOM */
++#define FB_VMODE_3D_FRAME_PACK BIT(7) /* HDMI_3D_STRUCTURE_FRAME_PACKING */
++#define FB_VMODE_3D_MASK (BIT(4) | \
++ BIT(5) | \
++ BIT(6) | \
++ BIT(7))
++
++#define FB_VMODE_YWRAP BIT(8) /* ywrap instead of panning */
++#define FB_VMODE_SMOOTH_XPAN BIT(9) /* smooth xpan possible (internally used) */
++#define FB_VMODE_CONUPDATE BIT(9) /* don't update x/yoffset */
++
++#define FB_VMODE_ASPECT_1 BIT(10)
++#define FB_VMODE_ASPECT_4_3 BIT(11)
++#define FB_VMODE_ASPECT_5_4 BIT(12)
++#define FB_VMODE_ASPECT_16_9 BIT(13)
++#define FB_VMODE_ASPECT_16_10 BIT(14)
++#define FB_VMODE_ASPECT_MASK (BIT(10) | \
++ BIT(11) | \
++ BIT(12) | \
++ BIT(13) | \
++ BIT(14))
++
++#define FB_VMODE_FRACTIONAL BIT(15)
++
++#define FB_VMODE_MASK (FB_VMODE_MASK_SIMPLE | \
++ FB_VMODE_3D_MASK | \
++ FB_VMODE_ASPECT_MASK | \
++ FB_VMODE_FRACTIONAL)
++
+
+ /*
+ * Display rotation support
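A tiny stand-alone check of how the reorganized FB_VMODE bits compose; BIT() is spelled out since it normally comes from the kernel headers, and the values mirror the defines above:

#include <stdio.h>

#define BIT(n)			(1u << (n))
#define FB_VMODE_3D_TOP_BOTTOM	BIT(6)
#define FB_VMODE_3D_MASK	(BIT(4) | BIT(5) | BIT(6) | BIT(7))
#define FB_VMODE_ASPECT_16_9	BIT(13)

int main(void)
{
	unsigned int vmode = FB_VMODE_3D_TOP_BOTTOM | FB_VMODE_ASPECT_16_9;

	/* Mode flags are now independent bits, so the 3D structure and
	 * the aspect ratio can be tested without clobbering each other. */
	if (vmode & FB_VMODE_3D_MASK)
		printf("3D bits: 0x%x\n", vmode & FB_VMODE_3D_MASK); /* 0x40 */
	return 0;
}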
+diff -Nur linux-4.1.3/include/uapi/linux/ipu.h linux-xbian-imx6/include/uapi/linux/ipu.h
+--- linux-4.1.3/include/uapi/linux/ipu.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/uapi/linux/ipu.h 2015-07-27 23:13:09.861796522 +0200
+@@ -0,0 +1,282 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++/*!
++ * @defgroup IPU MXC Image Processing Unit (IPU) Driver
++ */
++/*!
++ * @file uapi/linux/ipu.h
++ *
++ * @brief This file contains the IPU driver API declarations.
++ *
++ * @ingroup IPU
++ */
++
++#ifndef __ASM_ARCH_IPU_H__
++#define __ASM_ARCH_IPU_H__
++
++#include <linux/types.h>
++#include <linux/videodev2.h>
++
++#ifndef __KERNEL__
++#ifndef __cplusplus
++typedef unsigned char bool;
++#endif
++#define irqreturn_t int
++#define dma_addr_t int
++#define uint32_t unsigned int
++#define uint16_t unsigned short
++#define uint8_t unsigned char
++#define u32 unsigned int
++#define u8 unsigned char
++#define __u32 u32
++#endif
++
++/*!
++ * Enumeration of IPU rotation modes
++ */
++typedef enum {
++ /* Note the enum values correspond to BAM value */
++ IPU_ROTATE_NONE = 0,
++ IPU_ROTATE_VERT_FLIP = 1,
++ IPU_ROTATE_HORIZ_FLIP = 2,
++ IPU_ROTATE_180 = 3,
++ IPU_ROTATE_90_RIGHT = 4,
++ IPU_ROTATE_90_RIGHT_VFLIP = 5,
++ IPU_ROTATE_90_RIGHT_HFLIP = 6,
++ IPU_ROTATE_90_LEFT = 7,
++} ipu_rotate_mode_t;
++
++/*!
++ * Enumeration of VDI MOTION select
++ */
++typedef enum {
++ MED_MOTION = 0,
++ LOW_MOTION = 1,
++ HIGH_MOTION = 2,
++} ipu_motion_sel;
++
++/*!
++ * Enumeration of DI ports for ADC.
++ */
++typedef enum {
++ DISP0,
++ DISP1,
++ DISP2,
++ DISP3
++} display_port_t;
++
++/* IPU Pixel format definitions */
++/* Four-character-code (FOURCC) */
++#define fourcc(a, b, c, d)\
++ (((__u32)(a)<<0)|((__u32)(b)<<8)|((__u32)(c)<<16)|((__u32)(d)<<24))
++
++/*!
++ * @name IPU Pixel Formats
++ *
++ * Pixel formats are defined with ASCII FOURCC code. The pixel format codes are
++ * the same used by V4L2 API.
++ */
++
++/*! @{ */
++/*! @name Generic or Raw Data Formats */
++/*! @{ */
++#define IPU_PIX_FMT_GENERIC fourcc('I', 'P', 'U', '0') /*!< IPU Generic Data */
++#define IPU_PIX_FMT_GENERIC_32 fourcc('I', 'P', 'U', '1') /*!< IPU Generic Data */
++#define IPU_PIX_FMT_GENERIC_16 fourcc('I', 'P', 'U', '2') /*!< IPU Generic Data */
++#define IPU_PIX_FMT_LVDS666 fourcc('L', 'V', 'D', '6') /*!< IPU Generic Data */
++#define IPU_PIX_FMT_LVDS888 fourcc('L', 'V', 'D', '8') /*!< IPU Generic Data */
++/*! @} */
++/*! @name RGB Formats */
++/*! @{ */
++#define IPU_PIX_FMT_RGB332 fourcc('R', 'G', 'B', '1') /*!< 8 RGB-3-3-2 */
++#define IPU_PIX_FMT_RGB555 fourcc('R', 'G', 'B', 'O') /*!< 16 RGB-5-5-5 */
++#define IPU_PIX_FMT_RGB565 fourcc('R', 'G', 'B', 'P') /*!< 16 RGB-5-6-5 */
++#define IPU_PIX_FMT_RGB666 fourcc('R', 'G', 'B', '6') /*!< 18 RGB-6-6-6 */
++#define IPU_PIX_FMT_BGR666 fourcc('B', 'G', 'R', '6') /*!< 18 BGR-6-6-6 */
++#define IPU_PIX_FMT_BGR24 fourcc('B', 'G', 'R', '3') /*!< 24 BGR-8-8-8 */
++#define IPU_PIX_FMT_RGB24 fourcc('R', 'G', 'B', '3') /*!< 24 RGB-8-8-8 */
++#define IPU_PIX_FMT_GBR24 fourcc('G', 'B', 'R', '3') /*!< 24 GBR-8-8-8 */
++#define IPU_PIX_FMT_BGR32 fourcc('B', 'G', 'R', '4') /*!< 32 BGR-8-8-8-8 */
++#define IPU_PIX_FMT_BGRA32 fourcc('B', 'G', 'R', 'A') /*!< 32 BGR-8-8-8-8 */
++#define IPU_PIX_FMT_RGB32 fourcc('R', 'G', 'B', '4') /*!< 32 RGB-8-8-8-8 */
++#define IPU_PIX_FMT_RGBA32 fourcc('R', 'G', 'B', 'A') /*!< 32 RGB-8-8-8-8 */
++#define IPU_PIX_FMT_ABGR32 fourcc('A', 'B', 'G', 'R') /*!< 32 ABGR-8-8-8-8 */
++/*! @} */
++/*! @name YUV Interleaved Formats */
++/*! @{ */
++#define IPU_PIX_FMT_YUYV fourcc('Y', 'U', 'Y', 'V') /*!< 16 YUV 4:2:2 */
++#define IPU_PIX_FMT_UYVY fourcc('U', 'Y', 'V', 'Y') /*!< 16 YUV 4:2:2 */
++#define IPU_PIX_FMT_YVYU fourcc('Y', 'V', 'Y', 'U') /*!< 16 YVYU 4:2:2 */
++#define IPU_PIX_FMT_VYUY fourcc('V', 'Y', 'U', 'Y') /*!< 16 VYYU 4:2:2 */
++#define IPU_PIX_FMT_Y41P fourcc('Y', '4', '1', 'P') /*!< 12 YUV 4:1:1 */
++#define IPU_PIX_FMT_YUV444 fourcc('Y', '4', '4', '4') /*!< 24 YUV 4:4:4 */
++#define IPU_PIX_FMT_VYU444 fourcc('V', '4', '4', '4') /*!< 24 VYU 4:4:4 */
++/* two planes -- one Y, one Cb + Cr interleaved */
++#define IPU_PIX_FMT_NV12 fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */
++/* two planes -- 12 tiled Y/CbCr 4:2:0 */
++#define IPU_PIX_FMT_TILED_NV12 fourcc('T', 'N', 'V', 'P')
++#define IPU_PIX_FMT_TILED_NV12F fourcc('T', 'N', 'V', 'F')
++
++/*! @} */
++/*! @name YUV Planar Formats */
++/*! @{ */
++#define IPU_PIX_FMT_GREY fourcc('G', 'R', 'E', 'Y') /*!< 8 Greyscale */
++#define IPU_PIX_FMT_YVU410P fourcc('Y', 'V', 'U', '9') /*!< 9 YVU 4:1:0 */
++#define IPU_PIX_FMT_YUV410P fourcc('Y', 'U', 'V', '9') /*!< 9 YUV 4:1:0 */
++#define IPU_PIX_FMT_YVU420P fourcc('Y', 'V', '1', '2') /*!< 12 YVU 4:2:0 */
++#define IPU_PIX_FMT_YUV420P fourcc('I', '4', '2', '0') /*!< 12 YUV 4:2:0 */
++#define IPU_PIX_FMT_YUV420P2 fourcc('Y', 'U', '1', '2') /*!< 12 YUV 4:2:0 */
++#define IPU_PIX_FMT_YVU422P fourcc('Y', 'V', '1', '6') /*!< 16 YVU 4:2:2 */
++#define IPU_PIX_FMT_YUV422P fourcc('4', '2', '2', 'P') /*!< 16 YUV 4:2:2 */
++/* non-interleaved 4:4:4 */
++#define IPU_PIX_FMT_YUV444P fourcc('4', '4', '4', 'P') /*!< 24 YUV 4:4:4 */
++/*! @} */
++#define IPU_PIX_FMT_TILED_NV12_MBALIGN (16)
++#define TILED_NV12_FRAME_SIZE(w, h) \
++ (ALIGN((w) * (h), SZ_4K) + ALIGN((w) * (h) / 2, SZ_4K))
++/* IPU device */
++typedef enum {
++ RGB_CS,
++ YUV_CS,
++ NULL_CS
++} cs_t;
++
++struct ipu_pos {
++ u32 x;
++ u32 y;
++};
++
++struct ipu_crop {
++ struct ipu_pos pos;
++ u32 w;
++ u32 h;
++};
++
++struct ipu_deinterlace {
++ bool enable;
++ u8 motion; /*see ipu_motion_sel*/
++#define IPU_DEINTERLACE_FIELD_TOP 0
++#define IPU_DEINTERLACE_FIELD_BOTTOM 1
++#define IPU_DEINTERLACE_FIELD_MASK \
++ (IPU_DEINTERLACE_FIELD_TOP | IPU_DEINTERLACE_FIELD_BOTTOM)
++ /* deinterlace frame rate double flags */
++#define IPU_DEINTERLACE_RATE_EN 0x80
++#define IPU_DEINTERLACE_RATE_FRAME1 0x40
++#define IPU_DEINTERLACE_RATE_MASK \
++ (IPU_DEINTERLACE_RATE_EN | IPU_DEINTERLACE_RATE_FRAME1)
++#define IPU_DEINTERLACE_MAX_FRAME 2
++ u8 field_fmt;
++};
++
++struct ipu_input {
++ u32 width;
++ u32 height;
++ u32 format;
++ struct ipu_crop crop;
++ dma_addr_t paddr;
++
++ struct ipu_deinterlace deinterlace;
++ dma_addr_t paddr_n; /*valid when deinterlace enable*/
++};
++
++struct ipu_alpha {
++#define IPU_ALPHA_MODE_GLOBAL 0
++#define IPU_ALPHA_MODE_LOCAL 1
++ u8 mode;
++ u8 gvalue; /* 0~255 */
++ dma_addr_t loc_alp_paddr;
++};
++
++struct ipu_colorkey {
++ bool enable;
++ u32 value; /* RGB 24bit */
++};
++
++struct ipu_overlay {
++ u32 width;
++ u32 height;
++ u32 format;
++ struct ipu_crop crop;
++ struct ipu_alpha alpha;
++ struct ipu_colorkey colorkey;
++ dma_addr_t paddr;
++};
++
++struct ipu_output {
++ u32 width;
++ u32 height;
++ u32 format;
++ u8 rotate;
++ struct ipu_crop crop;
++ dma_addr_t paddr;
++};
++
++struct ipu_task {
++ struct ipu_input input;
++ struct ipu_output output;
++
++ bool overlay_en;
++ struct ipu_overlay overlay;
++
++#define IPU_TASK_PRIORITY_NORMAL 0
++#define IPU_TASK_PRIORITY_HIGH 1
++ u8 priority;
++
++#define IPU_TASK_ID_ANY 0
++#define IPU_TASK_ID_VF 1
++#define IPU_TASK_ID_PP 2
++#define IPU_TASK_ID_MAX 3
++ u8 task_id;
++
++ int timeout;
++};
++
++enum {
++ IPU_CHECK_OK = 0,
++ IPU_CHECK_WARN_INPUT_OFFS_NOT8ALIGN = 0x1,
++ IPU_CHECK_WARN_OUTPUT_OFFS_NOT8ALIGN = 0x2,
++ IPU_CHECK_WARN_OVERLAY_OFFS_NOT8ALIGN = 0x4,
++ IPU_CHECK_ERR_MIN,
++ IPU_CHECK_ERR_INPUT_CROP,
++ IPU_CHECK_ERR_OUTPUT_CROP,
++ IPU_CHECK_ERR_OVERLAY_CROP,
++ IPU_CHECK_ERR_INPUT_OVER_LIMIT,
++ IPU_CHECK_ERR_OV_OUT_NO_FIT,
++ IPU_CHECK_ERR_OVERLAY_WITH_VDI,
++ IPU_CHECK_ERR_PROC_NO_NEED,
++ IPU_CHECK_ERR_SPLIT_INPUTW_OVER,
++ IPU_CHECK_ERR_SPLIT_INPUTH_OVER,
++ IPU_CHECK_ERR_SPLIT_OUTPUTW_OVER,
++ IPU_CHECK_ERR_SPLIT_OUTPUTH_OVER,
++ IPU_CHECK_ERR_SPLIT_WITH_ROT,
++ IPU_CHECK_ERR_NOT_SUPPORT,
++ IPU_CHECK_ERR_NOT16ALIGN,
++ IPU_CHECK_ERR_W_DOWNSIZE_OVER,
++ IPU_CHECK_ERR_H_DOWNSIZE_OVER,
++};
++
++/* IOCTL commands */
++#define IPU_CHECK_TASK _IOWR('I', 0x1, struct ipu_task)
++#define IPU_QUEUE_TASK _IOW('I', 0x2, struct ipu_task)
++#define IPU_ALLOC _IOWR('I', 0x3, int)
++#define IPU_FREE _IOW('I', 0x4, int)
++
++#endif
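A userspace sketch of driving these ioctls. Hedged assumptions: the /dev/mxc_ipu node name, and the convention that IPU_CHECK_TASK reports its IPU_CHECK_* result as the ioctl return value; the buffers are left unset, so a real caller would first obtain physically contiguous memory through IPU_ALLOC:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ipu.h>	/* the header above, once exported */

int main(void)
{
	struct ipu_task task;
	int fd, ret;

	memset(&task, 0, sizeof(task));
	task.input.width = 1920;
	task.input.height = 1080;
	task.input.format = IPU_PIX_FMT_NV12;
	task.output.width = 1280;
	task.output.height = 720;
	task.output.format = IPU_PIX_FMT_RGB565;
	task.output.rotate = IPU_ROTATE_NONE;
	task.timeout = 1000; /* ms */
	/* task.input.paddr / task.output.paddr must point at physically
	 * contiguous buffers (e.g. from IPU_ALLOC); left 0 here, so the
	 * queue step below is expected to fail on real hardware. */

	fd = open("/dev/mxc_ipu", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	ret = ioctl(fd, IPU_CHECK_TASK, &task);
	if (ret != IPU_CHECK_OK)
		fprintf(stderr, "IPU_CHECK_TASK: %d\n", ret);
	else if (ioctl(fd, IPU_QUEUE_TASK, &task) < 0)
		perror("IPU_QUEUE_TASK");

	close(fd);
	return 0;
}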
+diff -Nur linux-4.1.3/include/uapi/linux/Kbuild linux-xbian-imx6/include/uapi/linux/Kbuild
+--- linux-4.1.3/include/uapi/linux/Kbuild 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/include/uapi/linux/Kbuild 2015-07-27 23:13:09.849839184 +0200
+@@ -266,6 +266,9 @@
+ header-y += msdos_fs.h
+ header-y += msg.h
+ header-y += mtio.h
++header-y += mxcfb.h
++header-y += mxc_dcic.h
++header-y += ipu.h
+ header-y += nbd.h
+ header-y += ncp_fs.h
+ header-y += ncp.h
+@@ -451,3 +454,6 @@
+ header-y += xilinx-v4l2-controls.h
+ header-y += zorro.h
+ header-y += zorro_ids.h
++
++header-y += pxp_device.h
++header-y += pxp_dma.h
+diff -Nur linux-4.1.3/include/uapi/linux/mxc_asrc.h linux-xbian-imx6/include/uapi/linux/mxc_asrc.h
+--- linux-4.1.3/include/uapi/linux/mxc_asrc.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/uapi/linux/mxc_asrc.h 2015-07-27 23:13:09.865782301 +0200
+@@ -0,0 +1,143 @@
++/*
++ * Copyright 2008-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ *
++ * @file mxc_asrc.h
++ *
++ * @brief i.MX Asynchronous Sample Rate Converter
++ *
++ * @ingroup Audio
++ */
++
++#ifndef __MXC_ASRC_UAPI_H__
++#define __MXC_ASRC_UAPI_H__
++
++#define ASRC_IOC_MAGIC 'C'
++
++#define ASRC_REQ_PAIR _IOWR(ASRC_IOC_MAGIC, 0, struct asrc_req)
++#define ASRC_CONFIG_PAIR _IOWR(ASRC_IOC_MAGIC, 1, struct asrc_config)
++#define ASRC_RELEASE_PAIR _IOW(ASRC_IOC_MAGIC, 2, enum asrc_pair_index)
++#define ASRC_CONVERT _IOW(ASRC_IOC_MAGIC, 3, struct asrc_convert_buffer)
++#define ASRC_START_CONV _IOW(ASRC_IOC_MAGIC, 4, enum asrc_pair_index)
++#define ASRC_STOP_CONV _IOW(ASRC_IOC_MAGIC, 5, enum asrc_pair_index)
++#define ASRC_STATUS _IOW(ASRC_IOC_MAGIC, 6, struct asrc_status_flags)
++#define ASRC_FLUSH _IOW(ASRC_IOC_MAGIC, 7, enum asrc_pair_index)
++
++enum asrc_pair_index {
++ ASRC_UNVALID_PAIR = -1,
++ ASRC_PAIR_A = 0,
++ ASRC_PAIR_B = 1,
++ ASRC_PAIR_C = 2,
++};
++
++#define ASRC_PAIR_MAX_NUM (ASRC_PAIR_C + 1)
++
++enum asrc_inclk {
++ INCLK_NONE = 0x03,
++ INCLK_ESAI_RX = 0x00,
++ INCLK_SSI1_RX = 0x01,
++ INCLK_SSI2_RX = 0x02,
++ INCLK_SSI3_RX = 0x07,
++ INCLK_SPDIF_RX = 0x04,
++ INCLK_MLB_CLK = 0x05,
++ INCLK_PAD = 0x06,
++ INCLK_ESAI_TX = 0x08,
++ INCLK_SSI1_TX = 0x09,
++ INCLK_SSI2_TX = 0x0a,
++ INCLK_SSI3_TX = 0x0b,
++ INCLK_SPDIF_TX = 0x0c,
++ INCLK_ASRCK1_CLK = 0x0f,
++};
++
++enum asrc_outclk {
++ OUTCLK_NONE = 0x03,
++ OUTCLK_ESAI_TX = 0x00,
++ OUTCLK_SSI1_TX = 0x01,
++ OUTCLK_SSI2_TX = 0x02,
++ OUTCLK_SSI3_TX = 0x07,
++ OUTCLK_SPDIF_TX = 0x04,
++ OUTCLK_MLB_CLK = 0x05,
++ OUTCLK_PAD = 0x06,
++ OUTCLK_ESAI_RX = 0x08,
++ OUTCLK_SSI1_RX = 0x09,
++ OUTCLK_SSI2_RX = 0x0a,
++ OUTCLK_SSI3_RX = 0x0b,
++ OUTCLK_SPDIF_RX = 0x0c,
++ OUTCLK_ASRCK1_CLK = 0x0f,
++};
++
++enum asrc_word_width {
++ ASRC_WIDTH_24_BIT = 0,
++ ASRC_WIDTH_16_BIT = 1,
++ ASRC_WIDTH_8_BIT = 2,
++};
++
++struct asrc_config {
++ enum asrc_pair_index pair;
++ unsigned int channel_num;
++ unsigned int buffer_num;
++ unsigned int dma_buffer_size;
++ unsigned int input_sample_rate;
++ unsigned int output_sample_rate;
++ enum asrc_word_width input_word_width;
++ enum asrc_word_width output_word_width;
++ enum asrc_inclk inclk;
++ enum asrc_outclk outclk;
++};
++
++struct asrc_pair {
++ unsigned int start_channel;
++ unsigned int chn_num;
++ unsigned int chn_max;
++ unsigned int active;
++ unsigned int overload_error;
++};
++
++struct asrc_req {
++ unsigned int chn_num;
++ enum asrc_pair_index index;
++};
++
++struct asrc_querybuf {
++ unsigned int buffer_index;
++ unsigned int input_length;
++ unsigned int output_length;
++ unsigned long input_offset;
++ unsigned long output_offset;
++};
++
++struct asrc_convert_buffer {
++ void *input_buffer_vaddr;
++ void *output_buffer_vaddr;
++ unsigned int input_buffer_length;
++ unsigned int output_buffer_length;
++};
++
++struct asrc_buffer {
++ unsigned int index;
++ unsigned int length;
++ unsigned int output_last_length;
++ int buf_valid;
++};
++
++struct asrc_status_flags {
++ enum asrc_pair_index index;
++ unsigned int overload_error;
++};
++
++#define ASRC_BUF_NA -35 /* ASRC DQ's buffer is NOT available */
++#define ASRC_BUF_AV 35 /* ASRC DQ's buffer is available */
++enum asrc_error_status {
++ ASRC_TASK_Q_OVERLOAD = 0x01,
++ ASRC_OUTPUT_TASK_OVERLOAD = 0x02,
++ ASRC_INPUT_TASK_OVERLOAD = 0x04,
++ ASRC_OUTPUT_BUFFER_OVERFLOW = 0x08,
++ ASRC_INPUT_BUFFER_UNDERRUN = 0x10,
++};
++#endif /* __MXC_ASRC_UAPI_H__ */
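A hedged user-space sketch of the pair lifecycle these ioctls imply
(request, configure, start, stop, release); the device node /dev/mxc_asrc
and all numeric values are assumptions:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/mxc_asrc.h>

    int asrc_resample_44k1_to_48k(void)
    {
            struct asrc_req req = { .chn_num = 2 };
            struct asrc_config cfg;
            int fd = open("/dev/mxc_asrc", O_RDWR);

            if (fd < 0)
                    return -1;
            if (ioctl(fd, ASRC_REQ_PAIR, &req) < 0)
                    goto err;                    /* req.index = granted pair */

            memset(&cfg, 0, sizeof(cfg));
            cfg.pair = req.index;
            cfg.channel_num = 2;
            cfg.buffer_num = 4;                  /* illustrative sizing */
            cfg.dma_buffer_size = 4096;
            cfg.input_sample_rate = 44100;
            cfg.output_sample_rate = 48000;
            cfg.input_word_width = ASRC_WIDTH_16_BIT;
            cfg.output_word_width = ASRC_WIDTH_16_BIT;
            cfg.inclk = INCLK_NONE;              /* ideal-ratio mode */
            cfg.outclk = OUTCLK_NONE;
            if (ioctl(fd, ASRC_CONFIG_PAIR, &cfg) < 0)
                    goto err;

            ioctl(fd, ASRC_START_CONV, &cfg.pair);
            /* ... push buffers through ASRC_CONVERT here ... */
            ioctl(fd, ASRC_STOP_CONV, &cfg.pair);
            ioctl(fd, ASRC_RELEASE_PAIR, &cfg.pair);
            close(fd);
            return 0;
    err:
            close(fd);
            return -1;
    }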
+diff -Nur linux-4.1.3/include/uapi/linux/mxc_dcic.h linux-xbian-imx6/include/uapi/linux/mxc_dcic.h
+--- linux-4.1.3/include/uapi/linux/mxc_dcic.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/uapi/linux/mxc_dcic.h 2015-07-27 23:13:09.865782301 +0200
+@@ -0,0 +1,49 @@
++/*
++ * Copyright (C) 2014 Freescale Semiconductor, Inc. All Rights Reserved
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++/*!
++ * @file uapi/linux/mxc_dcic.h
++ *
++ * @brief MXC DCIC private header file
++ *
++ * @ingroup MXC DCIC
++ */
++#ifndef __ASM_ARCH_MXC_DCIC_H__
++#define __ASM_ARCH_MXC_DCIC_H__
++
++#define DCIC_IOC_ALLOC_ROI_NUM _IO('D', 10)
++#define DCIC_IOC_FREE_ROI_NUM _IO('D', 11)
++#define DCIC_IOC_CONFIG_DCIC _IO('D', 12)
++#define DCIC_IOC_CONFIG_ROI _IO('D', 13)
++#define DCIC_IOC_GET_RESULT _IO('D', 14)
++#define DCIC_IOC_START_VSYNC _IO('D', 15)
++#define DCIC_IOC_STOP_VSYNC _IO('D', 16)
++
++struct roi_params {
++ unsigned int roi_n;
++ unsigned int ref_sig;
++ unsigned int start_y;
++ unsigned int start_x;
++ unsigned int end_y;
++ unsigned int end_x;
++ char freeze;
++};
++
++#endif
+diff -Nur linux-4.1.3/include/uapi/linux/mxcfb.h linux-xbian-imx6/include/uapi/linux/mxcfb.h
+--- linux-4.1.3/include/uapi/linux/mxcfb.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/uapi/linux/mxcfb.h 2015-07-27 23:13:09.865782301 +0200
+@@ -0,0 +1,176 @@
++/*
++ * Copyright (C) 2013-2014 Freescale Semiconductor, Inc. All Rights Reserved
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++/*
++ * @file uapi/linux/mxcfb.h
++ *
++ * @brief Global header file for the MXC frame buffer
++ *
++ * @ingroup Framebuffer
++ */
++#ifndef __ASM_ARCH_MXCFB_H__
++#define __ASM_ARCH_MXCFB_H__
++
++#include <linux/fb.h>
++
++#define FB_SYNC_OE_LOW_ACT 0x80000000
++#define FB_SYNC_CLK_LAT_FALL 0x40000000
++#define FB_SYNC_DATA_INVERT 0x20000000
++#define FB_SYNC_CLK_IDLE_EN 0x10000000
++#define FB_SYNC_SHARP_MODE 0x08000000
++#define FB_SYNC_SWAP_RGB 0x04000000
++#define FB_ACCEL_TRIPLE_FLAG 0x00000000
++#define FB_ACCEL_DOUBLE_FLAG 0x00000001
++
++struct mxcfb_gbl_alpha {
++ int enable;
++ int alpha;
++};
++
++struct mxcfb_loc_alpha {
++ int enable;
++ int alpha_in_pixel;
++ unsigned long alpha_phy_addr0;
++ unsigned long alpha_phy_addr1;
++};
++
++struct mxcfb_color_key {
++ int enable;
++ __u32 color_key;
++};
++
++struct mxcfb_pos {
++ __u16 x;
++ __u16 y;
++};
++
++struct mxcfb_gamma {
++ int enable;
++ int constk[16];
++ int slopek[16];
++};
++
++struct mxcfb_rect {
++ __u32 top;
++ __u32 left;
++ __u32 width;
++ __u32 height;
++};
++
++#define GRAYSCALE_8BIT 0x1
++#define GRAYSCALE_8BIT_INVERTED 0x2
++#define GRAYSCALE_4BIT 0x3
++#define GRAYSCALE_4BIT_INVERTED 0x4
++
++#define AUTO_UPDATE_MODE_REGION_MODE 0
++#define AUTO_UPDATE_MODE_AUTOMATIC_MODE 1
++
++#define UPDATE_SCHEME_SNAPSHOT 0
++#define UPDATE_SCHEME_QUEUE 1
++#define UPDATE_SCHEME_QUEUE_AND_MERGE 2
++
++#define UPDATE_MODE_PARTIAL 0x0
++#define UPDATE_MODE_FULL 0x1
++
++#define WAVEFORM_MODE_AUTO 257
++
++#define TEMP_USE_AMBIENT 0x1000
++
++#define EPDC_FLAG_ENABLE_INVERSION 0x01
++#define EPDC_FLAG_FORCE_MONOCHROME 0x02
++#define EPDC_FLAG_USE_CMAP 0x04
++#define EPDC_FLAG_USE_ALT_BUFFER 0x100
++#define EPDC_FLAG_TEST_COLLISION 0x200
++#define EPDC_FLAG_GROUP_UPDATE 0x400
++#define EPDC_FLAG_USE_DITHERING_Y1 0x2000
++#define EPDC_FLAG_USE_DITHERING_Y4 0x4000
++
++#define FB_POWERDOWN_DISABLE -1
++
++struct mxcfb_alt_buffer_data {
++ __u32 phys_addr;
++ __u32 width; /* width of entire buffer */
++ __u32 height; /* height of entire buffer */
++ struct mxcfb_rect alt_update_region; /* region within buffer to update */
++};
++
++struct mxcfb_update_data {
++ struct mxcfb_rect update_region;
++ __u32 waveform_mode;
++ __u32 update_mode;
++ __u32 update_marker;
++ int temp;
++ unsigned int flags;
++ struct mxcfb_alt_buffer_data alt_buffer_data;
++};
++
++struct mxcfb_update_marker_data {
++ __u32 update_marker;
++ __u32 collision_test;
++};
++
++/*
++ * Structure used to define waveform modes for the driver.
++ * Needed for the driver to perform auto-waveform selection.
++ */
++struct mxcfb_waveform_modes {
++ int mode_init;
++ int mode_du;
++ int mode_gc4;
++ int mode_gc8;
++ int mode_gc16;
++ int mode_gc32;
++};
++
++/*
++ * Structure used to define a 5*3 matrix of parameters for
++ * setting the IPU DP CSC module associated with this framebuffer.
++ */
++struct mxcfb_csc_matrix {
++ int param[5][3];
++};
++
++#define MXCFB_WAIT_FOR_VSYNC _IOW('F', 0x20, u_int32_t)
++#define MXCFB_SET_GBL_ALPHA _IOW('F', 0x21, struct mxcfb_gbl_alpha)
++#define MXCFB_SET_CLR_KEY _IOW('F', 0x22, struct mxcfb_color_key)
++#define MXCFB_SET_OVERLAY_POS _IOWR('F', 0x24, struct mxcfb_pos)
++#define MXCFB_GET_FB_IPU_CHAN _IOR('F', 0x25, u_int32_t)
++#define MXCFB_SET_LOC_ALPHA _IOWR('F', 0x26, struct mxcfb_loc_alpha)
++#define MXCFB_SET_LOC_ALP_BUF _IOW('F', 0x27, unsigned long)
++#define MXCFB_SET_GAMMA _IOW('F', 0x28, struct mxcfb_gamma)
++#define MXCFB_GET_FB_IPU_DI _IOR('F', 0x29, u_int32_t)
++#define MXCFB_GET_DIFMT _IOR('F', 0x2A, u_int32_t)
++#define MXCFB_GET_FB_BLANK _IOR('F', 0x2B, u_int32_t)
++#define MXCFB_SET_DIFMT _IOW('F', 0x2C, u_int32_t)
++#define MXCFB_CSC_UPDATE _IOW('F', 0x2D, struct mxcfb_csc_matrix)
++
++/* IOCTLs for E-ink panel updates */
++#define MXCFB_SET_WAVEFORM_MODES _IOW('F', 0x2B, struct mxcfb_waveform_modes)
++#define MXCFB_SET_TEMPERATURE _IOW('F', 0x2C, int32_t)
++#define MXCFB_SET_AUTO_UPDATE_MODE _IOW('F', 0x2D, __u32)
++#define MXCFB_SEND_UPDATE _IOW('F', 0x2E, struct mxcfb_update_data)
++#define MXCFB_WAIT_FOR_UPDATE_COMPLETE _IOWR('F', 0x2F, struct mxcfb_update_marker_data)
++#define MXCFB_SET_PWRDOWN_DELAY _IOW('F', 0x30, int32_t)
++#define MXCFB_GET_PWRDOWN_DELAY _IOR('F', 0x31, int32_t)
++#define MXCFB_SET_UPDATE_SCHEME _IOW('F', 0x32, __u32)
++#define MXCFB_GET_WORK_BUFFER _IOWR('F', 0x34, unsigned long)
++#define MXCFB_DISABLE_EPDC_ACCESS _IO('F', 0x35)
++#define MXCFB_ENABLE_EPDC_ACCESS _IO('F', 0x36)
++#endif
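A minimal user-space sketch of two of the ioctls above; /dev/fb0 and the
50% alpha value are assumptions:

    #include <fcntl.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/mxcfb.h>

    int blend_and_sync(void)
    {
            struct mxcfb_gbl_alpha ga = { .enable = 1, .alpha = 128 };
            uint32_t chan = 0;
            int fd = open("/dev/fb0", O_RDWR);

            if (fd < 0)
                    return -1;
            ioctl(fd, MXCFB_SET_GBL_ALPHA, &ga);    /* ~50% global blend */
            ioctl(fd, MXCFB_WAIT_FOR_VSYNC, &chan); /* block until vsync */
            close(fd);
            return 0;
    }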
+diff -Nur linux-4.1.3/include/uapi/linux/mxc_mlb.h linux-xbian-imx6/include/uapi/linux/mxc_mlb.h
+--- linux-4.1.3/include/uapi/linux/mxc_mlb.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/uapi/linux/mxc_mlb.h 2015-07-27 23:13:09.865782301 +0200
+@@ -0,0 +1,55 @@
++/*
++ * mxc_mlb.h
++ *
++ * Copyright 2008-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++#ifndef _MXC_MLB_UAPI_H
++#define _MXC_MLB_UAPI_H
++
++/* define IOCTL command */
++#define MLB_DBG_RUNTIME _IO('S', 0x09)
++#define MLB_SET_FPS _IOW('S', 0x10, unsigned int)
++#define MLB_GET_VER _IOR('S', 0x11, unsigned long)
++#define MLB_SET_DEVADDR _IOR('S', 0x12, unsigned char)
++
++/*!
++ * set channel address for each logical channel
++ * the MSB 16 bits carry the tx channel address and the LSB 16 bits
++ * carry the rx channel address
++ */
++#define MLB_CHAN_SETADDR _IOW('S', 0x13, unsigned int)
++#define MLB_CHAN_STARTUP _IO('S', 0x14)
++#define MLB_CHAN_SHUTDOWN _IO('S', 0x15)
++#define MLB_CHAN_GETEVENT _IOR('S', 0x16, unsigned long)
++
++#define MLB_SET_ISOC_BLKSIZE_188 _IO('S', 0x17)
++#define MLB_SET_ISOC_BLKSIZE_196 _IO('S', 0x18)
++#define MLB_SET_SYNC_QUAD _IOW('S', 0x19, unsigned int)
++#define MLB_IRQ_ENABLE _IO('S', 0x20)
++#define MLB_IRQ_DISABLE _IO('S', 0x21)
++
++/*!
++ * MLB event definitions
++ */
++enum {
++ MLB_EVT_TX_PROTO_ERR_CUR = 1 << 0,
++ MLB_EVT_TX_BRK_DETECT_CUR = 1 << 1,
++ MLB_EVT_TX_PROTO_ERR_PREV = 1 << 8,
++ MLB_EVT_TX_BRK_DETECT_PREV = 1 << 9,
++ MLB_EVT_RX_PROTO_ERR_CUR = 1 << 16,
++ MLB_EVT_RX_BRK_DETECT_CUR = 1 << 17,
++ MLB_EVT_RX_PROTO_ERR_PREV = 1 << 24,
++ MLB_EVT_RX_BRK_DETECT_PREV = 1 << 25,
++};
++
++
++#endif /* _MXC_MLB_UAPI_H */
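The MLB_CHAN_SETADDR packing described above, as a hedged C sketch (the
fd is a placeholder for an open MLB channel device):

    #include <sys/ioctl.h>
    #include <linux/mxc_mlb.h>

    /* Pack the tx address into the upper 16 bits and the rx address
     * into the lower 16 bits, per the MLB_CHAN_SETADDR comment. */
    static int mlb_set_chan_addr(int fd, unsigned int tx, unsigned int rx)
    {
            unsigned int caddr = (tx << 16) | (rx & 0xffff);

            return ioctl(fd, MLB_CHAN_SETADDR, &caddr);
    }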
+diff -Nur linux-4.1.3/include/uapi/linux/mxc_v4l2.h linux-xbian-imx6/include/uapi/linux/mxc_v4l2.h
+--- linux-4.1.3/include/uapi/linux/mxc_v4l2.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/uapi/linux/mxc_v4l2.h 2015-07-27 23:13:09.865782301 +0200
+@@ -0,0 +1,56 @@
++/*
++ * Copyright (C) 2013 Freescale Semiconductor, Inc. All Rights Reserved
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++/*!
++ * @file uapi/linux/mxc_v4l2.h
++ *
++ * @brief MXC V4L2 private header file
++ *
++ * @ingroup MXC V4L2
++ */
++
++#ifndef __ASM_ARCH_MXC_V4L2_H__
++#define __ASM_ARCH_MXC_V4L2_H__
++
++/*
++ * For IPUv1 and IPUv3, V4L2_CID_MXC_ROT is the encoder rotation control ID,
++ * and V4L2_CID_MXC_VF_ROT is the viewfinder rotation control ID (IPUv1 and
++ * IPUv3 only).
++ */
++#define V4L2_CID_MXC_ROT (V4L2_CID_PRIVATE_BASE + 0)
++#define V4L2_CID_MXC_FLASH (V4L2_CID_PRIVATE_BASE + 1)
++#define V4L2_CID_MXC_VF_ROT (V4L2_CID_PRIVATE_BASE + 2)
++#define V4L2_CID_MXC_MOTION (V4L2_CID_PRIVATE_BASE + 3)
++#define V4L2_CID_MXC_SWITCH_CAM (V4L2_CID_PRIVATE_BASE + 6)
++
++#define V4L2_MXC_ROTATE_NONE 0
++#define V4L2_MXC_ROTATE_VERT_FLIP 1
++#define V4L2_MXC_ROTATE_HORIZ_FLIP 2
++#define V4L2_MXC_ROTATE_180 3
++#define V4L2_MXC_ROTATE_90_RIGHT 4
++#define V4L2_MXC_ROTATE_90_RIGHT_VFLIP 5
++#define V4L2_MXC_ROTATE_90_RIGHT_HFLIP 6
++#define V4L2_MXC_ROTATE_90_LEFT 7
++
++struct v4l2_mxc_offset {
++ uint32_t u_offset;
++ uint32_t v_offset;
++};
++
++#endif
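A hedged sketch of driving the private rotation control through the
standard VIDIOC_S_CTRL ioctl; /dev/video0 is an assumption:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/videodev2.h>
    #include <linux/mxc_v4l2.h>

    int set_encoder_rotation(void)
    {
            struct v4l2_control ctrl;
            int fd = open("/dev/video0", O_RDWR);

            if (fd < 0)
                    return -1;
            memset(&ctrl, 0, sizeof(ctrl));
            ctrl.id = V4L2_CID_MXC_ROT;
            ctrl.value = V4L2_MXC_ROTATE_90_RIGHT;
            if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0) {
                    close(fd);
                    return -1;
            }
            close(fd);
            return 0;
    }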
+diff -Nur linux-4.1.3/include/uapi/linux/pxp_device.h linux-xbian-imx6/include/uapi/linux/pxp_device.h
+--- linux-4.1.3/include/uapi/linux/pxp_device.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/uapi/linux/pxp_device.h 2015-07-27 23:13:09.941512106 +0200
+@@ -0,0 +1,63 @@
++/*
++ * Copyright (C) 2013-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#ifndef _UAPI_PXP_DEVICE
++#define _UAPI_PXP_DEVICE
++
++#include <linux/pxp_dma.h>
++
++struct pxp_chan_handle {
++ unsigned int handle;
++ int hist_status;
++};
++
++struct pxp_mem_desc {
++ unsigned int handle;
++ unsigned int size;
++ dma_addr_t phys_addr;
++ unsigned int virt_uaddr; /* virtual user space address */
++ unsigned int mtype;
++};
++
++struct pxp_mem_flush {
++ unsigned int handle;
++ unsigned int type;
++};
++
++#define PXP_IOC_MAGIC 'P'
++
++#define PXP_IOC_GET_CHAN _IOR(PXP_IOC_MAGIC, 0, struct pxp_mem_desc)
++#define PXP_IOC_PUT_CHAN _IOW(PXP_IOC_MAGIC, 1, struct pxp_mem_desc)
++#define PXP_IOC_CONFIG_CHAN _IOW(PXP_IOC_MAGIC, 2, struct pxp_mem_desc)
++#define PXP_IOC_START_CHAN _IOW(PXP_IOC_MAGIC, 3, struct pxp_mem_desc)
++#define PXP_IOC_GET_PHYMEM _IOWR(PXP_IOC_MAGIC, 4, struct pxp_mem_desc)
++#define PXP_IOC_PUT_PHYMEM _IOW(PXP_IOC_MAGIC, 5, struct pxp_mem_desc)
++#define PXP_IOC_WAIT4CMPLT _IOWR(PXP_IOC_MAGIC, 6, struct pxp_mem_desc)
++#define PXP_IOC_FLUSH_PHYMEM _IOR(PXP_IOC_MAGIC, 7, struct pxp_mem_flush)
++
++/* Memory types supported */
++#define MEMORY_TYPE_UNCACHED 0x0
++#define MEMORY_TYPE_WC 0x1
++#define MEMORY_TYPE_CACHED 0x2
++
++/* Cache flush operations */
++#define CACHE_CLEAN 0x1
++#define CACHE_INVALIDATE 0x2
++#define CACHE_FLUSH 0x4
++
++#endif
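A hedged user-space sketch of the physical-memory ioctls above; the
device node /dev/pxp_device and the mmap-by-physical-address convention
are assumptions about the driver, not guaranteed by this header:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <linux/pxp_device.h>

    void *pxp_alloc(int fd, unsigned int len)
    {
            struct pxp_mem_desc mem;
            void *va;

            memset(&mem, 0, sizeof(mem));
            mem.size = len;
            mem.mtype = MEMORY_TYPE_CACHED;
            if (ioctl(fd, PXP_IOC_GET_PHYMEM, &mem) < 0)
                    return NULL;

            /* mapping via the returned physical address is an assumed
             * driver convention */
            va = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                      fd, (off_t)mem.phys_addr);
            return va == MAP_FAILED ? NULL : va;
    }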
+diff -Nur linux-4.1.3/include/uapi/linux/pxp_dma.h linux-xbian-imx6/include/uapi/linux/pxp_dma.h
+--- linux-4.1.3/include/uapi/linux/pxp_dma.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/uapi/linux/pxp_dma.h 2015-07-27 23:13:09.941512106 +0200
+@@ -0,0 +1,173 @@
++/*
++ * Copyright (C) 2013-2015 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#ifndef _UAPI_PXP_DMA
++#define _UAPI_PXP_DMA
++
++#include <linux/posix_types.h>
++#include <linux/types.h>
++
++#ifndef __KERNEL__
++typedef unsigned long dma_addr_t;
++typedef unsigned char bool;
++#endif
++
++/* PXP Pixel format definitions */
++/* Four-character-code (FOURCC) */
++#define fourcc(a, b, c, d)\
++ (((__u32)(a)<<0)|((__u32)(b)<<8)|((__u32)(c)<<16)|((__u32)(d)<<24))
++
++/*!
++ * @name PXP Pixel Formats
++ *
++ * Pixel formats are defined with ASCII FOURCC code. The pixel format codes are
++ * the same used by V4L2 API.
++ */
++
++/*! @} */
++/*! @name RGB Formats */
++/*! @{ */
++#define PXP_PIX_FMT_RGB332 fourcc('R', 'G', 'B', '1') /*!< 8 RGB-3-3-2 */
++#define PXP_PIX_FMT_RGB555 fourcc('R', 'G', 'B', 'O') /*!< 16 RGB-5-5-5 */
++#define PXP_PIX_FMT_RGB565 fourcc('R', 'G', 'B', 'P') /*!< 16 RGB-5-6-5 */
++#define PXP_PIX_FMT_RGB666 fourcc('R', 'G', 'B', '6') /*!< 18 RGB-6-6-6 */
++#define PXP_PIX_FMT_BGR666 fourcc('B', 'G', 'R', '6') /*!< 18 BGR-6-6-6 */
++#define PXP_PIX_FMT_BGR24 fourcc('B', 'G', 'R', '3') /*!< 24 BGR-8-8-8 */
++#define PXP_PIX_FMT_RGB24 fourcc('R', 'G', 'B', '3') /*!< 24 RGB-8-8-8 */
++#define PXP_PIX_FMT_BGR32 fourcc('B', 'G', 'R', '4') /*!< 32 BGR-8-8-8-8 */
++#define PXP_PIX_FMT_BGRA32 fourcc('B', 'G', 'R', 'A') /*!< 32 BGR-8-8-8-8 */
++#define PXP_PIX_FMT_RGB32 fourcc('R', 'G', 'B', '4') /*!< 32 RGB-8-8-8-8 */
++#define PXP_PIX_FMT_RGBA32 fourcc('R', 'G', 'B', 'A') /*!< 32 RGB-8-8-8-8 */
++#define PXP_PIX_FMT_ABGR32 fourcc('A', 'B', 'G', 'R') /*!< 32 ABGR-8-8-8-8 */
++/*! @} */
++/*! @name YUV Interleaved Formats */
++/*! @{ */
++#define PXP_PIX_FMT_YUYV fourcc('Y', 'U', 'Y', 'V') /*!< 16 YUV 4:2:2 */
++#define PXP_PIX_FMT_UYVY fourcc('U', 'Y', 'V', 'Y') /*!< 16 YUV 4:2:2 */
++#define PXP_PIX_FMT_VYUY fourcc('V', 'Y', 'U', 'Y') /*!< 16 YVU 4:2:2 */
++#define PXP_PIX_FMT_YVYU fourcc('Y', 'V', 'Y', 'U') /*!< 16 YVU 4:2:2 */
++#define PXP_PIX_FMT_Y41P fourcc('Y', '4', '1', 'P') /*!< 12 YUV 4:1:1 */
++#define PXP_PIX_FMT_VUY444 fourcc('V', 'U', 'Y', 'A') /*!< 32 VUYA 8:8:8 */
++/* two planes -- one Y, one Cb + Cr interleaved */
++#define PXP_PIX_FMT_NV12 fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */
++#define PXP_PIX_FMT_NV21 fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */
++#define PXP_PIX_FMT_NV16 fourcc('N', 'V', '1', '6') /* 16 Y/CbCr 4:2:2 */
++#define PXP_PIX_FMT_NV61 fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */
++/*! @} */
++/*! @name YUV Planar Formats */
++/*! @{ */
++#define PXP_PIX_FMT_GREY fourcc('G', 'R', 'E', 'Y') /*!< 8 Greyscale */
++#define PXP_PIX_FMT_GY04 fourcc('G', 'Y', '0', '4') /*!< 4 Greyscale */
++#define PXP_PIX_FMT_YVU410P fourcc('Y', 'V', 'U', '9') /*!< 9 YVU 4:1:0 */
++#define PXP_PIX_FMT_YUV410P fourcc('Y', 'U', 'V', '9') /*!< 9 YUV 4:1:0 */
++#define PXP_PIX_FMT_YVU420P fourcc('Y', 'V', '1', '2') /*!< 12 YVU 4:2:0 */
++#define PXP_PIX_FMT_YUV420P fourcc('I', '4', '2', '0') /*!< 12 YUV 4:2:0 */
++#define PXP_PIX_FMT_YUV420P2 fourcc('Y', 'U', '1', '2') /*!< 12 YUV 4:2:0 */
++#define PXP_PIX_FMT_YVU422P fourcc('Y', 'V', '1', '6') /*!< 16 YVU 4:2:2 */
++#define PXP_PIX_FMT_YUV422P fourcc('4', '2', '2', 'P') /*!< 16 YUV 4:2:2 */
++/*! @} */
++
++#define PXP_LUT_NONE 0x0
++#define PXP_LUT_INVERT 0x1
++#define PXP_LUT_BLACK_WHITE 0x2
++#define PXP_LUT_USE_CMAP 0x4
++
++#define NR_PXP_VIRT_CHANNEL 16
++
++/* Order significant! */
++enum pxp_channel_status {
++ PXP_CHANNEL_FREE,
++ PXP_CHANNEL_INITIALIZED,
++ PXP_CHANNEL_READY,
++};
++
++struct rect {
++ int top; /* Upper left coordinate of rectangle */
++ int left;
++ int width;
++ int height;
++};
++
++struct pxp_layer_param {
++ unsigned short width;
++ unsigned short height;
++ unsigned short stride; /* aka pitch */
++ unsigned int pixel_fmt;
++
++ /* layer combining parameters
++ * (ignored for the S0 and output
++ * layers; they apply only to the OL layer)
++ */
++ bool combine_enable;
++ unsigned int color_key_enable;
++ unsigned int color_key;
++ bool global_alpha_enable;
++ /* global alpha is either override or multiply */
++ bool global_override;
++ unsigned char global_alpha;
++ bool alpha_invert;
++ bool local_alpha_enable;
++
++ dma_addr_t paddr;
++};
++
++struct pxp_proc_data {
++ /* S0 Transformation Info */
++ int scaling;
++ int hflip;
++ int vflip;
++ int rotate;
++ int rot_pos;
++ int yuv;
++
++ /* Source rectangle (srect) defines the sub-rectangle
++ * within S0 to undergo processing.
++ */
++ struct rect srect;
++ /* Dest rect (drect) defines how to position the processed
++ * source rectangle (after resizing) within the output frame,
++ * whose dimensions are defined in pxp->pxp_conf_state.out_param
++ */
++ struct rect drect;
++
++ /* Current S0 configuration */
++ unsigned int bgcolor;
++
++ /* Output overlay support */
++ int overlay_state;
++
++ /* LUT transformation on Y data */
++ int lut_transform;
++ unsigned char *lut_map; /* 256 entries */
++ bool lut_map_updated; /* Map recently changed */
++ bool combine_enable;
++};
++
++struct pxp_config_data {
++ struct pxp_layer_param s0_param;
++ struct pxp_layer_param ol_param[8];
++ struct pxp_layer_param out_param;
++ struct pxp_proc_data proc_data;
++ int layer_nr;
++
++ /* Users don't touch */
++ int handle;
++};
++
++
++#endif
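The fourcc() macro above packs four ASCII bytes with the first character
in the least significant byte; a quick self-check (the value is computed
from the macro definition itself, nothing assumed):

    #include <assert.h>
    #include <linux/pxp_dma.h>

    static void fourcc_selfcheck(void)
    {
            /* 'R'=0x52 'G'=0x47 'B'=0x42 'P'=0x50 -> 0x50424752 */
            assert(PXP_PIX_FMT_RGB565 == 0x50424752u);
    }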
+diff -Nur linux-4.1.3/include/uapi/sync.h linux-xbian-imx6/include/uapi/sync.h
+--- linux-4.1.3/include/uapi/sync.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/uapi/sync.h 2015-07-27 23:13:09.977384118 +0200
+@@ -0,0 +1,97 @@
++/*
++ * Copyright (C) 2012 Google, Inc.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ */
++
++#ifndef _UAPI_LINUX_SYNC_H
++#define _UAPI_LINUX_SYNC_H
++
++#include <linux/ioctl.h>
++#include <linux/types.h>
++
++/**
++ * struct sync_merge_data - data passed to merge ioctl
++ * @fd2: file descriptor of second fence
++ * @name: name of new fence
++ * @fence: returns the fd of the new fence to userspace
++ */
++struct sync_merge_data {
++ __s32 fd2; /* fd of second fence */
++ char name[32]; /* name of new fence */
++ __s32 fence; /* fd of the newly created fence */
++};
++
++/**
++ * struct sync_pt_info - detailed sync_pt information
++ * @len: length of sync_pt_info including any driver_data
++ * @obj_name: name of parent sync_timeline
++ * @driver_name: name of driver implementing the parent
++ * @status: status of the sync_pt 0:active 1:signaled <0:error
++ * @timestamp_ns: timestamp of status change in nanoseconds
++ * @driver_data: any driver dependent data
++ */
++struct sync_pt_info {
++ __u32 len;
++ char obj_name[32];
++ char driver_name[32];
++ __s32 status;
++ __u64 timestamp_ns;
++
++ __u8 driver_data[0];
++};
++
++/**
++ * struct sync_fence_info_data - data returned from fence info ioctl
++ * @len: ioctl caller writes the size of the buffer it is passing in.
++ * ioctl returns length of sync_fence_info_data returned to userspace
++ * including pt_info.
++ * @name: name of fence
++ * @status: status of fence. 1: signaled 0:active <0:error
++ * @pt_info: a sync_pt_info struct for every sync_pt in the fence
++ */
++struct sync_fence_info_data {
++ __u32 len;
++ char name[32];
++ __s32 status;
++
++ __u8 pt_info[0];
++};
++
++#define SYNC_IOC_MAGIC '>'
++
++/**
++ * DOC: SYNC_IOC_WAIT - wait for a fence to signal
++ *
++ * Pass the timeout in milliseconds. Waits indefinitely if timeout < 0.
++ */
++#define SYNC_IOC_WAIT _IOW(SYNC_IOC_MAGIC, 0, __s32)
++
++/**
++ * DOC: SYNC_IOC_MERGE - merge two fences
++ *
++ * Takes a struct sync_merge_data. Creates a new fence containing copies of
++ * the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
++ * new fence's fd in sync_merge_data.fence
++ */
++#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
++
++/**
++ * DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
++ *
++ * Takes a struct sync_fence_info_data with extra space allocated for pt_info.
++ * Caller should write the size of the buffer into len. On return, len is
++ * updated to reflect the total size of the sync_fence_info_data including
++ * pt_info.
++ *
++ * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
++ * To iterate over the sync_pt_infos, use the sync_pt_info.len field.
++ */
++#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\
++ struct sync_fence_info_data)
++
++#endif /* _UAPI_LINUX_SYNC_H */
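A hedged user-space sketch of the merge-then-wait flow these ioctls
describe (fd1 and fd2 are placeholders for fence fds obtained elsewhere):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/types.h>
    #include "sync.h"            /* the header added above */

    int merge_and_wait(int fd1, int fd2)
    {
            struct sync_merge_data m;
            __s32 timeout = 100; /* ms; a negative value waits forever */

            memset(&m, 0, sizeof(m));
            m.fd2 = fd2;
            strncpy(m.name, "merged", sizeof(m.name) - 1);
            if (ioctl(fd1, SYNC_IOC_MERGE, &m) < 0)
                    return -1;
            if (ioctl(m.fence, SYNC_IOC_WAIT, &timeout) < 0) {
                    close(m.fence);
                    return -1;
            }
            return m.fence;      /* caller closes when done */
    }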
+diff -Nur linux-4.1.3/include/usb-ehci-mxc.h linux-xbian-imx6/include/usb-ehci-mxc.h
+--- linux-4.1.3/include/usb-ehci-mxc.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/usb-ehci-mxc.h 2015-07-27 23:13:09.981369897 +0200
+@@ -0,0 +1,13 @@
++#ifndef __INCLUDE_ASM_ARCH_MXC_EHCI_H
++#define __INCLUDE_ASM_ARCH_MXC_EHCI_H
++
++struct mxc_usbh_platform_data {
++ int (*init)(struct platform_device *pdev);
++ int (*exit)(struct platform_device *pdev);
++
++ unsigned int portsc;
++ struct usb_phy *otg;
++};
++
++#endif /* __INCLUDE_ASM_ARCH_MXC_EHCI_H */
++
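A board-file style sketch of how this platform data might be wired up;
the init hook body and the zero portsc value are purely illustrative:

    #include <linux/platform_device.h>
    #include "usb-ehci-mxc.h"

    static int board_usbh_init(struct platform_device *pdev)
    {
            /* e.g. switch on VBUS, release a hub reset line ... */
            return 0;
    }

    static struct mxc_usbh_platform_data board_usbh_pdata = {
            .init   = board_usbh_init,
            .portsc = 0,         /* placeholder PORTSC setup */
    };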
+diff -Nur linux-4.1.3/include/video/mxc_edid.h linux-xbian-imx6/include/video/mxc_edid.h
+--- linux-4.1.3/include/video/mxc_edid.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/video/mxc_edid.h 2015-07-27 23:13:09.981369897 +0200
+@@ -0,0 +1,108 @@
++/*
++ * Copyright 2009-2013 Freescale Semiconductor, Inc. All Rights Reserved.
++ */
++
++/*
++ * The code contained herein is licensed under the GNU General Public
++ * License. You may obtain a copy of the GNU General Public License
++ * Version 2 or later at the following locations:
++ *
++ * http://www.opensource.org/licenses/gpl-license.html
++ * http://www.gnu.org/copyleft/gpl.html
++ */
++
++/*!
++ * @defgroup Framebuffer Framebuffer Driver for SDC and ADC.
++ */
++
++/*!
++ * @file mxc_edid.h
++ *
++ * @brief MXC EDID tools
++ *
++ * @ingroup Framebuffer
++ */
++
++#ifndef MXC_EDID_H
++#define MXC_EDID_H
++
++#include <linux/fb.h>
++
++enum cea_audio_coding_types {
++ AUDIO_CODING_TYPE_REF_STREAM_HEADER = 0,
++ AUDIO_CODING_TYPE_LPCM = 1,
++ AUDIO_CODING_TYPE_AC3 = 2,
++ AUDIO_CODING_TYPE_MPEG1 = 3,
++ AUDIO_CODING_TYPE_MP3 = 4,
++ AUDIO_CODING_TYPE_MPEG2 = 5,
++ AUDIO_CODING_TYPE_AACLC = 6,
++ AUDIO_CODING_TYPE_DTS = 7,
++ AUDIO_CODING_TYPE_ATRAC = 8,
++ AUDIO_CODING_TYPE_SACD = 9,
++ AUDIO_CODING_TYPE_EAC3 = 10,
++ AUDIO_CODING_TYPE_DTS_HD = 11,
++ AUDIO_CODING_TYPE_MLP = 12,
++ AUDIO_CODING_TYPE_DST = 13,
++ AUDIO_CODING_TYPE_WMAPRO = 14,
++ AUDIO_CODING_TYPE_RESERVED = 15,
++};
++
++struct mxc_hdmi_3d_format {
++ unsigned char vic_order_2d;
++ unsigned char struct_3d;
++ unsigned char detail_3d;
++ unsigned char reserved;
++};
++
++struct mxc_edid_cfg {
++ bool cea_underscan;
++ bool cea_basicaudio;
++ bool cea_ycbcr444;
++ bool cea_ycbcr422;
++ bool hdmi_cap;
++
++ /*VSD*/
++ bool vsd_support_ai;
++ bool vsd_dc_48bit;
++ bool vsd_dc_36bit;
++ bool vsd_dc_30bit;
++ bool vsd_dc_y444;
++ bool vsd_dvi_dual;
++
++ bool vsd_cnc0;
++ bool vsd_cnc1;
++ bool vsd_cnc2;
++ bool vsd_cnc3;
++
++ u8 vsd_video_latency;
++ u8 vsd_audio_latency;
++ u8 vsd_I_video_latency;
++ u8 vsd_I_audio_latency;
++
++ u8 physical_address[4];
++ u8 hdmi_vic[64];
++ struct mxc_hdmi_3d_format hdmi_3d_format[64];
++ u16 hdmi_3d_mask_all;
++ u16 hdmi_3d_struct_all;
++ unsigned char hdmi_3d_present;
++ unsigned char hdmi_3d_len;
++ unsigned char hdmi_3d_multi_present;
++ u32 vsd_max_tmdsclk_rate;
++
++ u8 max_channels;
++ u8 sample_sizes;
++ u8 sample_rates;
++ u8 speaker_alloc;
++};
++
++int mxc_edid_var_to_vic(struct fb_var_screeninfo *var);
++int mxc_edid_mode_to_vic(const struct fb_videomode *mode, u32 mode_mask);
++int mxc_edid_read(struct i2c_adapter *adp, unsigned short addr,
++ unsigned char *edid, struct mxc_edid_cfg *cfg, struct fb_info *fbi);
++int mxc_edid_parse_ext_blk(unsigned char *edid, struct mxc_edid_cfg *cfg,
++ struct fb_monspecs *specs);
++const struct fb_videomode *mxc_fb_find_nearest_mode(const struct fb_videomode *mode,
++ struct list_head *head);
++int mxc_edid_fb_mode_is_equal(bool use_aspect, const struct fb_videomode *mode1,
++ const struct fb_videomode *mode2, u32 mode_mask);
++#endif
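A hedged kernel-side sketch of the mxc_edid_read() entry point declared
above (the 0x50 DDC address and 512-byte buffer are conventional for
EDID, not mandated by this header):

    #include <linux/errno.h>
    #include <linux/fb.h>
    #include <linux/i2c.h>
    #include <video/mxc_edid.h>

    static int read_sink_edid(struct i2c_adapter *adp, struct fb_info *fbi)
    {
            static unsigned char edid[512];
            struct mxc_edid_cfg cfg;
            int ret;

            ret = mxc_edid_read(adp, 0x50, edid, &cfg, fbi);
            if (ret < 0)
                    return ret;

            return cfg.hdmi_cap ? 0 : -ENODEV; /* HDMI vs. plain DVI sink */
    }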
+diff -Nur linux-4.1.3/include/video/mxc_hdmi.h linux-xbian-imx6/include/video/mxc_hdmi.h
+--- linux-4.1.3/include/video/mxc_hdmi.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/include/video/mxc_hdmi.h 2015-07-27 23:13:09.981369897 +0200
+@@ -0,0 +1,1027 @@
++/*
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
++ */
++
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __MXC_HDMI_H__
++#define __MXC_HDMI_H__
++
++/*
++ * Hdmi controller registers
++ */
++
++/* Identification Registers */
++#define HDMI_DESIGN_ID 0x0000
++#define HDMI_REVISION_ID 0x0001
++#define HDMI_PRODUCT_ID0 0x0002
++#define HDMI_PRODUCT_ID1 0x0003
++#define HDMI_CONFIG0_ID 0x0004
++#define HDMI_CONFIG1_ID 0x0005
++#define HDMI_CONFIG2_ID 0x0006
++#define HDMI_CONFIG3_ID 0x0007
++
++/* Interrupt Registers */
++#define HDMI_IH_FC_STAT0 0x0100
++#define HDMI_IH_FC_STAT1 0x0101
++#define HDMI_IH_FC_STAT2 0x0102
++#define HDMI_IH_AS_STAT0 0x0103
++#define HDMI_IH_PHY_STAT0 0x0104
++#define HDMI_IH_I2CM_STAT0 0x0105
++#define HDMI_IH_CEC_STAT0 0x0106
++#define HDMI_IH_VP_STAT0 0x0107
++#define HDMI_IH_I2CMPHY_STAT0 0x0108
++#define HDMI_IH_AHBDMAAUD_STAT0 0x0109
++
++#define HDMI_IH_MUTE_FC_STAT0 0x0180
++#define HDMI_IH_MUTE_FC_STAT1 0x0181
++#define HDMI_IH_MUTE_FC_STAT2 0x0182
++#define HDMI_IH_MUTE_AS_STAT0 0x0183
++#define HDMI_IH_MUTE_PHY_STAT0 0x0184
++#define HDMI_IH_MUTE_I2CM_STAT0 0x0185
++#define HDMI_IH_MUTE_CEC_STAT0 0x0186
++#define HDMI_IH_MUTE_VP_STAT0 0x0187
++#define HDMI_IH_MUTE_I2CMPHY_STAT0 0x0188
++#define HDMI_IH_MUTE_AHBDMAAUD_STAT0 0x0189
++#define HDMI_IH_MUTE 0x01FF
++
++/* Video Sample Registers */
++#define HDMI_TX_INVID0 0x0200
++#define HDMI_TX_INSTUFFING 0x0201
++#define HDMI_TX_GYDATA0 0x0202
++#define HDMI_TX_GYDATA1 0x0203
++#define HDMI_TX_RCRDATA0 0x0204
++#define HDMI_TX_RCRDATA1 0x0205
++#define HDMI_TX_BCBDATA0 0x0206
++#define HDMI_TX_BCBDATA1 0x0207
++
++/* Video Packetizer Registers */
++#define HDMI_VP_STATUS 0x0800
++#define HDMI_VP_PR_CD 0x0801
++#define HDMI_VP_STUFF 0x0802
++#define HDMI_VP_REMAP 0x0803
++#define HDMI_VP_CONF 0x0804
++#define HDMI_VP_STAT 0x0805
++#define HDMI_VP_INT 0x0806
++#define HDMI_VP_MASK 0x0807
++#define HDMI_VP_POL 0x0808
++
++/* Frame Composer Registers */
++#define HDMI_FC_INVIDCONF 0x1000
++#define HDMI_FC_INHACTV0 0x1001
++#define HDMI_FC_INHACTV1 0x1002
++#define HDMI_FC_INHBLANK0 0x1003
++#define HDMI_FC_INHBLANK1 0x1004
++#define HDMI_FC_INVACTV0 0x1005
++#define HDMI_FC_INVACTV1 0x1006
++#define HDMI_FC_INVBLANK 0x1007
++#define HDMI_FC_HSYNCINDELAY0 0x1008
++#define HDMI_FC_HSYNCINDELAY1 0x1009
++#define HDMI_FC_HSYNCINWIDTH0 0x100A
++#define HDMI_FC_HSYNCINWIDTH1 0x100B
++#define HDMI_FC_VSYNCINDELAY 0x100C
++#define HDMI_FC_VSYNCINWIDTH 0x100D
++#define HDMI_FC_INFREQ0 0x100E
++#define HDMI_FC_INFREQ1 0x100F
++#define HDMI_FC_INFREQ2 0x1010
++#define HDMI_FC_CTRLDUR 0x1011
++#define HDMI_FC_EXCTRLDUR 0x1012
++#define HDMI_FC_EXCTRLSPAC 0x1013
++#define HDMI_FC_CH0PREAM 0x1014
++#define HDMI_FC_CH1PREAM 0x1015
++#define HDMI_FC_CH2PREAM 0x1016
++#define HDMI_FC_AVICONF3 0x1017
++#define HDMI_FC_GCP 0x1018
++#define HDMI_FC_AVICONF0 0x1019
++#define HDMI_FC_AVICONF1 0x101A
++#define HDMI_FC_AVICONF2 0x101B
++#define HDMI_FC_AVIVID 0x101C
++#define HDMI_FC_AVIETB0 0x101D
++#define HDMI_FC_AVIETB1 0x101E
++#define HDMI_FC_AVISBB0 0x101F
++#define HDMI_FC_AVISBB1 0x1020
++#define HDMI_FC_AVIELB0 0x1021
++#define HDMI_FC_AVIELB1 0x1022
++#define HDMI_FC_AVISRB0 0x1023
++#define HDMI_FC_AVISRB1 0x1024
++#define HDMI_FC_AUDICONF0 0x1025
++#define HDMI_FC_AUDICONF1 0x1026
++#define HDMI_FC_AUDICONF2 0x1027
++#define HDMI_FC_AUDICONF3 0x1028
++#define HDMI_FC_VSDIEEEID0 0x1029
++#define HDMI_FC_VSDSIZE 0x102A
++#define HDMI_FC_VSDIEEEID1 0x1030
++#define HDMI_FC_VSDIEEEID2 0x1031
++#define HDMI_FC_VSDPAYLOAD0 0x1032
++#define HDMI_FC_VSDPAYLOAD1 0x1033
++#define HDMI_FC_VSDPAYLOAD2 0x1034
++#define HDMI_FC_VSDPAYLOAD3 0x1035
++#define HDMI_FC_VSDPAYLOAD4 0x1036
++#define HDMI_FC_VSDPAYLOAD5 0x1037
++#define HDMI_FC_VSDPAYLOAD6 0x1038
++#define HDMI_FC_VSDPAYLOAD7 0x1039
++#define HDMI_FC_VSDPAYLOAD8 0x103A
++#define HDMI_FC_VSDPAYLOAD9 0x103B
++#define HDMI_FC_VSDPAYLOAD10 0x103C
++#define HDMI_FC_VSDPAYLOAD11 0x103D
++#define HDMI_FC_VSDPAYLOAD12 0x103E
++#define HDMI_FC_VSDPAYLOAD13 0x103F
++#define HDMI_FC_VSDPAYLOAD14 0x1040
++#define HDMI_FC_VSDPAYLOAD15 0x1041
++#define HDMI_FC_VSDPAYLOAD16 0x1042
++#define HDMI_FC_VSDPAYLOAD17 0x1043
++#define HDMI_FC_VSDPAYLOAD18 0x1044
++#define HDMI_FC_VSDPAYLOAD19 0x1045
++#define HDMI_FC_VSDPAYLOAD20 0x1046
++#define HDMI_FC_VSDPAYLOAD21 0x1047
++#define HDMI_FC_VSDPAYLOAD22 0x1048
++#define HDMI_FC_VSDPAYLOAD23 0x1049
++#define HDMI_FC_SPDVENDORNAME0 0x104A
++#define HDMI_FC_SPDVENDORNAME1 0x104B
++#define HDMI_FC_SPDVENDORNAME2 0x104C
++#define HDMI_FC_SPDVENDORNAME3 0x104D
++#define HDMI_FC_SPDVENDORNAME4 0x104E
++#define HDMI_FC_SPDVENDORNAME5 0x104F
++#define HDMI_FC_SPDVENDORNAME6 0x1050
++#define HDMI_FC_SPDVENDORNAME7 0x1051
++#define HDMI_FC_SDPPRODUCTNAME0 0x1052
++#define HDMI_FC_SDPPRODUCTNAME1 0x1053
++#define HDMI_FC_SDPPRODUCTNAME2 0x1054
++#define HDMI_FC_SDPPRODUCTNAME3 0x1055
++#define HDMI_FC_SDPPRODUCTNAME4 0x1056
++#define HDMI_FC_SDPPRODUCTNAME5 0x1057
++#define HDMI_FC_SDPPRODUCTNAME6 0x1058
++#define HDMI_FC_SDPPRODUCTNAME7 0x1059
++#define HDMI_FC_SDPPRODUCTNAME8 0x105A
++#define HDMI_FC_SDPPRODUCTNAME9 0x105B
++#define HDMI_FC_SDPPRODUCTNAME10 0x105C
++#define HDMI_FC_SDPPRODUCTNAME11 0x105D
++#define HDMI_FC_SDPPRODUCTNAME12 0x105E
++#define HDMI_FC_SDPPRODUCTNAME13 0x105F
++#define HDMI_FC_SDPPRODUCTNAME14 0x1060
++#define HDMI_FC_SPDPRODUCTNAME15 0x1061
++#define HDMI_FC_SPDDEVICEINF 0x1062
++#define HDMI_FC_AUDSCONF 0x1063
++#define HDMI_FC_AUDSSTAT 0x1064
++#define HDMI_FC_DATACH0FILL 0x1070
++#define HDMI_FC_DATACH1FILL 0x1071
++#define HDMI_FC_DATACH2FILL 0x1072
++#define HDMI_FC_CTRLQHIGH 0x1073
++#define HDMI_FC_CTRLQLOW 0x1074
++#define HDMI_FC_ACP0 0x1075
++#define HDMI_FC_ACP28 0x1076
++#define HDMI_FC_ACP27 0x1077
++#define HDMI_FC_ACP26 0x1078
++#define HDMI_FC_ACP25 0x1079
++#define HDMI_FC_ACP24 0x107A
++#define HDMI_FC_ACP23 0x107B
++#define HDMI_FC_ACP22 0x107C
++#define HDMI_FC_ACP21 0x107D
++#define HDMI_FC_ACP20 0x107E
++#define HDMI_FC_ACP19 0x107F
++#define HDMI_FC_ACP18 0x1080
++#define HDMI_FC_ACP17 0x1081
++#define HDMI_FC_ACP16 0x1082
++#define HDMI_FC_ACP15 0x1083
++#define HDMI_FC_ACP14 0x1084
++#define HDMI_FC_ACP13 0x1085
++#define HDMI_FC_ACP12 0x1086
++#define HDMI_FC_ACP11 0x1087
++#define HDMI_FC_ACP10 0x1088
++#define HDMI_FC_ACP9 0x1089
++#define HDMI_FC_ACP8 0x108A
++#define HDMI_FC_ACP7 0x108B
++#define HDMI_FC_ACP6 0x108C
++#define HDMI_FC_ACP5 0x108D
++#define HDMI_FC_ACP4 0x108E
++#define HDMI_FC_ACP3 0x108F
++#define HDMI_FC_ACP2 0x1090
++#define HDMI_FC_ACP1 0x1091
++#define HDMI_FC_ISCR1_0 0x1092
++#define HDMI_FC_ISCR1_16 0x1093
++#define HDMI_FC_ISCR1_15 0x1094
++#define HDMI_FC_ISCR1_14 0x1095
++#define HDMI_FC_ISCR1_13 0x1096
++#define HDMI_FC_ISCR1_12 0x1097
++#define HDMI_FC_ISCR1_11 0x1098
++#define HDMI_FC_ISCR1_10 0x1099
++#define HDMI_FC_ISCR1_9 0x109A
++#define HDMI_FC_ISCR1_8 0x109B
++#define HDMI_FC_ISCR1_7 0x109C
++#define HDMI_FC_ISCR1_6 0x109D
++#define HDMI_FC_ISCR1_5 0x109E
++#define HDMI_FC_ISCR1_4 0x109F
++#define HDMI_FC_ISCR1_3 0x10A0
++#define HDMI_FC_ISCR1_2 0x10A1
++#define HDMI_FC_ISCR1_1 0x10A2
++#define HDMI_FC_ISCR2_15 0x10A3
++#define HDMI_FC_ISCR2_14 0x10A4
++#define HDMI_FC_ISCR2_13 0x10A5
++#define HDMI_FC_ISCR2_12 0x10A6
++#define HDMI_FC_ISCR2_11 0x10A7
++#define HDMI_FC_ISCR2_10 0x10A8
++#define HDMI_FC_ISCR2_9 0x10A9
++#define HDMI_FC_ISCR2_8 0x10AA
++#define HDMI_FC_ISCR2_7 0x10AB
++#define HDMI_FC_ISCR2_6 0x10AC
++#define HDMI_FC_ISCR2_5 0x10AD
++#define HDMI_FC_ISCR2_4 0x10AE
++#define HDMI_FC_ISCR2_3 0x10AF
++#define HDMI_FC_ISCR2_2 0x10B0
++#define HDMI_FC_ISCR2_1 0x10B1
++#define HDMI_FC_ISCR2_0 0x10B2
++#define HDMI_FC_DATAUTO0 0x10B3
++#define HDMI_FC_DATAUTO1 0x10B4
++#define HDMI_FC_DATAUTO2 0x10B5
++#define HDMI_FC_DATMAN 0x10B6
++#define HDMI_FC_DATAUTO3 0x10B7
++#define HDMI_FC_RDRB0 0x10B8
++#define HDMI_FC_RDRB1 0x10B9
++#define HDMI_FC_RDRB2 0x10BA
++#define HDMI_FC_RDRB3 0x10BB
++#define HDMI_FC_RDRB4 0x10BC
++#define HDMI_FC_RDRB5 0x10BD
++#define HDMI_FC_RDRB6 0x10BE
++#define HDMI_FC_RDRB7 0x10BF
++#define HDMI_FC_STAT0 0x10D0
++#define HDMI_FC_INT0 0x10D1
++#define HDMI_FC_MASK0 0x10D2
++#define HDMI_FC_POL0 0x10D3
++#define HDMI_FC_STAT1 0x10D4
++#define HDMI_FC_INT1 0x10D5
++#define HDMI_FC_MASK1 0x10D6
++#define HDMI_FC_POL1 0x10D7
++#define HDMI_FC_STAT2 0x10D8
++#define HDMI_FC_INT2 0x10D9
++#define HDMI_FC_MASK2 0x10DA
++#define HDMI_FC_POL2 0x10DB
++#define HDMI_FC_PRCONF 0x10E0
++
++#define HDMI_FC_GMD_STAT 0x1100
++#define HDMI_FC_GMD_EN 0x1101
++#define HDMI_FC_GMD_UP 0x1102
++#define HDMI_FC_GMD_CONF 0x1103
++#define HDMI_FC_GMD_HB 0x1104
++#define HDMI_FC_GMD_PB0 0x1105
++#define HDMI_FC_GMD_PB1 0x1106
++#define HDMI_FC_GMD_PB2 0x1107
++#define HDMI_FC_GMD_PB3 0x1108
++#define HDMI_FC_GMD_PB4 0x1109
++#define HDMI_FC_GMD_PB5 0x110A
++#define HDMI_FC_GMD_PB6 0x110B
++#define HDMI_FC_GMD_PB7 0x110C
++#define HDMI_FC_GMD_PB8 0x110D
++#define HDMI_FC_GMD_PB9 0x110E
++#define HDMI_FC_GMD_PB10 0x110F
++#define HDMI_FC_GMD_PB11 0x1110
++#define HDMI_FC_GMD_PB12 0x1111
++#define HDMI_FC_GMD_PB13 0x1112
++#define HDMI_FC_GMD_PB14 0x1113
++#define HDMI_FC_GMD_PB15 0x1114
++#define HDMI_FC_GMD_PB16 0x1115
++#define HDMI_FC_GMD_PB17 0x1116
++#define HDMI_FC_GMD_PB18 0x1117
++#define HDMI_FC_GMD_PB19 0x1118
++#define HDMI_FC_GMD_PB20 0x1119
++#define HDMI_FC_GMD_PB21 0x111A
++#define HDMI_FC_GMD_PB22 0x111B
++#define HDMI_FC_GMD_PB23 0x111C
++#define HDMI_FC_GMD_PB24 0x111D
++#define HDMI_FC_GMD_PB25 0x111E
++#define HDMI_FC_GMD_PB26 0x111F
++#define HDMI_FC_GMD_PB27 0x1120
++
++#define HDMI_FC_DBGFORCE 0x1200
++#define HDMI_FC_DBGAUD0CH0 0x1201
++#define HDMI_FC_DBGAUD1CH0 0x1202
++#define HDMI_FC_DBGAUD2CH0 0x1203
++#define HDMI_FC_DBGAUD0CH1 0x1204
++#define HDMI_FC_DBGAUD1CH1 0x1205
++#define HDMI_FC_DBGAUD2CH1 0x1206
++#define HDMI_FC_DBGAUD0CH2 0x1207
++#define HDMI_FC_DBGAUD1CH2 0x1208
++#define HDMI_FC_DBGAUD2CH2 0x1209
++#define HDMI_FC_DBGAUD0CH3 0x120A
++#define HDMI_FC_DBGAUD1CH3 0x120B
++#define HDMI_FC_DBGAUD2CH3 0x120C
++#define HDMI_FC_DBGAUD0CH4 0x120D
++#define HDMI_FC_DBGAUD1CH4 0x120E
++#define HDMI_FC_DBGAUD2CH4 0x120F
++#define HDMI_FC_DBGAUD0CH5 0x1210
++#define HDMI_FC_DBGAUD1CH5 0x1211
++#define HDMI_FC_DBGAUD2CH5 0x1212
++#define HDMI_FC_DBGAUD0CH6 0x1213
++#define HDMI_FC_DBGAUD1CH6 0x1214
++#define HDMI_FC_DBGAUD2CH6 0x1215
++#define HDMI_FC_DBGAUD0CH7 0x1216
++#define HDMI_FC_DBGAUD1CH7 0x1217
++#define HDMI_FC_DBGAUD2CH7 0x1218
++#define HDMI_FC_DBGTMDS0 0x1219
++#define HDMI_FC_DBGTMDS1 0x121A
++#define HDMI_FC_DBGTMDS2 0x121B
++
++/* HDMI Source PHY Registers */
++#define HDMI_PHY_CONF0 0x3000
++#define HDMI_PHY_TST0 0x3001
++#define HDMI_PHY_TST1 0x3002
++#define HDMI_PHY_TST2 0x3003
++#define HDMI_PHY_STAT0 0x3004
++#define HDMI_PHY_INT0 0x3005
++#define HDMI_PHY_MASK0 0x3006
++#define HDMI_PHY_POL0 0x3007
++
++/* HDMI Master PHY Registers */
++#define HDMI_PHY_I2CM_SLAVE_ADDR 0x3020
++#define HDMI_PHY_I2CM_ADDRESS_ADDR 0x3021
++#define HDMI_PHY_I2CM_DATAO_1_ADDR 0x3022
++#define HDMI_PHY_I2CM_DATAO_0_ADDR 0x3023
++#define HDMI_PHY_I2CM_DATAI_1_ADDR 0x3024
++#define HDMI_PHY_I2CM_DATAI_0_ADDR 0x3025
++#define HDMI_PHY_I2CM_OPERATION_ADDR 0x3026
++#define HDMI_PHY_I2CM_INT_ADDR 0x3027
++#define HDMI_PHY_I2CM_CTLINT_ADDR 0x3028
++#define HDMI_PHY_I2CM_DIV_ADDR 0x3029
++#define HDMI_PHY_I2CM_SOFTRSTZ_ADDR 0x302a
++#define HDMI_PHY_I2CM_SS_SCL_HCNT_1_ADDR 0x302b
++#define HDMI_PHY_I2CM_SS_SCL_HCNT_0_ADDR 0x302c
++#define HDMI_PHY_I2CM_SS_SCL_LCNT_1_ADDR 0x302d
++#define HDMI_PHY_I2CM_SS_SCL_LCNT_0_ADDR 0x302e
++#define HDMI_PHY_I2CM_FS_SCL_HCNT_1_ADDR 0x302f
++#define HDMI_PHY_I2CM_FS_SCL_HCNT_0_ADDR 0x3030
++#define HDMI_PHY_I2CM_FS_SCL_LCNT_1_ADDR 0x3031
++#define HDMI_PHY_I2CM_FS_SCL_LCNT_0_ADDR 0x3032
++
++/* Audio Sampler Registers */
++#define HDMI_AUD_CONF0 0x3100
++#define HDMI_AUD_CONF1 0x3101
++#define HDMI_AUD_INT 0x3102
++#define HDMI_AUD_CONF2 0x3103
++#define HDMI_AUD_N1 0x3200
++#define HDMI_AUD_N2 0x3201
++#define HDMI_AUD_N3 0x3202
++#define HDMI_AUD_CTS1 0x3203
++#define HDMI_AUD_CTS2 0x3204
++#define HDMI_AUD_CTS3 0x3205
++#define HDMI_AUD_INPUTCLKFS 0x3206
++#define HDMI_AUD_SPDIFINT 0x3302
++#define HDMI_AUD_CONF0_HBR 0x3400
++#define HDMI_AUD_HBR_STATUS 0x3401
++#define HDMI_AUD_HBR_INT 0x3402
++#define HDMI_AUD_HBR_POL 0x3403
++#define HDMI_AUD_HBR_MASK 0x3404
++
++/* Generic Parallel Audio Interface Registers */
++/* Not used as GPAUD interface is not enabled in hw */
++#define HDMI_GP_CONF0 0x3500
++#define HDMI_GP_CONF1 0x3501
++#define HDMI_GP_CONF2 0x3502
++#define HDMI_GP_STAT 0x3503
++#define HDMI_GP_INT 0x3504
++#define HDMI_GP_MASK 0x3505
++#define HDMI_GP_POL 0x3506
++
++/* Audio DMA Registers */
++#define HDMI_AHB_DMA_CONF0 0x3600
++#define HDMI_AHB_DMA_START 0x3601
++#define HDMI_AHB_DMA_STOP 0x3602
++#define HDMI_AHB_DMA_THRSLD 0x3603
++#define HDMI_AHB_DMA_STRADDR0 0x3604
++#define HDMI_AHB_DMA_STRADDR1 0x3605
++#define HDMI_AHB_DMA_STRADDR2 0x3606
++#define HDMI_AHB_DMA_STRADDR3 0x3607
++#define HDMI_AHB_DMA_STPADDR0 0x3608
++#define HDMI_AHB_DMA_STPADDR1 0x3609
++#define HDMI_AHB_DMA_STPADDR2 0x360a
++#define HDMI_AHB_DMA_STPADDR3 0x360b
++#define HDMI_AHB_DMA_BSTADDR0 0x360c
++#define HDMI_AHB_DMA_BSTADDR1 0x360d
++#define HDMI_AHB_DMA_BSTADDR2 0x360e
++#define HDMI_AHB_DMA_BSTADDR3 0x360f
++#define HDMI_AHB_DMA_MBLENGTH0 0x3610
++#define HDMI_AHB_DMA_MBLENGTH1 0x3611
++#define HDMI_AHB_DMA_STAT 0x3612
++#define HDMI_AHB_DMA_INT 0x3613
++#define HDMI_AHB_DMA_MASK 0x3614
++#define HDMI_AHB_DMA_POL 0x3615
++#define HDMI_AHB_DMA_CONF1 0x3616
++#define HDMI_AHB_DMA_BUFFSTAT 0x3617
++#define HDMI_AHB_DMA_BUFFINT 0x3618
++#define HDMI_AHB_DMA_BUFFMASK 0x3619
++#define HDMI_AHB_DMA_BUFFPOL 0x361a
++
++/* Main Controller Registers */
++#define HDMI_MC_SFRDIV 0x4000
++#define HDMI_MC_CLKDIS 0x4001
++#define HDMI_MC_SWRSTZ 0x4002
++#define HDMI_MC_OPCTRL 0x4003
++#define HDMI_MC_FLOWCTRL 0x4004
++#define HDMI_MC_PHYRSTZ 0x4005
++#define HDMI_MC_LOCKONCLOCK 0x4006
++#define HDMI_MC_HEACPHY_RST 0x4007
++
++/* Color Space Converter Registers */
++#define HDMI_CSC_CFG 0x4100
++#define HDMI_CSC_SCALE 0x4101
++#define HDMI_CSC_COEF_A1_MSB 0x4102
++#define HDMI_CSC_COEF_A1_LSB 0x4103
++#define HDMI_CSC_COEF_A2_MSB 0x4104
++#define HDMI_CSC_COEF_A2_LSB 0x4105
++#define HDMI_CSC_COEF_A3_MSB 0x4106
++#define HDMI_CSC_COEF_A3_LSB 0x4107
++#define HDMI_CSC_COEF_A4_MSB 0x4108
++#define HDMI_CSC_COEF_A4_LSB 0x4109
++#define HDMI_CSC_COEF_B1_MSB 0x410A
++#define HDMI_CSC_COEF_B1_LSB 0x410B
++#define HDMI_CSC_COEF_B2_MSB 0x410C
++#define HDMI_CSC_COEF_B2_LSB 0x410D
++#define HDMI_CSC_COEF_B3_MSB 0x410E
++#define HDMI_CSC_COEF_B3_LSB 0x410F
++#define HDMI_CSC_COEF_B4_MSB 0x4110
++#define HDMI_CSC_COEF_B4_LSB 0x4111
++#define HDMI_CSC_COEF_C1_MSB 0x4112
++#define HDMI_CSC_COEF_C1_LSB 0x4113
++#define HDMI_CSC_COEF_C2_MSB 0x4114
++#define HDMI_CSC_COEF_C2_LSB 0x4115
++#define HDMI_CSC_COEF_C3_MSB 0x4116
++#define HDMI_CSC_COEF_C3_LSB 0x4117
++#define HDMI_CSC_COEF_C4_MSB 0x4118
++#define HDMI_CSC_COEF_C4_LSB 0x4119
++
++/* HDCP Interrupt Registers */
++#define HDMI_A_APIINTCLR 0x5006
++#define HDMI_A_APIINTSTAT 0x5007
++#define HDMI_A_APIINTMSK 0x5008
++
++/* CEC Engine Registers */
++#define HDMI_CEC_CTRL 0x7D00
++#define HDMI_CEC_STAT 0x7D01
++#define HDMI_CEC_MASK 0x7D02
++#define HDMI_CEC_POLARITY 0x7D03
++#define HDMI_CEC_INT 0x7D04
++#define HDMI_CEC_ADDR_L 0x7D05
++#define HDMI_CEC_ADDR_H 0x7D06
++#define HDMI_CEC_TX_CNT 0x7D07
++#define HDMI_CEC_RX_CNT 0x7D08
++#define HDMI_CEC_TX_DATA0 0x7D10
++#define HDMI_CEC_TX_DATA1 0x7D11
++#define HDMI_CEC_TX_DATA2 0x7D12
++#define HDMI_CEC_TX_DATA3 0x7D13
++#define HDMI_CEC_TX_DATA4 0x7D14
++#define HDMI_CEC_TX_DATA5 0x7D15
++#define HDMI_CEC_TX_DATA6 0x7D16
++#define HDMI_CEC_TX_DATA7 0x7D17
++#define HDMI_CEC_TX_DATA8 0x7D18
++#define HDMI_CEC_TX_DATA9 0x7D19
++#define HDMI_CEC_TX_DATA10 0x7D1a
++#define HDMI_CEC_TX_DATA11 0x7D1b
++#define HDMI_CEC_TX_DATA12 0x7D1c
++#define HDMI_CEC_TX_DATA13 0x7D1d
++#define HDMI_CEC_TX_DATA14 0x7D1e
++#define HDMI_CEC_TX_DATA15 0x7D1f
++#define HDMI_CEC_RX_DATA0 0x7D20
++#define HDMI_CEC_RX_DATA1 0x7D21
++#define HDMI_CEC_RX_DATA2 0x7D22
++#define HDMI_CEC_RX_DATA3 0x7D23
++#define HDMI_CEC_RX_DATA4 0x7D24
++#define HDMI_CEC_RX_DATA5 0x7D25
++#define HDMI_CEC_RX_DATA6 0x7D26
++#define HDMI_CEC_RX_DATA7 0x7D27
++#define HDMI_CEC_RX_DATA8 0x7D28
++#define HDMI_CEC_RX_DATA9 0x7D29
++#define HDMI_CEC_RX_DATA10 0x7D2a
++#define HDMI_CEC_RX_DATA11 0x7D2b
++#define HDMI_CEC_RX_DATA12 0x7D2c
++#define HDMI_CEC_RX_DATA13 0x7D2d
++#define HDMI_CEC_RX_DATA14 0x7D2e
++#define HDMI_CEC_RX_DATA15 0x7D2f
++#define HDMI_CEC_LOCK 0x7D30
++#define HDMI_CEC_WKUPCTRL 0x7D31
++
++/* I2C Master Registers (E-DDC) */
++#define HDMI_I2CM_SLAVE 0x7E00
++#define HDMI_I2CM_ADDRESS 0x7E01
++#define HDMI_I2CM_DATAO 0x7E02
++#define HDMI_I2CM_DATAI 0x7E03
++#define HDMI_I2CM_OPERATION 0x7E04
++#define HDMI_I2CM_INT 0x7E05
++#define HDMI_I2CM_CTLINT 0x7E06
++#define HDMI_I2CM_DIV 0x7E07
++#define HDMI_I2CM_SEGADDR 0x7E08
++#define HDMI_I2CM_SOFTRSTZ 0x7E09
++#define HDMI_I2CM_SEGPTR 0x7E0A
++#define HDMI_I2CM_SS_SCL_HCNT_1_ADDR 0x7E0B
++#define HDMI_I2CM_SS_SCL_HCNT_0_ADDR 0x7E0C
++#define HDMI_I2CM_SS_SCL_LCNT_1_ADDR 0x7E0D
++#define HDMI_I2CM_SS_SCL_LCNT_0_ADDR 0x7E0E
++#define HDMI_I2CM_FS_SCL_HCNT_1_ADDR 0x7E0F
++#define HDMI_I2CM_FS_SCL_HCNT_0_ADDR 0x7E10
++#define HDMI_I2CM_FS_SCL_LCNT_1_ADDR 0x7E11
++#define HDMI_I2CM_FS_SCL_LCNT_0_ADDR 0x7E12
++
++/* Random Number Generator Registers (RNG) */
++#define HDMI_RNG_BASE 0x8000
++
++
++/*
++ * Register field definitions
++ */
++enum {
++/* IH_FC_INT2 field values */
++ HDMI_IH_FC_INT2_OVERFLOW_MASK = 0x03,
++ HDMI_IH_FC_INT2_LOW_PRIORITY_OVERFLOW = 0x02,
++ HDMI_IH_FC_INT2_HIGH_PRIORITY_OVERFLOW = 0x01,
++
++/* IH_FC_STAT2 field values */
++ HDMI_IH_FC_STAT2_OVERFLOW_MASK = 0x03,
++ HDMI_IH_FC_STAT2_LOW_PRIORITY_OVERFLOW = 0x02,
++ HDMI_IH_FC_STAT2_HIGH_PRIORITY_OVERFLOW = 0x01,
++
++/* IH_PHY_STAT0 field values */
++ HDMI_IH_PHY_STAT0_RX_SENSE3 = 0x20,
++ HDMI_IH_PHY_STAT0_RX_SENSE2 = 0x10,
++ HDMI_IH_PHY_STAT0_RX_SENSE1 = 0x8,
++ HDMI_IH_PHY_STAT0_RX_SENSE0 = 0x4,
++ HDMI_IH_PHY_STAT0_TX_PHY_LOCK = 0x2,
++ HDMI_IH_PHY_STAT0_HPD = 0x1,
++
++/* IH_CEC_STAT0 field values */
++ HDMI_IH_CEC_STAT0_WAKEUP = 0x40,
++ HDMI_IH_CEC_STAT0_ERROR_FOLL = 0x20,
++ HDMI_IH_CEC_STAT0_ERROR_INIT = 0x10,
++ HDMI_IH_CEC_STAT0_ARB_LOST = 0x8,
++ HDMI_IH_CEC_STAT0_NACK = 0x4,
++ HDMI_IH_CEC_STAT0_EOM = 0x2,
++ HDMI_IH_CEC_STAT0_DONE = 0x1,
++
++
++/* IH_MUTE_I2CMPHY_STAT0 field values */
++ HDMI_IH_MUTE_I2CMPHY_STAT0_I2CMPHYDONE = 0x2,
++ HDMI_IH_MUTE_I2CMPHY_STAT0_I2CMPHYERROR = 0x1,
++
++/* IH_PHY_STAT0 field values */
++ HDMI_IH_MUTE_PHY_STAT0_RX_SENSE3 = 0x20,
++ HDMI_IH_MUTE_PHY_STAT0_RX_SENSE2 = 0x10,
++ HDMI_IH_MUTE_PHY_STAT0_RX_SENSE1 = 0x8,
++ HDMI_IH_MUTE_PHY_STAT0_RX_SENSE0 = 0x4,
++ HDMI_IH_MUTE_PHY_STAT0_TX_PHY_LOCK = 0x2,
++ HDMI_IH_MUTE_PHY_STAT0_HPD = 0x1,
++
++/* IH and IH_MUTE convenience macro RX_SENSE | HPD */
++ HDMI_DVI_IH_STAT = 0x3D,
++
++
++/* IH_AHBDMAAUD_STAT0 field values */
++ HDMI_IH_AHBDMAAUD_STAT0_ERROR = 0x20,
++ HDMI_IH_AHBDMAAUD_STAT0_LOST = 0x10,
++ HDMI_IH_AHBDMAAUD_STAT0_RETRY = 0x08,
++ HDMI_IH_AHBDMAAUD_STAT0_DONE = 0x04,
++ HDMI_IH_AHBDMAAUD_STAT0_BUFFFULL = 0x02,
++ HDMI_IH_AHBDMAAUD_STAT0_BUFFEMPTY = 0x01,
++
++/* IH_MUTE_FC_STAT2 field values */
++ HDMI_IH_MUTE_FC_STAT2_OVERFLOW_MASK = 0x03,
++ HDMI_IH_MUTE_FC_STAT2_LOW_PRIORITY_OVERFLOW = 0x02,
++ HDMI_IH_MUTE_FC_STAT2_HIGH_PRIORITY_OVERFLOW = 0x01,
++
++/* IH_MUTE_AHBDMAAUD_STAT0 field values */
++ HDMI_IH_MUTE_AHBDMAAUD_STAT0_ERROR = 0x20,
++ HDMI_IH_MUTE_AHBDMAAUD_STAT0_LOST = 0x10,
++ HDMI_IH_MUTE_AHBDMAAUD_STAT0_RETRY = 0x08,
++ HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE = 0x04,
++ HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFFULL = 0x02,
++ HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFEMPTY = 0x01,
++
++/* IH_MUTE field values */
++ HDMI_IH_MUTE_MUTE_WAKEUP_INTERRUPT = 0x2,
++ HDMI_IH_MUTE_MUTE_ALL_INTERRUPT = 0x1,
++
++/* TX_INVID0 field values */
++ HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_MASK = 0x80,
++ HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_ENABLE = 0x80,
++ HDMI_TX_INVID0_INTERNAL_DE_GENERATOR_DISABLE = 0x00,
++ HDMI_TX_INVID0_VIDEO_MAPPING_MASK = 0x1F,
++ HDMI_TX_INVID0_VIDEO_MAPPING_OFFSET = 0,
++
++/* TX_INSTUFFING field values */
++ HDMI_TX_INSTUFFING_BDBDATA_STUFFING_MASK = 0x4,
++ HDMI_TX_INSTUFFING_BDBDATA_STUFFING_ENABLE = 0x4,
++ HDMI_TX_INSTUFFING_BDBDATA_STUFFING_DISABLE = 0x0,
++ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_MASK = 0x2,
++ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_ENABLE = 0x2,
++ HDMI_TX_INSTUFFING_RCRDATA_STUFFING_DISABLE = 0x0,
++ HDMI_TX_INSTUFFING_GYDATA_STUFFING_MASK = 0x1,
++ HDMI_TX_INSTUFFING_GYDATA_STUFFING_ENABLE = 0x1,
++ HDMI_TX_INSTUFFING_GYDATA_STUFFING_DISABLE = 0x0,
++
++/* VP_PR_CD field values */
++ HDMI_VP_PR_CD_COLOR_DEPTH_MASK = 0xF0,
++ HDMI_VP_PR_CD_COLOR_DEPTH_OFFSET = 4,
++ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK = 0x0F,
++ HDMI_VP_PR_CD_DESIRED_PR_FACTOR_OFFSET = 0,
++
++/* VP_STUFF field values */
++ HDMI_VP_STUFF_IDEFAULT_PHASE_MASK = 0x20,
++ HDMI_VP_STUFF_IDEFAULT_PHASE_OFFSET = 5,
++ HDMI_VP_STUFF_IFIX_PP_TO_LAST_MASK = 0x10,
++ HDMI_VP_STUFF_IFIX_PP_TO_LAST_OFFSET = 4,
++ HDMI_VP_STUFF_ICX_GOTO_P0_ST_MASK = 0x8,
++ HDMI_VP_STUFF_ICX_GOTO_P0_ST_OFFSET = 3,
++ HDMI_VP_STUFF_YCC422_STUFFING_MASK = 0x4,
++ HDMI_VP_STUFF_YCC422_STUFFING_STUFFING_MODE = 0x4,
++ HDMI_VP_STUFF_YCC422_STUFFING_DIRECT_MODE = 0x0,
++ HDMI_VP_STUFF_PP_STUFFING_MASK = 0x2,
++ HDMI_VP_STUFF_PP_STUFFING_STUFFING_MODE = 0x2,
++ HDMI_VP_STUFF_PP_STUFFING_DIRECT_MODE = 0x0,
++ HDMI_VP_STUFF_PR_STUFFING_MASK = 0x1,
++ HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE = 0x1,
++ HDMI_VP_STUFF_PR_STUFFING_DIRECT_MODE = 0x0,
++
++/* VP_CONF field values */
++ HDMI_VP_CONF_BYPASS_EN_MASK = 0x40,
++ HDMI_VP_CONF_BYPASS_EN_ENABLE = 0x40,
++ HDMI_VP_CONF_BYPASS_EN_DISABLE = 0x00,
++ HDMI_VP_CONF_PP_EN_ENMASK = 0x20,
++ HDMI_VP_CONF_PP_EN_ENABLE = 0x20,
++ HDMI_VP_CONF_PP_EN_DISABLE = 0x00,
++ HDMI_VP_CONF_PR_EN_MASK = 0x10,
++ HDMI_VP_CONF_PR_EN_ENABLE = 0x10,
++ HDMI_VP_CONF_PR_EN_DISABLE = 0x00,
++ HDMI_VP_CONF_YCC422_EN_MASK = 0x8,
++ HDMI_VP_CONF_YCC422_EN_ENABLE = 0x8,
++ HDMI_VP_CONF_YCC422_EN_DISABLE = 0x0,
++ HDMI_VP_CONF_BYPASS_SELECT_MASK = 0x4,
++ HDMI_VP_CONF_BYPASS_SELECT_VID_PACKETIZER = 0x4,
++ HDMI_VP_CONF_BYPASS_SELECT_PIX_REPEATER = 0x0,
++ HDMI_VP_CONF_OUTPUT_SELECTOR_MASK = 0x3,
++ HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS = 0x3,
++ HDMI_VP_CONF_OUTPUT_SELECTOR_YCC422 = 0x1,
++ HDMI_VP_CONF_OUTPUT_SELECTOR_PP = 0x0,
++
++/* VP_REMAP field values */
++ HDMI_VP_REMAP_MASK = 0x3,
++ HDMI_VP_REMAP_YCC422_24bit = 0x2,
++ HDMI_VP_REMAP_YCC422_20bit = 0x1,
++ HDMI_VP_REMAP_YCC422_16bit = 0x0,
++
++/* FC_INVIDCONF field values */
++ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_MASK = 0x40,
++ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_HIGH = 0x40,
++ HDMI_FC_INVIDCONF_VSYNC_IN_POLARITY_ACTIVE_LOW = 0x00,
++ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_MASK = 0x20,
++ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_HIGH = 0x20,
++ HDMI_FC_INVIDCONF_HSYNC_IN_POLARITY_ACTIVE_LOW = 0x00,
++ HDMI_FC_INVIDCONF_DE_IN_POLARITY_MASK = 0x10,
++ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_HIGH = 0x10,
++ HDMI_FC_INVIDCONF_DE_IN_POLARITY_ACTIVE_LOW = 0x00,
++ HDMI_FC_INVIDCONF_DVI_MODEZ_MASK = 0x8,
++ HDMI_FC_INVIDCONF_DVI_MODEZ_HDMI_MODE = 0x8,
++ HDMI_FC_INVIDCONF_DVI_MODEZ_DVI_MODE = 0x0,
++ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_MASK = 0x2,
++ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_HIGH = 0x2,
++ HDMI_FC_INVIDCONF_R_V_BLANK_IN_OSC_ACTIVE_LOW = 0x0,
++ HDMI_FC_INVIDCONF_IN_I_P_MASK = 0x1,
++ HDMI_FC_INVIDCONF_IN_I_P_INTERLACED = 0x1,
++ HDMI_FC_INVIDCONF_IN_I_P_PROGRESSIVE = 0x0,
++
++/* FC_AUDICONF0 field values */
++ HDMI_FC_AUDICONF0_CC_OFFSET = 4,
++ HDMI_FC_AUDICONF0_CC_MASK = 0x70,
++ HDMI_FC_AUDICONF0_CT_OFFSET = 0,
++ HDMI_FC_AUDICONF0_CT_MASK = 0xF,
++
++/* FC_AUDICONF1 field values */
++ HDMI_FC_AUDICONF1_SS_OFFSET = 3,
++ HDMI_FC_AUDICONF1_SS_MASK = 0x18,
++ HDMI_FC_AUDICONF1_SF_OFFSET = 0,
++ HDMI_FC_AUDICONF1_SF_MASK = 0x7,
++
++/* FC_AUDICONF3 field values */
++ HDMI_FC_AUDICONF3_LFEPBL_OFFSET = 5,
++ HDMI_FC_AUDICONF3_LFEPBL_MASK = 0x60,
++ HDMI_FC_AUDICONF3_DM_INH_OFFSET = 4,
++ HDMI_FC_AUDICONF3_DM_INH_MASK = 0x10,
++ HDMI_FC_AUDICONF3_LSV_OFFSET = 0,
++ HDMI_FC_AUDICONF3_LSV_MASK = 0xF,
++
++/* FC_AUDSCHNLS0 field values */
++ HDMI_FC_AUDSCHNLS0_CGMSA_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS0_CGMSA_MASK = 0x30,
++ HDMI_FC_AUDSCHNLS0_COPYRIGHT_OFFSET = 0,
++ HDMI_FC_AUDSCHNLS0_COPYRIGHT_MASK = 0x01,
++
++/* FC_AUDSCHNLS3-6 field values */
++ HDMI_FC_AUDSCHNLS3_OIEC_CH0_OFFSET = 0,
++ HDMI_FC_AUDSCHNLS3_OIEC_CH0_MASK = 0x0f,
++ HDMI_FC_AUDSCHNLS3_OIEC_CH1_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS3_OIEC_CH1_MASK = 0xf0,
++ HDMI_FC_AUDSCHNLS4_OIEC_CH2_OFFSET = 0,
++ HDMI_FC_AUDSCHNLS4_OIEC_CH2_MASK = 0x0f,
++ HDMI_FC_AUDSCHNLS4_OIEC_CH3_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS4_OIEC_CH3_MASK = 0xf0,
++
++ HDMI_FC_AUDSCHNLS5_OIEC_CH0_OFFSET = 0,
++ HDMI_FC_AUDSCHNLS5_OIEC_CH0_MASK = 0x0f,
++ HDMI_FC_AUDSCHNLS5_OIEC_CH1_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS5_OIEC_CH1_MASK = 0xf0,
++ HDMI_FC_AUDSCHNLS6_OIEC_CH2_OFFSET = 0,
++ HDMI_FC_AUDSCHNLS6_OIEC_CH2_MASK = 0x0f,
++ HDMI_FC_AUDSCHNLS6_OIEC_CH3_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS6_OIEC_CH3_MASK = 0xf0,
++
++/* HDMI_FC_AUDSCHNLS7 field values */
++ HDMI_FC_AUDSCHNLS7_ACCURACY_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS7_ACCURACY_MASK = 0x30,
++
++/* HDMI_FC_AUDSCHNLS8 field values */
++ HDMI_FC_AUDSCHNLS8_ORIGSAMPFREQ_MASK = 0xf0,
++ HDMI_FC_AUDSCHNLS8_ORIGSAMPFREQ_OFFSET = 4,
++ HDMI_FC_AUDSCHNLS8_WORDLEGNTH_MASK = 0x0f,
++ HDMI_FC_AUDSCHNLS8_WORDLEGNTH_OFFSET = 0,
++
++/* FC_AUDSCONF field values */
++ HDMI_FC_AUDSCONF_AUD_PACKET_SAMPFIT_MASK = 0xF0,
++ HDMI_FC_AUDSCONF_AUD_PACKET_SAMPFIT_OFFSET = 4,
++ HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_MASK = 0x1,
++ HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_OFFSET = 0,
++ HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT1 = 0x1,
++ HDMI_FC_AUDSCONF_AUD_PACKET_LAYOUT_LAYOUT0 = 0x0,
++
++/* FC_STAT2 field values */
++ HDMI_FC_STAT2_OVERFLOW_MASK = 0x03,
++ HDMI_FC_STAT2_LOW_PRIORITY_OVERFLOW = 0x02,
++ HDMI_FC_STAT2_HIGH_PRIORITY_OVERFLOW = 0x01,
++
++/* FC_INT2 field values */
++ HDMI_FC_INT2_OVERFLOW_MASK = 0x03,
++ HDMI_FC_INT2_LOW_PRIORITY_OVERFLOW = 0x02,
++ HDMI_FC_INT2_HIGH_PRIORITY_OVERFLOW = 0x01,
++
++/* FC_MASK2 field values */
++ HDMI_FC_MASK2_OVERFLOW_MASK = 0x03,
++ HDMI_FC_MASK2_LOW_PRIORITY_OVERFLOW = 0x02,
++ HDMI_FC_MASK2_HIGH_PRIORITY_OVERFLOW = 0x01,
++
++/* FC_PRCONF field values */
++ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_MASK = 0xF0,
++ HDMI_FC_PRCONF_INCOMING_PR_FACTOR_OFFSET = 4,
++ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK = 0x0F,
++ HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET = 0,
++
++/* FC_AVICONF0-FC_AVICONF3 field values */
++ HDMI_FC_AVICONF0_PIX_FMT_MASK = 0x03,
++ HDMI_FC_AVICONF0_PIX_FMT_RGB = 0x00,
++ HDMI_FC_AVICONF0_PIX_FMT_YCBCR422 = 0x01,
++ HDMI_FC_AVICONF0_PIX_FMT_YCBCR444 = 0x02,
++ HDMI_FC_AVICONF0_ACTIVE_FMT_MASK = 0x40,
++ HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT = 0x40,
++ HDMI_FC_AVICONF0_ACTIVE_FMT_NO_INFO = 0x00,
++ HDMI_FC_AVICONF0_BAR_DATA_MASK = 0x0C,
++ HDMI_FC_AVICONF0_BAR_DATA_NO_DATA = 0x00,
++ HDMI_FC_AVICONF0_BAR_DATA_VERT_BAR = 0x04,
++ HDMI_FC_AVICONF0_BAR_DATA_HORIZ_BAR = 0x08,
++ HDMI_FC_AVICONF0_BAR_DATA_VERT_HORIZ_BAR = 0x0C,
++ HDMI_FC_AVICONF0_SCAN_INFO_MASK = 0x30,
++ HDMI_FC_AVICONF0_SCAN_INFO_OVERSCAN = 0x10,
++ HDMI_FC_AVICONF0_SCAN_INFO_UNDERSCAN = 0x20,
++ HDMI_FC_AVICONF0_SCAN_INFO_NODATA = 0x00,
++
++ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_MASK = 0x0F,
++ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_USE_CODED = 0x08,
++ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_4_3 = 0x09,
++ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_16_9 = 0x0A,
++ HDMI_FC_AVICONF1_ACTIVE_ASPECT_RATIO_14_9 = 0x0B,
++ HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_MASK = 0x30,
++ HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_NO_DATA = 0x00,
++ HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_4_3 = 0x10,
++ HDMI_FC_AVICONF1_CODED_ASPECT_RATIO_16_9 = 0x20,
++ HDMI_FC_AVICONF1_COLORIMETRY_MASK = 0xC0,
++ HDMI_FC_AVICONF1_COLORIMETRY_NO_DATA = 0x00,
++ HDMI_FC_AVICONF1_COLORIMETRY_SMPTE = 0x40,
++ HDMI_FC_AVICONF1_COLORIMETRY_ITUR = 0x80,
++ HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO = 0xC0,
++
++ HDMI_FC_AVICONF2_SCALING_MASK = 0x03,
++ HDMI_FC_AVICONF2_SCALING_NONE = 0x00,
++ HDMI_FC_AVICONF2_SCALING_HORIZ = 0x01,
++ HDMI_FC_AVICONF2_SCALING_VERT = 0x02,
++ HDMI_FC_AVICONF2_SCALING_HORIZ_VERT = 0x03,
++ HDMI_FC_AVICONF2_RGB_QUANT_MASK = 0x0C,
++ HDMI_FC_AVICONF2_RGB_QUANT_DEFAULT = 0x00,
++ HDMI_FC_AVICONF2_RGB_QUANT_LIMITED_RANGE = 0x04,
++ HDMI_FC_AVICONF2_RGB_QUANT_FULL_RANGE = 0x08,
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_MASK = 0x70,
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601 = 0x00,
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709 = 0x10,
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_SYCC601 = 0x20,
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_ADOBE_YCC601 = 0x30,
++ HDMI_FC_AVICONF2_EXT_COLORIMETRY_ADOBE_RGB = 0x40,
++ HDMI_FC_AVICONF2_IT_CONTENT_MASK = 0x80,
++ HDMI_FC_AVICONF2_IT_CONTENT_NO_DATA = 0x00,
++ HDMI_FC_AVICONF2_IT_CONTENT_VALID = 0x80,
++
++ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_MASK = 0x03,
++ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GRAPHICS = 0x00,
++ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_PHOTO = 0x01,
++ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_CINEMA = 0x02,
++ HDMI_FC_AVICONF3_IT_CONTENT_TYPE_GAME = 0x03,
++ HDMI_FC_AVICONF3_QUANT_RANGE_MASK = 0x0C,
++ HDMI_FC_AVICONF3_QUANT_RANGE_LIMITED = 0x00,
++ HDMI_FC_AVICONF3_QUANT_RANGE_FULL = 0x04,
++
++/* FC_DBGFORCE field values */
++ HDMI_FC_DBGFORCE_FORCEAUDIO = 0x10,
++ HDMI_FC_DBGFORCE_FORCEVIDEO = 0x1,
++
++/* PHY_CONF0 field values */
++ HDMI_PHY_CONF0_PDZ_MASK = 0x80,
++ HDMI_PHY_CONF0_PDZ_OFFSET = 7,
++ HDMI_PHY_CONF0_ENTMDS_MASK = 0x40,
++ HDMI_PHY_CONF0_ENTMDS_OFFSET = 6,
++ HDMI_PHY_CONF0_SPARECTRL = 0x20,
++ HDMI_PHY_CONF0_GEN2_PDDQ_MASK = 0x10,
++ HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET = 4,
++ HDMI_PHY_CONF0_GEN2_TXPWRON_MASK = 0x8,
++ HDMI_PHY_CONF0_GEN2_TXPWRON_OFFSET = 3,
++ HDMI_PHY_CONF0_GEN2_ENHPDRXSENSE_MASK = 0x4,
++ HDMI_PHY_CONF0_GEN2_ENHPDRXSENSE_OFFSET = 2,
++ HDMI_PHY_CONF0_SELDATAENPOL_MASK = 0x2,
++ HDMI_PHY_CONF0_SELDATAENPOL_OFFSET = 1,
++ HDMI_PHY_CONF0_SELDIPIF_MASK = 0x1,
++ HDMI_PHY_CONF0_SELDIPIF_OFFSET = 0,
++
++/* PHY_TST0 field values */
++ HDMI_PHY_TST0_TSTCLR_MASK = 0x20,
++ HDMI_PHY_TST0_TSTCLR_OFFSET = 5,
++ HDMI_PHY_TST0_TSTEN_MASK = 0x10,
++ HDMI_PHY_TST0_TSTEN_OFFSET = 4,
++ HDMI_PHY_TST0_TSTCLK_MASK = 0x1,
++ HDMI_PHY_TST0_TSTCLK_OFFSET = 0,
++
++/* PHY_STAT0 field values */
++ HDMI_PHY_RX_SENSE3 = 0x80,
++ HDMI_PHY_RX_SENSE2 = 0x40,
++ HDMI_PHY_RX_SENSE1 = 0x20,
++ HDMI_PHY_RX_SENSE0 = 0x10,
++ HDMI_PHY_HPD = 0x02,
++ HDMI_PHY_TX_PHY_LOCK = 0x01,
++
++/* HDMI STAT convenience RX_SENSE | HPD */
++ HDMI_DVI_STAT = 0xF2,
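++ /* 0xF2 = RX_SENSE3..RX_SENSE0 (0xF0) ORed with HDMI_PHY_HPD (0x02) */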
++
++/* PHY_I2CM_SLAVE_ADDR field values */
++ HDMI_PHY_I2CM_SLAVE_ADDR_PHY_GEN2 = 0x69,
++ HDMI_PHY_I2CM_SLAVE_ADDR_HEAC_PHY = 0x49,
++
++/* PHY_I2CM_OPERATION_ADDR field values */
++ HDMI_PHY_I2CM_OPERATION_ADDR_WRITE = 0x10,
++ HDMI_PHY_I2CM_OPERATION_ADDR_READ = 0x1,
++
++/* HDMI_PHY_I2CM_INT_ADDR */
++ HDMI_PHY_I2CM_INT_ADDR_DONE_POL = 0x08,
++ HDMI_PHY_I2CM_INT_ADDR_DONE_MASK = 0x04,
++
++/* HDMI_PHY_I2CM_CTLINT_ADDR */
++ HDMI_PHY_I2CM_CTLINT_ADDR_NAC_POL = 0x80,
++ HDMI_PHY_I2CM_CTLINT_ADDR_NAC_MASK = 0x40,
++ HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_POL = 0x08,
++ HDMI_PHY_I2CM_CTLINT_ADDR_ARBITRATION_MASK = 0x04,
++
++/* AUD_CTS3 field values */
++ HDMI_AUD_CTS3_N_SHIFT_OFFSET = 5,
++ HDMI_AUD_CTS3_N_SHIFT_MASK = 0xe0,
++ HDMI_AUD_CTS3_N_SHIFT_1 = 0,
++ HDMI_AUD_CTS3_N_SHIFT_16 = 0x20,
++ HDMI_AUD_CTS3_N_SHIFT_32 = 0x40,
++ HDMI_AUD_CTS3_N_SHIFT_64 = 0x60,
++ HDMI_AUD_CTS3_N_SHIFT_128 = 0x80,
++ HDMI_AUD_CTS3_N_SHIFT_256 = 0xa0,
++ /* Note that the CTS3 MANUAL bit has been removed from our part;
++ it can't be set and will read as 0. */
++ HDMI_AUD_CTS3_CTS_MANUAL = 0x10,
++ HDMI_AUD_CTS3_AUDCTS19_16_MASK = 0x0f,
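++ /* N and CTS drive the sink's audio clock regeneration: per the HDMI
++ spec the recovered clock satisfies 128 * fs = f_TMDS * N / CTS. */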
++
++/* AHB_DMA_CONF0 field values */
++ HDMI_AHB_DMA_CONF0_SW_FIFO_RST_OFFSET = 7,
++ HDMI_AHB_DMA_CONF0_SW_FIFO_RST_MASK = 0x80,
++ HDMI_AHB_DMA_CONF0_HBR_OFFSET = 4,
++ HDMI_AHB_DMA_CONF0_HBR_MASK = 0x10,
++ HDMI_AHB_DMA_CONF0_EN_HLOCK_OFFSET = 3,
++ HDMI_AHB_DMA_CONF0_EN_HLOCK_MASK = 0x08,
++ HDMI_AHB_DMA_CONF0_INCR_TYPE_OFFSET = 1,
++ HDMI_AHB_DMA_CONF0_INCR_TYPE_MASK = 0x06,
++ HDMI_AHB_DMA_CONF0_INCR4 = 0x0,
++ HDMI_AHB_DMA_CONF0_INCR8 = 0x2,
++ HDMI_AHB_DMA_CONF0_INCR16 = 0x4,
++ HDMI_AHB_DMA_CONF0_BURST_MODE = 0x1,
++
++/* HDMI_AHB_DMA_START field values */
++ HDMI_AHB_DMA_START_START_OFFSET = 0,
++ HDMI_AHB_DMA_START_START_MASK = 0x01,
++
++/* HDMI_AHB_DMA_STOP field values */
++ HDMI_AHB_DMA_STOP_STOP_OFFSET = 0,
++ HDMI_AHB_DMA_STOP_STOP_MASK = 0x01,
++
++/* AHB_DMA_STAT, AHB_DMA_INT, AHB_DMA_MASK, AHB_DMA_POL field values */
++ HDMI_AHB_DMA_DONE = 0x80,
++ HDMI_AHB_DMA_RETRY_SPLIT = 0x40,
++ HDMI_AHB_DMA_LOSTOWNERSHIP = 0x20,
++ HDMI_AHB_DMA_ERROR = 0x10,
++ HDMI_AHB_DMA_FIFO_THREMPTY = 0x04,
++ HDMI_AHB_DMA_FIFO_FULL = 0x02,
++ HDMI_AHB_DMA_FIFO_EMPTY = 0x01,
++
++/* AHB_DMA_BUFFSTAT, AHB_DMA_BUFFINT, AHB_DMA_BUFFMASK, AHB_DMA_BUFFPOL field values */
++ HDMI_AHB_DMA_BUFFSTAT_FULL = 0x02,
++ HDMI_AHB_DMA_BUFFSTAT_EMPTY = 0x01,
++
++/* MC_CLKDIS field values */
++ HDMI_MC_CLKDIS_HDCPCLK_DISABLE = 0x40,
++ HDMI_MC_CLKDIS_CECCLK_DISABLE = 0x20,
++ HDMI_MC_CLKDIS_CSCCLK_DISABLE = 0x10,
++ HDMI_MC_CLKDIS_AUDCLK_DISABLE = 0x8,
++ HDMI_MC_CLKDIS_PREPCLK_DISABLE = 0x4,
++ HDMI_MC_CLKDIS_TMDSCLK_DISABLE = 0x2,
++ HDMI_MC_CLKDIS_PIXELCLK_DISABLE = 0x1,
++
++/* MC_SWRSTZ field values */
++ HDMI_MC_SWRSTZ_TMDSSWRST_REQ = 0x02,
++
++/* MC_FLOWCTRL field values */
++ HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_MASK = 0x1,
++ HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH = 0x1,
++ HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS = 0x0,
++
++/* MC_PHYRSTZ field values */
++ HDMI_MC_PHYRSTZ_ASSERT = 0x0,
++ HDMI_MC_PHYRSTZ_DEASSERT = 0x1,
++
++/* MC_HEACPHY_RST field values */
++ HDMI_MC_HEACPHY_RST_ASSERT = 0x1,
++ HDMI_MC_HEACPHY_RST_DEASSERT = 0x0,
++
++/* CSC_CFG field values */
++ HDMI_CSC_CFG_INTMODE_MASK = 0x30,
++ HDMI_CSC_CFG_INTMODE_OFFSET = 4,
++ HDMI_CSC_CFG_INTMODE_DISABLE = 0x00,
++ HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA1 = 0x10,
++ HDMI_CSC_CFG_INTMODE_CHROMA_INT_FORMULA2 = 0x20,
++ HDMI_CSC_CFG_DECMODE_MASK = 0x3,
++ HDMI_CSC_CFG_DECMODE_OFFSET = 0,
++ HDMI_CSC_CFG_DECMODE_DISABLE = 0x0,
++ HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA1 = 0x1,
++ HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA2 = 0x2,
++ HDMI_CSC_CFG_DECMODE_CHROMA_INT_FORMULA3 = 0x3,
++
++/* CSC_SCALE field values */
++ HDMI_CSC_SCALE_CSC_COLORDE_PTH_MASK = 0xF0,
++ HDMI_CSC_SCALE_CSC_COLORDE_PTH_24BPP = 0x00,
++ HDMI_CSC_SCALE_CSC_COLORDE_PTH_30BPP = 0x50,
++ HDMI_CSC_SCALE_CSC_COLORDE_PTH_36BPP = 0x60,
++ HDMI_CSC_SCALE_CSC_COLORDE_PTH_48BPP = 0x70,
++ HDMI_CSC_SCALE_CSCSCALE_MASK = 0x03,
++
++/* I2CM_OPERATION field values */
++ HDMI_I2CM_OPERATION_WRITE = 0x10,
++ HDMI_I2CM_OPERATION_READ_EXT = 0x2,
++ HDMI_I2CM_OPERATION_READ = 0x1,
++
++/* HDMI_I2CM_INT */
++ HDMI_I2CM_INT_DONE_POL = 0x08,
++ HDMI_I2CM_INT_DONE_MASK = 0x04,
++
++/* HDMI_I2CM_CTLINT */
++ HDMI_I2CM_CTLINT_NAC_POL = 0x80,
++ HDMI_I2CM_CTLINT_NAC_MASK = 0x40,
++ HDMI_I2CM_CTLINT_ARBITRATION_POL = 0x08,
++ HDMI_I2CM_CTLINT_ARBITRATION_MASK = 0x04,
++
++};
++
++enum imx_hdmi_type {
++ IMX6DL_HDMI,
++ IMX6Q_HDMI,
++};
++
++/* IOCTL commands */
++#define HDMI_IOC_MAGIC 'H'
++
++#define HDMI_IOC_GET_RESOURCE _IO(HDMI_IOC_MAGIC, 0)
++#define HDMI_IOC_GET_CPU_TYPE _IO(HDMI_IOC_MAGIC, 1)
++
++
++#endif /* __MXC_HDMI_H__ */
+diff -Nur linux-4.1.3/sound/soc/codecs/hdmi.c linux-xbian-imx6/sound/soc/codecs/hdmi.c
+--- linux-4.1.3/sound/soc/codecs/hdmi.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/sound/soc/codecs/hdmi.c 2015-07-27 23:13:11.073473395 +0200
+@@ -47,7 +47,7 @@
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE,
+- .sig_bits = 24,
++// .sig_bits = 24,
+ },
+ .capture = {
+ .stream_name = "Capture",
+diff -Nur linux-4.1.3/sound/soc/codecs/sgtl5000.c linux-xbian-imx6/sound/soc/codecs/sgtl5000.c
+--- linux-4.1.3/sound/soc/codecs/sgtl5000.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/sound/soc/codecs/sgtl5000.c 2015-07-27 23:13:11.085430733 +0200
+@@ -16,7 +16,6 @@
+ #include <linux/pm.h>
+ #include <linux/i2c.h>
+ #include <linux/clk.h>
+-#include <linux/log2.h>
+ #include <linux/regmap.h>
+ #include <linux/regulator/driver.h>
+ #include <linux/regulator/machine.h>
+@@ -35,34 +34,25 @@
+ #define SGTL5000_DAP_REG_OFFSET 0x0100
+ #define SGTL5000_MAX_REG_OFFSET 0x013A
+
++
++int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
++ int addr_bits, int data_bits,
++ enum snd_soc_control_type control);
++
+ /* default value of sgtl5000 registers */
+ static const struct reg_default sgtl5000_reg_defaults[] = {
+- { SGTL5000_CHIP_DIG_POWER, 0x0000 },
+ { SGTL5000_CHIP_CLK_CTRL, 0x0008 },
+ { SGTL5000_CHIP_I2S_CTRL, 0x0010 },
+ { SGTL5000_CHIP_SSS_CTRL, 0x0010 },
+- { SGTL5000_CHIP_ADCDAC_CTRL, 0x020c },
+ { SGTL5000_CHIP_DAC_VOL, 0x3c3c },
+ { SGTL5000_CHIP_PAD_STRENGTH, 0x015f },
+- { SGTL5000_CHIP_ANA_ADC_CTRL, 0x0000 },
+ { SGTL5000_CHIP_ANA_HP_CTRL, 0x1818 },
+ { SGTL5000_CHIP_ANA_CTRL, 0x0111 },
+- { SGTL5000_CHIP_LINREG_CTRL, 0x0000 },
+- { SGTL5000_CHIP_REF_CTRL, 0x0000 },
+- { SGTL5000_CHIP_MIC_CTRL, 0x0000 },
+- { SGTL5000_CHIP_LINE_OUT_CTRL, 0x0000 },
+ { SGTL5000_CHIP_LINE_OUT_VOL, 0x0404 },
+ { SGTL5000_CHIP_ANA_POWER, 0x7060 },
+ { SGTL5000_CHIP_PLL_CTRL, 0x5000 },
+- { SGTL5000_CHIP_CLK_TOP_CTRL, 0x0000 },
+- { SGTL5000_CHIP_ANA_STATUS, 0x0000 },
+- { SGTL5000_CHIP_SHORT_CTRL, 0x0000 },
+- { SGTL5000_CHIP_ANA_TEST2, 0x0000 },
+- { SGTL5000_DAP_CTRL, 0x0000 },
+- { SGTL5000_DAP_PEQ, 0x0000 },
+ { SGTL5000_DAP_BASS_ENHANCE, 0x0040 },
+ { SGTL5000_DAP_BASS_ENHANCE_CTRL, 0x051f },
+- { SGTL5000_DAP_AUDIO_EQ, 0x0000 },
+ { SGTL5000_DAP_SURROUND, 0x0040 },
+ { SGTL5000_DAP_EQ_BASS_BAND0, 0x002f },
+ { SGTL5000_DAP_EQ_BASS_BAND1, 0x002f },
+@@ -70,7 +60,6 @@
+ { SGTL5000_DAP_EQ_BASS_BAND3, 0x002f },
+ { SGTL5000_DAP_EQ_BASS_BAND4, 0x002f },
+ { SGTL5000_DAP_MAIN_CHAN, 0x8000 },
+- { SGTL5000_DAP_MIX_CHAN, 0x0000 },
+ { SGTL5000_DAP_AVC_CTRL, 0x0510 },
+ { SGTL5000_DAP_AVC_THRESHOLD, 0x1473 },
+ { SGTL5000_DAP_AVC_ATTACK, 0x0028 },
+@@ -122,13 +111,6 @@
+ bool enabled;
+ };
+
+-enum sgtl5000_micbias_resistor {
+- SGTL5000_MICBIAS_OFF = 0,
+- SGTL5000_MICBIAS_2K = 2,
+- SGTL5000_MICBIAS_4K = 4,
+- SGTL5000_MICBIAS_8K = 8,
+-};
+-
+ /* sgtl5000 private structure in codec */
+ struct sgtl5000_priv {
+ int sysclk; /* sysclk rate */
+@@ -216,9 +198,8 @@
+ "MIC_IN", "LINE_IN"
+ };
+
+-static SOC_ENUM_SINGLE_DECL(adc_enum,
+- SGTL5000_CHIP_ANA_CTRL, 2,
+- adc_mux_text);
++static const struct soc_enum adc_enum =
++SOC_ENUM_SINGLE(SGTL5000_CHIP_ANA_CTRL, 2, 2, adc_mux_text);
+
+ static const struct snd_kcontrol_new adc_mux =
+ SOC_DAPM_ENUM("Capture Mux", adc_enum);
+@@ -228,9 +209,8 @@
+ "DAC", "LINE_IN"
+ };
+
+-static SOC_ENUM_SINGLE_DECL(dac_enum,
+- SGTL5000_CHIP_ANA_CTRL, 6,
+- dac_mux_text);
++static const struct soc_enum dac_enum =
++SOC_ENUM_SINGLE(SGTL5000_CHIP_ANA_CTRL, 6, 2, dac_mux_text);
+
+ static const struct snd_kcontrol_new dac_mux =
+ SOC_DAPM_ENUM("Headphone Mux", dac_enum);
+@@ -325,7 +305,7 @@
+ static int dac_get_volsw(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
++ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ int reg;
+ int l;
+ int r;
+@@ -378,7 +358,7 @@
+ static int dac_put_volsw(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+ {
+- struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
++ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ int reg;
+ int l;
+ int r;
+@@ -485,21 +465,21 @@
+ /* setting i2s data format */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
+- i2sctl |= SGTL5000_I2S_MODE_PCM << SGTL5000_I2S_MODE_SHIFT;
++ i2sctl |= SGTL5000_I2S_MODE_PCM;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+- i2sctl |= SGTL5000_I2S_MODE_PCM << SGTL5000_I2S_MODE_SHIFT;
++ i2sctl |= SGTL5000_I2S_MODE_PCM;
+ i2sctl |= SGTL5000_I2S_LRALIGN;
+ break;
+ case SND_SOC_DAIFMT_I2S:
+- i2sctl |= SGTL5000_I2S_MODE_I2S_LJ << SGTL5000_I2S_MODE_SHIFT;
++ i2sctl |= SGTL5000_I2S_MODE_I2S_LJ;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+- i2sctl |= SGTL5000_I2S_MODE_RJ << SGTL5000_I2S_MODE_SHIFT;
++ i2sctl |= SGTL5000_I2S_MODE_RJ;
+ i2sctl |= SGTL5000_I2S_LRPOL;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+- i2sctl |= SGTL5000_I2S_MODE_I2S_LJ << SGTL5000_I2S_MODE_SHIFT;
++ i2sctl |= SGTL5000_I2S_MODE_I2S_LJ;
+ i2sctl |= SGTL5000_I2S_LRALIGN;
+ break;
+ default:
+@@ -544,16 +524,16 @@
+
+ /*
+ * set clock according to i2s frame clock,
+- * sgtl5000 provides 2 clock sources:
+- * 1. sys_mclk: sample freq can only be configured to
++ * sgtl5000 provide 2 clock sources.
++ * 1. sys_mclk. sample freq can only configure to
+ * 1/256, 1/384, 1/512 of sys_mclk.
+- * 2. pll: can derive any audio clocks.
++ * 2. pll. can derive any audio clocks.
+ *
+ * clock setting rules:
+- * 1. in slave mode, only sys_mclk can be used
+- * 2. as constraint by sys_mclk, sample freq should be set to 32 kHz, 44.1 kHz
+- * and above.
+- * 3. usage of sys_mclk is preferred over pll to save power.
++ * 1. in slave mode, only sys_mclk can use.
++ * 2. as constraint by sys_mclk, sample freq should
++ * set to 32k, 44.1k and above.
++ * 3. using sys_mclk prefer to pll to save power.
+ */
+ static int sgtl5000_set_clock(struct snd_soc_codec *codec, int frame_rate)
+ {
+@@ -563,8 +543,8 @@
+
+ /*
+ * sample freq should be divided by frame clock,
+- * if frame clock is lower than 44.1 kHz, sample freq should be set to
+- * 32 kHz or 44.1 kHz.
++ * if frame clock lower than 44.1khz, sample feq should set to
++ * 32khz or 44.1khz.
+ */
+ switch (frame_rate) {
+ case 8000:
+@@ -617,10 +597,9 @@
+
+ /*
+ * calculate the divider of mclk/sample_freq,
+- * factor of freq = 96 kHz can only be 256, since mclk is in the range
+- * of 8 MHz - 27 MHz
++ * factor of freq =96k can only be 256, since mclk in range (12m,27m)
+ */
+- switch (sgtl5000->sysclk / frame_rate) {
++ switch (sgtl5000->sysclk / sys_fs) {
+ case 256:
+ clk_ctl |= SGTL5000_MCLK_FREQ_256FS <<
+ SGTL5000_MCLK_FREQ_SHIFT;
+@@ -634,16 +613,13 @@
+ SGTL5000_MCLK_FREQ_SHIFT;
+ break;
+ default:
+- /* if mclk does not satisfy the divider, use pll */
++ /* if mclk not satisify the divider, use pll */
+ if (sgtl5000->master) {
+ clk_ctl |= SGTL5000_MCLK_FREQ_PLL <<
+ SGTL5000_MCLK_FREQ_SHIFT;
+ } else {
+ dev_err(codec->dev,
+ "PLL not supported in slave mode\n");
+- dev_err(codec->dev, "%d ratio is not supported. "
+- "SYS_MCLK needs to be 256, 384 or 512 * fs\n",
+- sgtl5000->sysclk / frame_rate);
+ return -EINVAL;
+ }
+ }
+@@ -742,25 +718,25 @@
+ return ret;
+
+ /* set i2s data format */
+- switch (params_width(params)) {
+- case 16:
++ switch (params_format(params)) {
++ case SNDRV_PCM_FORMAT_S16_LE:
+ if (sgtl5000->fmt == SND_SOC_DAIFMT_RIGHT_J)
+ return -EINVAL;
+ i2s_ctl |= SGTL5000_I2S_DLEN_16 << SGTL5000_I2S_DLEN_SHIFT;
+ i2s_ctl |= SGTL5000_I2S_SCLKFREQ_32FS <<
+ SGTL5000_I2S_SCLKFREQ_SHIFT;
+ break;
+- case 20:
++ case SNDRV_PCM_FORMAT_S20_3LE:
+ i2s_ctl |= SGTL5000_I2S_DLEN_20 << SGTL5000_I2S_DLEN_SHIFT;
+ i2s_ctl |= SGTL5000_I2S_SCLKFREQ_64FS <<
+ SGTL5000_I2S_SCLKFREQ_SHIFT;
+ break;
+- case 24:
++ case SNDRV_PCM_FORMAT_S24_LE:
+ i2s_ctl |= SGTL5000_I2S_DLEN_24 << SGTL5000_I2S_DLEN_SHIFT;
+ i2s_ctl |= SGTL5000_I2S_SCLKFREQ_64FS <<
+ SGTL5000_I2S_SCLKFREQ_SHIFT;
+ break;
+- case 32:
++ case SNDRV_PCM_FORMAT_S32_LE:
+ if (sgtl5000->fmt == SND_SOC_DAIFMT_RIGHT_J)
+ return -EINVAL;
+ i2s_ctl |= SGTL5000_I2S_DLEN_32 << SGTL5000_I2S_DLEN_SHIFT;
+@@ -791,7 +767,7 @@
+ struct ldo_regulator *ldo = rdev_get_drvdata(dev);
+ struct snd_soc_codec *codec = (struct snd_soc_codec *)ldo->codec_data;
+ int reg;
+-
++dev_info(codec->dev, "%s(): enabled %u\n", __func__, ldo->enabled);
+ if (ldo_regulator_is_enabled(dev))
+ return 0;
+
+@@ -810,7 +786,7 @@
+ SGTL5000_LINEREG_D_POWERUP,
+ SGTL5000_LINEREG_D_POWERUP);
+
+- /* when internal ldo is enabled, simple digital power can be disabled */
++ /* when internal ldo enabled, simple digital power can be disabled */
+ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_LINREG_SIMPLE_POWERUP,
+ 0);
+@@ -823,10 +799,16 @@
+ {
+ struct ldo_regulator *ldo = rdev_get_drvdata(dev);
+ struct snd_soc_codec *codec = (struct snd_soc_codec *)ldo->codec_data;
++dev_info(codec->dev, "%s(): enabled %u\n", __func__, ldo->enabled);
++
++ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
++ SGTL5000_LINREG_SIMPLE_POWERUP,
++ SGTL5000_LINREG_SIMPLE_POWERUP);
+
+ snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_LINEREG_D_POWERUP,
+ 0);
++dev_info(codec->dev, "%s: ANA_POWER = 0x%04x\n", __func__, snd_soc_read(codec, SGTL5000_CHIP_ANA_POWER));
+
+ /* clear voltage info */
+ snd_soc_update_bits(codec, SGTL5000_CHIP_LINREG_CTRL,
+@@ -861,8 +843,10 @@
+
+ ldo = kzalloc(sizeof(struct ldo_regulator), GFP_KERNEL);
+
+- if (!ldo)
++ if (!ldo) {
++ dev_err(codec->dev, "failed to allocate ldo_regulator\n");
+ return -ENOMEM;
++ }
+
+ ldo->desc.name = kstrdup(dev_name(codec->dev), GFP_KERNEL);
+ if (!ldo->desc.name) {
+@@ -882,6 +866,7 @@
+ config.dev = codec->dev;
+ config.driver_data = ldo;
+ config.init_data = init_data;
++ config.ena_gpio = -EINVAL;
+
+ ldo->dev = regulator_register(&ldo->desc, &config);
+ if (IS_ERR(ldo->dev)) {
+@@ -1091,10 +1076,90 @@
+ }
+ }
+
++#ifdef CONFIG_SUSPEND
++static int sgtl5000_suspend(struct snd_soc_codec *codec)
++{
++ sgtl5000_set_bias_level(codec, SND_SOC_BIAS_OFF);
++
++ return 0;
++}
++
++/*
++ * Restore all sgtl5000 registers.
++ * Since there is a big hole between the dap and regular registers,
++ * restore them separately.
++ */
++static int sgtl5000_restore_regs(struct snd_soc_codec *codec)
++{
++ u16 *cache = codec->reg_cache;
++ u16 reg;
++
++ /* restore regular registers */
++ for (reg = 0; reg <= SGTL5000_CHIP_SHORT_CTRL; reg += 2) {
++
++ /* These regs must be restored in a particular order */
++ if (reg == SGTL5000_CHIP_ANA_POWER ||
++ reg == SGTL5000_CHIP_CLK_CTRL ||
++ reg == SGTL5000_CHIP_LINREG_CTRL ||
++ reg == SGTL5000_CHIP_LINE_OUT_CTRL ||
++ reg == SGTL5000_CHIP_REF_CTRL)
++ continue;
++
++ snd_soc_write(codec, reg, cache[reg]);
++ }
++
++ /* restore dap registers */
++ for (reg = SGTL5000_DAP_REG_OFFSET; reg < SGTL5000_MAX_REG_OFFSET; reg += 2)
++ snd_soc_write(codec, reg, cache[reg]);
++
++ /*
++ * restore these regs according to the power setting sequence in
++ * sgtl5000_set_power_regs() and clock setting sequence in
++ * sgtl5000_set_clock().
++ *
++ * The order of restore is:
++ * 1. SGTL5000_CHIP_CLK_CTRL MCLK_FREQ bits (1:0) should be restored
++ * after the SGTL5000_CHIP_ANA_POWER PLL bits are set
++ * 2. SGTL5000_CHIP_LINREG_CTRL should be set before
++ * SGTL5000_CHIP_ANA_POWER LINREG_D is restored
++ * 3. SGTL5000_CHIP_REF_CTRL controls the Analog Ground Voltage; prefer
++ * to restore it after SGTL5000_CHIP_ANA_POWER has been restored
++ */
++ snd_soc_write(codec, SGTL5000_CHIP_LINREG_CTRL,
++ cache[SGTL5000_CHIP_LINREG_CTRL]);
++
++ snd_soc_write(codec, SGTL5000_CHIP_ANA_POWER,
++ cache[SGTL5000_CHIP_ANA_POWER]);
++
++ snd_soc_write(codec, SGTL5000_CHIP_CLK_CTRL,
++ cache[SGTL5000_CHIP_CLK_CTRL]);
++
++ snd_soc_write(codec, SGTL5000_CHIP_REF_CTRL,
++ cache[SGTL5000_CHIP_REF_CTRL]);
++
++ snd_soc_write(codec, SGTL5000_CHIP_LINE_OUT_CTRL,
++ cache[SGTL5000_CHIP_LINE_OUT_CTRL]);
++ return 0;
++}
++
++static int sgtl5000_resume(struct snd_soc_codec *codec)
++{
++ /* Bring the codec back up to standby to enable regulators */
++ sgtl5000_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
++
++ /* Restore registers from the in-memory cache */
++ sgtl5000_restore_regs(codec);
++ return 0;
++}
++#else
++#define sgtl5000_suspend NULL
++#define sgtl5000_resume NULL
++#endif /* CONFIG_SUSPEND */
++
+ /*
+ * sgtl5000 has 3 internal power supplies:
+ * 1. VAG, normally set to vdda/2
+- * 2. charge pump, set to different value
++ * 2. chargepump, set to different value
+ * according to voltage of vdda and vddio
+ * 3. line out VAG, normally set to vddio/2
+ *
+@@ -1267,7 +1332,7 @@
+ return ret;
+ }
+
+- ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(sgtl5000->supplies),
++ ret = devm_regulator_bulk_get(codec->dev, ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+ if (ret)
+ goto err_ldo_remove;
+@@ -1275,16 +1340,13 @@
+ ret = regulator_bulk_enable(ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+ if (ret)
+- goto err_regulator_free;
++ goto err_ldo_remove;
+
+ /* wait for all power rails bring up */
+ udelay(10);
+
+ return 0;
+
+-err_regulator_free:
+- regulator_bulk_free(ARRAY_SIZE(sgtl5000->supplies),
+- sgtl5000->supplies);
+ err_ldo_remove:
+ if (!external_vddd)
+ ldo_regulator_remove(codec);
+@@ -1297,6 +1359,17 @@
+ int ret;
+ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
+
++ /* setup i2c data ops */
++ codec->control_data = sgtl5000->regmap;
++ ret = snd_soc_codec_set_cache_io(codec, 16, 16, SND_SOC_REGMAP);
++ if (ret < 0) {
++ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
++ return ret;
++ }
++
++ if (!devres_open_group(codec->dev, NULL, GFP_KERNEL))
++ return -ENOMEM;
++
+ ret = sgtl5000_enable_regulators(codec);
+ if (ret)
+ return ret;
+@@ -1334,13 +1407,8 @@
+ SGTL5000_HP_ZCD_EN |
+ SGTL5000_ADC_ZCD_EN);
+
+- snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL,
+- SGTL5000_BIAS_R_MASK,
+- sgtl5000->micbias_resistor << SGTL5000_BIAS_R_SHIFT);
++ snd_soc_write(codec, SGTL5000_CHIP_MIC_CTRL, 2);
+
+- snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL,
+- SGTL5000_BIAS_R_MASK,
+- sgtl5000->micbias_voltage << SGTL5000_BIAS_R_SHIFT);
+ /*
+ * disable DAP
+ * TODO:
+@@ -1348,13 +1416,19 @@
+ */
+ snd_soc_write(codec, SGTL5000_DAP_CTRL, 0);
+
++ /* leading to standby state */
++ ret = sgtl5000_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
++ if (ret)
++ goto err;
++
+ return 0;
+
+ err:
+ regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+- regulator_bulk_free(ARRAY_SIZE(sgtl5000->supplies),
+- sgtl5000->supplies);
++
++ devres_release_group(codec->dev, NULL);
++
+ ldo_regulator_remove(codec);
+
+ return ret;
+@@ -1364,10 +1438,13 @@
+ {
+ struct sgtl5000_priv *sgtl5000 = snd_soc_codec_get_drvdata(codec);
+
++ sgtl5000_set_bias_level(codec, SND_SOC_BIAS_OFF);
++
+ regulator_bulk_disable(ARRAY_SIZE(sgtl5000->supplies),
+ sgtl5000->supplies);
+- regulator_bulk_free(ARRAY_SIZE(sgtl5000->supplies),
+- sgtl5000->supplies);
++
++ devres_release_group(codec->dev, NULL);
++
+ ldo_regulator_remove(codec);
+
+ return 0;
+@@ -1376,8 +1453,9 @@
+ static struct snd_soc_codec_driver sgtl5000_driver = {
+ .probe = sgtl5000_probe,
+ .remove = sgtl5000_remove,
++ .suspend = sgtl5000_suspend,
++ .resume = sgtl5000_resume,
+ .set_bias_level = sgtl5000_set_bias_level,
+- .suspend_bias_off = true,
+ .controls = sgtl5000_snd_controls,
+ .num_controls = ARRAY_SIZE(sgtl5000_snd_controls),
+ .dapm_widgets = sgtl5000_dapm_widgets,
+@@ -1430,10 +1508,9 @@
+ {
+ struct sgtl5000_priv *sgtl5000;
+ int ret, reg, rev;
+- struct device_node *np = client->dev.of_node;
+- u32 value;
+
+- sgtl5000 = devm_kzalloc(&client->dev, sizeof(*sgtl5000), GFP_KERNEL);
++ sgtl5000 = devm_kzalloc(&client->dev, sizeof(struct sgtl5000_priv),
++ GFP_KERNEL);
+ if (!sgtl5000)
+ return -ENOMEM;
+
+@@ -1478,47 +1555,6 @@
+ dev_info(&client->dev, "sgtl5000 revision 0x%x\n", rev);
+ sgtl5000->revision = rev;
+
+- if (np) {
+- if (!of_property_read_u32(np,
+- "micbias-resistor-k-ohms", &value)) {
+- switch (value) {
+- case SGTL5000_MICBIAS_OFF:
+- sgtl5000->micbias_resistor = 0;
+- break;
+- case SGTL5000_MICBIAS_2K:
+- sgtl5000->micbias_resistor = 1;
+- break;
+- case SGTL5000_MICBIAS_4K:
+- sgtl5000->micbias_resistor = 2;
+- break;
+- case SGTL5000_MICBIAS_8K:
+- sgtl5000->micbias_resistor = 3;
+- break;
+- default:
+- sgtl5000->micbias_resistor = 2;
+- dev_err(&client->dev,
+- "Unsuitable MicBias resistor\n");
+- }
+- } else {
+- /* default is 4Kohms */
+- sgtl5000->micbias_resistor = 2;
+- }
+- if (!of_property_read_u32(np,
+- "micbias-voltage-m-volts", &value)) {
+- /* 1250mV => 0 */
+- /* steps of 250mV */
+- if ((value >= 1250) && (value <= 3000))
+- sgtl5000->micbias_voltage = (value / 250) - 5;
+- else {
+- sgtl5000->micbias_voltage = 0;
+- dev_err(&client->dev,
+- "Unsuitable MicBias resistor\n");
+- }
+- } else {
+- sgtl5000->micbias_voltage = 0;
+- }
+- }
+-
+ i2c_set_clientdata(client, sgtl5000);
+
+ /* Ensure sgtl5000 will start with sane register values */
+diff -Nur linux-4.1.3/sound/soc/fsl/fsl_asrc.h linux-xbian-imx6/sound/soc/fsl/fsl_asrc.h
+--- linux-4.1.3/sound/soc/fsl/fsl_asrc.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/sound/soc/fsl/fsl_asrc.h 2015-07-27 23:13:11.149203200 +0200
+@@ -326,7 +326,7 @@
+ OUTCLK_ASRCK1_CLK = 0x0f,
+ };
+
+-#define ASRC_CLK_MAX_NUM 16
++#define ASRC_CLK_MAX_NUM 1
+
+ enum asrc_word_width {
+ ASRC_WIDTH_24_BIT = 0,
+diff -Nur linux-4.1.3/sound/soc/fsl/fsl_esai.c linux-xbian-imx6/sound/soc/fsl/fsl_esai.c
+--- linux-4.1.3/sound/soc/fsl/fsl_esai.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/sound/soc/fsl/fsl_esai.c 2015-07-27 23:13:11.149203200 +0200
+@@ -839,7 +839,7 @@
+ return ret;
+ }
+
+- ret = imx_pcm_dma_init(pdev);
++ ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);
+ if (ret)
+ dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);
+
+diff -Nur linux-4.1.3/sound/soc/fsl/fsl_hdmi.c linux-xbian-imx6/sound/soc/fsl/fsl_hdmi.c
+--- linux-4.1.3/sound/soc/fsl/fsl_hdmi.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/sound/soc/fsl/fsl_hdmi.c 2015-07-27 23:13:11.149203200 +0200
+@@ -0,0 +1,614 @@
++/*
++ * ALSA SoC HDMI Audio Layer for Freescale i.MX
++ *
++ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
++ *
++ * Some code from patch_hdmi.c
++ * Copyright (c) 2008-2010 Intel Corporation. All rights reserved.
++ * Copyright (c) 2006 ATI Technologies Inc.
++ * Copyright (c) 2008 NVIDIA Corp. All rights reserved.
++ * Copyright (c) 2008 Wei Ni <wni@nvidia.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/dma-mapping.h>
++#include <linux/slab.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/mfd/mxc-hdmi-core.h>
++#include <sound/pcm.h>
++#include <sound/soc.h>
++#include <sound/asoundef.h>
++
++#include <video/mxc_hdmi.h>
++
++#include "imx-hdmi.h"
++
++
++static struct mxc_edid_cfg edid_cfg;
++
++static u32 playback_rates[HDMI_MAX_RATES];
++static u32 playback_sample_size[HDMI_MAX_SAMPLE_SIZE];
++static u32 playback_channels[HDMI_MAX_CHANNEL_CONSTRAINTS];
++
++static struct snd_pcm_hw_constraint_list playback_constraint_rates;
++static struct snd_pcm_hw_constraint_list playback_constraint_bits;
++static struct snd_pcm_hw_constraint_list playback_constraint_channels;
++
++#ifdef DEBUG
++static void dumpregs(struct snd_soc_dai *dai)
++{
++ u32 n, cts;
++
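++ /* CTS and N are 20-bit values split MSB-first across three 8-bit
++ registers; note that CTS3 bits 7:4 carry the N_SHIFT/CTS_MANUAL
++ flags (only the low nibble, AUDCTS19_16, belongs to CTS), which
++ this debug read leaves unmasked. */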
++ cts = (hdmi_readb(HDMI_AUD_CTS3) << 16) |
++ (hdmi_readb(HDMI_AUD_CTS2) << 8) |
++ hdmi_readb(HDMI_AUD_CTS1);
++
++ n = (hdmi_readb(HDMI_AUD_N3) << 16) |
++ (hdmi_readb(HDMI_AUD_N2) << 8) |
++ hdmi_readb(HDMI_AUD_N1);
++
++ dev_dbg(dai->dev, "HDMI_PHY_CONF0 0x%02x\n",
++ hdmi_readb(HDMI_PHY_CONF0));
++ dev_dbg(dai->dev, "HDMI_MC_CLKDIS 0x%02x\n",
++ hdmi_readb(HDMI_MC_CLKDIS));
++ dev_dbg(dai->dev, "HDMI_AUD_N[1-3] 0x%06x (%d)\n",
++ n, n);
++ dev_dbg(dai->dev, "HDMI_AUD_CTS[1-3] 0x%06x (%d)\n",
++ cts, cts);
++ dev_dbg(dai->dev, "HDMI_FC_AUDSCONF 0x%02x\n",
++ hdmi_readb(HDMI_FC_AUDSCONF));
++}
++#else
++static void dumpregs(struct snd_soc_dai *dai) {}
++#endif
++
++enum cea_speaker_placement {
++ FL = (1 << 0), /* Front Left */
++ FC = (1 << 1), /* Front Center */
++ FR = (1 << 2), /* Front Right */
++ FLC = (1 << 3), /* Front Left Center */
++ FRC = (1 << 4), /* Front Right Center */
++ RL = (1 << 5), /* Rear Left */
++ RC = (1 << 6), /* Rear Center */
++ RR = (1 << 7), /* Rear Right */
++ RLC = (1 << 8), /* Rear Left Center */
++ RRC = (1 << 9), /* Rear Right Center */
++ LFE = (1 << 10), /* Low Frequency Effect */
++ FLW = (1 << 11), /* Front Left Wide */
++ FRW = (1 << 12), /* Front Right Wide */
++ FLH = (1 << 13), /* Front Left High */
++ FCH = (1 << 14), /* Front Center High */
++ FRH = (1 << 15), /* Front Right High */
++ TC = (1 << 16), /* Top Center */
++};
++
++/*
++ * EDID SA bits in the CEA Speaker Allocation data block
++ */
++static int edid_speaker_allocation_bits[] = {
++ [0] = FL | FR,
++ [1] = LFE,
++ [2] = FC,
++ [3] = RL | RR,
++ [4] = RC,
++ [5] = FLC | FRC,
++ [6] = RLC | RRC,
++ [7] = FLW | FRW,
++ [8] = FLH | FRH,
++ [9] = TC,
++ [10] = FCH,
++};
++
++struct cea_channel_speaker_allocation {
++ int ca_index;
++ int speakers[8];
++
++ /* Derived values, just for convenience */
++ int channels;
++ int spk_mask;
++};
++
++/*
++ * This is an ordered list!
++ *
++ * The preceding ones have better chances to be selected by
++ * hdmi_channel_allocation().
++ */
++static struct cea_channel_speaker_allocation channel_allocations[] = {
++ /* channel: 7 6 5 4 3 2 1 0 */
++ { .ca_index = 0x00, .speakers = { 0, 0, 0, 0, 0, 0, FR, FL },},
++ /* 2.1 */
++ { .ca_index = 0x01, .speakers = { 0, 0, 0, 0, 0, LFE, FR, FL },},
++ /* Dolby Surround */
++ { .ca_index = 0x08, .speakers = { 0, 0, RR, RL, 0, 0, FR, FL },}, /* Prefer FL/FR/RL/RR over FL/FR/LFE/FC */
++ { .ca_index = 0x02, .speakers = { 0, 0, 0, 0, FC, 0, FR, FL },},
++ { .ca_index = 0x03, .speakers = { 0, 0, 0, 0, FC, LFE, FR, FL },},
++ { .ca_index = 0x04, .speakers = { 0, 0, 0, RC, 0, 0, FR, FL },},
++ { .ca_index = 0x05, .speakers = { 0, 0, 0, RC, 0, LFE, FR, FL },},
++ { .ca_index = 0x06, .speakers = { 0, 0, 0, RC, FC, 0, FR, FL },},
++ { .ca_index = 0x07, .speakers = { 0, 0, 0, RC, FC, LFE, FR, FL },},
++ { .ca_index = 0x09, .speakers = { 0, 0, RR, RL, 0, LFE, FR, FL },},
++ { .ca_index = 0x0a, .speakers = { 0, 0, RR, RL, FC, 0, FR, FL },},
++ /* surround51 */
++ { .ca_index = 0x0b, .speakers = { 0, 0, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x0c, .speakers = { 0, RC, RR, RL, 0, 0, FR, FL },},
++ { .ca_index = 0x0d, .speakers = { 0, RC, RR, RL, 0, LFE, FR, FL },},
++ { .ca_index = 0x0e, .speakers = { 0, RC, RR, RL, FC, 0, FR, FL },},
++ /* 6.1 */
++ { .ca_index = 0x0f, .speakers = { 0, RC, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x10, .speakers = { RRC, RLC, RR, RL, 0, 0, FR, FL },},
++ { .ca_index = 0x11, .speakers = { RRC, RLC, RR, RL, 0, LFE, FR, FL },},
++ { .ca_index = 0x12, .speakers = { RRC, RLC, RR, RL, FC, 0, FR, FL },},
++ /* surround71 */
++ { .ca_index = 0x13, .speakers = { RRC, RLC, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x14, .speakers = { FRC, FLC, 0, 0, 0, 0, FR, FL },},
++ { .ca_index = 0x15, .speakers = { FRC, FLC, 0, 0, 0, LFE, FR, FL },},
++ { .ca_index = 0x16, .speakers = { FRC, FLC, 0, 0, FC, 0, FR, FL },},
++ { .ca_index = 0x17, .speakers = { FRC, FLC, 0, 0, FC, LFE, FR, FL },},
++ { .ca_index = 0x18, .speakers = { FRC, FLC, 0, RC, 0, 0, FR, FL },},
++ { .ca_index = 0x19, .speakers = { FRC, FLC, 0, RC, 0, LFE, FR, FL },},
++ { .ca_index = 0x1a, .speakers = { FRC, FLC, 0, RC, FC, 0, FR, FL },},
++ { .ca_index = 0x1b, .speakers = { FRC, FLC, 0, RC, FC, LFE, FR, FL },},
++ { .ca_index = 0x1c, .speakers = { FRC, FLC, RR, RL, 0, 0, FR, FL },},
++ { .ca_index = 0x1d, .speakers = { FRC, FLC, RR, RL, 0, LFE, FR, FL },},
++ { .ca_index = 0x1e, .speakers = { FRC, FLC, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x1f, .speakers = { FRC, FLC, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x20, .speakers = { 0, FCH, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x21, .speakers = { 0, FCH, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x22, .speakers = { TC, 0, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x23, .speakers = { TC, 0, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x24, .speakers = { FRH, FLH, RR, RL, 0, 0, FR, FL },},
++ { .ca_index = 0x25, .speakers = { FRH, FLH, RR, RL, 0, LFE, FR, FL },},
++ { .ca_index = 0x26, .speakers = { FRW, FLW, RR, RL, 0, 0, FR, FL },},
++ { .ca_index = 0x27, .speakers = { FRW, FLW, RR, RL, 0, LFE, FR, FL },},
++ { .ca_index = 0x28, .speakers = { TC, RC, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x29, .speakers = { TC, RC, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x2a, .speakers = { FCH, RC, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x2b, .speakers = { FCH, RC, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x2c, .speakers = { TC, FCH, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x2d, .speakers = { TC, FCH, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x2e, .speakers = { FRH, FLH, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x2f, .speakers = { FRH, FLH, RR, RL, FC, LFE, FR, FL },},
++ { .ca_index = 0x30, .speakers = { FRW, FLW, RR, RL, FC, 0, FR, FL },},
++ { .ca_index = 0x31, .speakers = { FRW, FLW, RR, RL, FC, LFE, FR, FL },},
++};
++
++/* Compute derived values in channel_allocations[] */
++static void init_channel_allocations(void)
++{
++ struct cea_channel_speaker_allocation *p;
++ int i, j;
++
++ for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) {
++ p = channel_allocations + i;
++ p->channels = 0;
++ p->spk_mask = 0;
++ for (j = 0; j < ARRAY_SIZE(p->speakers); j++)
++ if (p->speakers[j]) {
++ p->channels++;
++ p->spk_mask |= p->speakers[j];
++ }
++ }
++}
++
++/*
++ * The transformation takes two steps:
++ *
++ * speaker_alloc => (edid_speaker_allocation_bits[]) => spk_mask
++ * spk_mask => (channel_allocations[]) => CA
++ *
++ * TODO: it could select the wrong CA from multiple candidates.
++*/
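++/*
++ * Worked example (illustrative): an EDID speaker_alloc of 0x09 sets
++ * bits 0 (FL|FR) and 3 (RL|RR), giving spk_mask = FL|FR|RL|RR; for a
++ * 4-channel stream the first matching channel_allocations[] row is
++ * ca_index 0x08.
++ */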
++static int hdmi_channel_allocation(int channels)
++{
++ int spk_mask = 0, ca = 0, i, tmpchn, tmpspk;
++
++ /* CA defaults to 0 for basic stereo audio */
++ if (channels <= 2)
++ return 0;
++
++ /*
++ * Expand EDID's speaker allocation mask
++ *
++ * EDID tells the speaker mask in a compact (paired) form,
++ * expand EDID's notions to match the ones used by Audio InfoFrame.
++ */
++ for (i = 0; i < ARRAY_SIZE(edid_speaker_allocation_bits); i++) {
++ if (edid_cfg.speaker_alloc & (1 << i))
++ spk_mask |= edid_speaker_allocation_bits[i];
++ }
++
++ /* Search for the first working match in the CA table */
++ for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) {
++ tmpchn = channel_allocations[i].channels;
++ tmpspk = channel_allocations[i].spk_mask;
++
++ if (channels == tmpchn && (spk_mask & tmpspk) == tmpspk) {
++ ca = channel_allocations[i].ca_index;
++ break;
++ }
++ }
++
++ return ca;
++}
++
++static void hdmi_set_audio_infoframe(unsigned int channels)
++{
++ u8 audiconf0, audiconf2;
++
++ /*
++ * From CEA-861-D spec:
++ * HDMI requires the CT, SS and SF fields to be set to 0 ("Refer
++ * to Stream Header") as these items are carried in the audio stream.
++ *
++ * So we only set the CC and CA fields.
++ */
++ audiconf0 = ((channels - 1) << HDMI_FC_AUDICONF0_CC_OFFSET) &
++ HDMI_FC_AUDICONF0_CC_MASK;
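++ /* CEA-861 encodes CC as channel count minus one, e.g. 6 ch -> CC = 5 */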
++
++ audiconf2 = hdmi_channel_allocation(channels);
++
++ hdmi_writeb(audiconf0, HDMI_FC_AUDICONF0);
++ hdmi_writeb(0, HDMI_FC_AUDICONF1);
++ hdmi_writeb(audiconf2, HDMI_FC_AUDICONF2);
++ hdmi_writeb(0, HDMI_FC_AUDICONF3);
++}
++
++static int cea_audio_rates[HDMI_MAX_RATES] = {
++ 32000, 44100, 48000, 88200, 96000, 176400, 192000,
++};
++
++static void fsl_hdmi_get_playback_rates(void)
++{
++ int i, count = 0;
++ u8 rates;
++
++ /* Always assume basic audio support */
++ rates = edid_cfg.sample_rates | 0x7;
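++ /* ORing in 0x7 forces bits 0-2 on: 32, 44.1 and 48 kHz, the CEA-861
++ "basic audio" rates, even if the EDID sample-rate mask is empty */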
++
++ for (i = 0 ; i < HDMI_MAX_RATES ; i++)
++ if ((rates & (1 << i)) != 0)
++ playback_rates[count++] = cea_audio_rates[i];
++
++ playback_constraint_rates.list = playback_rates;
++ playback_constraint_rates.count = count;
++
++ for (i = 0 ; i < playback_constraint_rates.count ; i++)
++ pr_debug("%s: constraint = %d Hz\n", __func__, playback_rates[i]);
++}
++
++static void fsl_hdmi_get_playback_sample_size(void)
++{
++ int i = 0;
++
++ /* Always assume basic audio support */
++ playback_sample_size[i++] = 16;
++
++ if (edid_cfg.sample_sizes & 0x4)
++ playback_sample_size[i++] = 32;
++
++ playback_constraint_bits.list = playback_sample_size;
++ playback_constraint_bits.count = i;
++
++ for (i = 0 ; i < playback_constraint_bits.count ; i++)
++ pr_debug("%s: constraint = %d bits\n", __func__, playback_sample_size[i]);
++}
++
++static void fsl_hdmi_get_playback_channels(void)
++{
++ int channels = 2, i = 0;
++
++ /* Always assume basic audio support */
++ playback_channels[i++] = channels;
++ channels += 2;
++
++ while ((i < HDMI_MAX_CHANNEL_CONSTRAINTS) &&
++ (channels <= edid_cfg.max_channels)) {
++ playback_channels[i++] = channels;
++ channels += 2;
++ }
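++ /* e.g. an EDID reporting max_channels = 8 yields the list {2, 4, 6, 8} */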
++
++ playback_constraint_channels.list = playback_channels;
++ playback_constraint_channels.count = i;
++
++ for (i = 0 ; i < playback_constraint_channels.count ; i++)
++ pr_debug("%s: constraint = %d channels\n", __func__, playback_channels[i]);
++}
++
++static int fsl_hdmi_update_constraints(struct snd_pcm_substream *substream)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ int edid_status, ret;
++
++ edid_status = hdmi_get_edid_cfg(&edid_cfg);
++
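++ /* If the EDID read failed and the sink isn't flagged HDMI-capable,
++ refuse to open the stream (presumably a DVI sink with no audio) */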
++ if (edid_status && !edid_cfg.hdmi_cap)
++ return -1;
++
++ fsl_hdmi_get_playback_rates();
++ ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
++ &playback_constraint_rates);
++ if (ret)
++ return ret;
++
++ fsl_hdmi_get_playback_sample_size();
++ ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
++ &playback_constraint_bits);
++ if (ret)
++ return ret;
++
++ fsl_hdmi_get_playback_channels();
++ ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
++ &playback_constraint_channels);
++ if (ret)
++ return ret;
++
++ ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
++ if (ret)
++ return ret;
++
++ return 0;
++}
++
++static int fsl_hdmi_soc_startup(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct imx_hdmi *hdmi_data = snd_soc_dai_get_drvdata(dai);
++ int ret;
++
++ ret = fsl_hdmi_update_constraints(substream);
++ if (ret < 0)
++ return ret;
++
++ clk_prepare_enable(hdmi_data->isfr_clk);
++ clk_prepare_enable(hdmi_data->iahb_clk);
++
++ dev_dbg(dai->dev, "%s hdmi clks: isfr:%d iahb:%d\n", __func__,
++ (int)clk_get_rate(hdmi_data->isfr_clk),
++ (int)clk_get_rate(hdmi_data->iahb_clk));
++
++ /* Indicates the subpacket represents a flatline sample */
++ hdmi_audio_writeb(FC_AUDSCONF, AUD_PACKET_SAMPFIT, 0x0);
++
++ return 0;
++}
++
++static void fsl_hdmi_soc_shutdown(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct imx_hdmi *hdmi_data = snd_soc_dai_get_drvdata(dai);
++
++ clk_disable_unprepare(hdmi_data->iahb_clk);
++ clk_disable_unprepare(hdmi_data->isfr_clk);
++}
++
++static int fsl_hdmi_soc_prepare(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++
++ hdmi_set_audio_infoframe(runtime->channels);
++ hdmi_audio_writeb(FC_AUDSCONF, AUD_PACKET_LAYOUT,
++ (runtime->channels > 2) ? 0x1 : 0x0);
++ hdmi_set_sample_rate(runtime->rate);
++ dumpregs(dai);
++
++ return 0;
++}
++
++static struct snd_soc_dai_ops fsl_hdmi_soc_dai_ops = {
++ .startup = fsl_hdmi_soc_startup,
++ .shutdown = fsl_hdmi_soc_shutdown,
++ .prepare = fsl_hdmi_soc_prepare,
++};
++
++/* IEC60958 status functions */
++static int fsl_hdmi_iec_info(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_info *uinfo)
++{
++ uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
++ uinfo->count = 1;
++
++ return 0;
++}
++
++
++static int fsl_hdmi_iec_get(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *uvalue)
++{
++ int i;
++
++ for (i = 0 ; i < 6 ; i++)
++ uvalue->value.iec958.status[i] = iec_header.status[i];
++
++ return 0;
++}
++
++static int fsl_hdmi_iec_put(struct snd_kcontrol *kcontrol,
++ struct snd_ctl_elem_value *uvalue)
++{
++ int i;
++
++ /* Do not allow professional mode */
++ if (uvalue->value.iec958.status[0] & IEC958_AES0_PROFESSIONAL)
++ return -EPERM;
++
++ for (i = 0 ; i < 6 ; i++) {
++ iec_header.status[i] = uvalue->value.iec958.status[i];
++ pr_debug("%s status[%d]=0x%02x\n", __func__, i, iec_header.status[i]);
++ }
++
++ return 0;
++}
++
++static struct snd_kcontrol_new fsl_hdmi_ctrls[] = {
++ /* IEC958 channel status control */
++ {
++ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
++ .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
++ .access = SNDRV_CTL_ELEM_ACCESS_READ |
++ SNDRV_CTL_ELEM_ACCESS_WRITE |
++ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
++ .info = fsl_hdmi_iec_info,
++ .get = fsl_hdmi_iec_get,
++ .put = fsl_hdmi_iec_put,
++ },
++};
++
++static int fsl_hdmi_soc_dai_probe(struct snd_soc_dai *dai)
++{
++ int ret;
++
++ init_channel_allocations();
++
++ ret = snd_soc_add_dai_controls(dai, fsl_hdmi_ctrls,
++ ARRAY_SIZE(fsl_hdmi_ctrls));
++ if (ret)
++ dev_warn(dai->dev, "failed to add dai controls\n");
++
++ return 0;
++}
++
++static struct snd_soc_dai_driver fsl_hdmi_dai = {
++ .probe = &fsl_hdmi_soc_dai_probe,
++ .playback = {
++ .channels_min = 2,
++ .channels_max = 8,
++ .rates = MXC_HDMI_RATES_PLAYBACK,
++ .formats = MXC_HDMI_FORMATS_PLAYBACK,
++ },
++ .ops = &fsl_hdmi_soc_dai_ops,
++};
++
++static const struct snd_soc_component_driver fsl_hdmi_component = {
++ .name = "fsl-hdmi",
++};
++
++static int fsl_hdmi_dai_probe(struct platform_device *pdev)
++{
++ struct device_node *np = pdev->dev.of_node;
++ struct imx_hdmi *hdmi_data;
++ int ret = 0;
++
++ if (!np)
++ return -ENODEV;
++
++ if (!hdmi_get_registered()) {
++ dev_err(&pdev->dev, "failed to probe. Load HDMI-video first.\n");
++ return -ENOMEM;
++ }
++
++ hdmi_data = devm_kzalloc(&pdev->dev, sizeof(*hdmi_data), GFP_KERNEL);
++ if (!hdmi_data) {
++ dev_err(&pdev->dev, "failed to alloc hdmi_data\n");
++ return -ENOMEM;
++ }
++
++ hdmi_data->pdev = pdev;
++
++ memcpy(&hdmi_data->cpu_dai_drv, &fsl_hdmi_dai, sizeof(fsl_hdmi_dai));
++ hdmi_data->cpu_dai_drv.name = np->name;
++
++ hdmi_data->isfr_clk = devm_clk_get(&pdev->dev, "hdmi_isfr");
++ if (IS_ERR(hdmi_data->isfr_clk)) {
++ ret = PTR_ERR(hdmi_data->isfr_clk);
++ dev_err(&pdev->dev, "failed to get HDMI isfr clk: %d\n", ret);
++ return -EINVAL;
++ }
++
++ hdmi_data->iahb_clk = devm_clk_get(&pdev->dev, "hdmi_iahb");
++ if (IS_ERR(hdmi_data->iahb_clk)) {
++ ret = PTR_ERR(hdmi_data->iahb_clk);
++ dev_err(&pdev->dev, "failed to get HDMI ahb clk: %d\n", ret);
++ return -EINVAL;
++ }
++
++ dev_set_drvdata(&pdev->dev, hdmi_data);
++ ret = snd_soc_register_component(&pdev->dev, &fsl_hdmi_component,
++ &hdmi_data->cpu_dai_drv, 1);
++ if (ret) {
++ dev_err(&pdev->dev, "register DAI failed\n");
++ return ret;
++ }
++
++ hdmi_data->codec_dev = platform_device_register_simple(
++ "hdmi-audio-codec", -1, NULL, 0);
++ if (IS_ERR(hdmi_data->codec_dev)) {
++ dev_err(&pdev->dev, "failed to register HDMI audio codec\n");
++ ret = PTR_ERR(hdmi_data->codec_dev);
++ goto fail;
++ }
++
++ hdmi_data->dma_dev = platform_device_alloc("imx-hdmi-audio", -1);
++ if (IS_ERR(hdmi_data->dma_dev)) {
++ ret = PTR_ERR(hdmi_data->dma_dev);
++ goto fail_dma;
++ }
++
++ platform_set_drvdata(hdmi_data->dma_dev, hdmi_data);
++
++ ret = platform_device_add(hdmi_data->dma_dev);
++ if (ret) {
++ platform_device_put(hdmi_data->dma_dev);
++ goto fail_dma;
++ }
++
++ return 0;
++
++fail_dma:
++ platform_device_unregister(hdmi_data->codec_dev);
++fail:
++ snd_soc_unregister_component(&pdev->dev);
++
++ return ret;
++}
++
++static int fsl_hdmi_dai_remove(struct platform_device *pdev)
++{
++ struct imx_hdmi *hdmi_data = platform_get_drvdata(pdev);
++
++ platform_device_unregister(hdmi_data->dma_dev);
++ platform_device_unregister(hdmi_data->codec_dev);
++ snd_soc_unregister_component(&pdev->dev);
++
++ return 0;
++}
++
++static const struct of_device_id fsl_hdmi_dai_dt_ids[] = {
++ { .compatible = "fsl,imx6dl-hdmi-audio", },
++ { .compatible = "fsl,imx6q-hdmi-audio", },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, fsl_hdmi_dai_dt_ids);
++
++static struct platform_driver fsl_hdmi_driver = {
++ .probe = fsl_hdmi_dai_probe,
++ .remove = fsl_hdmi_dai_remove,
++ .driver = {
++ .name = "fsl-hdmi-dai",
++ .owner = THIS_MODULE,
++ .of_match_table = fsl_hdmi_dai_dt_ids,
++ },
++};
++module_platform_driver(fsl_hdmi_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("IMX HDMI TX DAI");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:fsl-hdmi-dai");
+diff -Nur linux-4.1.3/sound/soc/fsl/fsl_sai.c linux-xbian-imx6/sound/soc/fsl/fsl_sai.c
+--- linux-4.1.3/sound/soc/fsl/fsl_sai.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/sound/soc/fsl/fsl_sai.c 2015-07-27 23:13:11.149203200 +0200
+@@ -1,7 +1,7 @@
+ /*
+ * Freescale ALSA SoC Digital Audio Interface (SAI) driver.
+ *
+- * Copyright 2012-2013 Freescale Semiconductor, Inc.
++ * Copyright 2012-2014 Freescale Semiconductor, Inc.
+ *
+ * This program is free software, you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+@@ -662,7 +662,7 @@
+ return ret;
+
+ if (sai->sai_on_imx)
+- return imx_pcm_dma_init(pdev);
++ return imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
+ else
+ return devm_snd_dmaengine_pcm_register(&pdev->dev, NULL,
+ SND_DMAENGINE_PCM_FLAG_NO_RESIDUE);
+diff -Nur linux-4.1.3/sound/soc/fsl/fsl_spdif.c linux-xbian-imx6/sound/soc/fsl/fsl_spdif.c
+--- linux-4.1.3/sound/soc/fsl/fsl_spdif.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/sound/soc/fsl/fsl_spdif.c 2015-07-27 23:13:11.149203200 +0200
+@@ -1,7 +1,7 @@
+ /*
+ * Freescale S/PDIF ALSA SoC Digital Audio Interface (DAI) driver
+ *
+- * Copyright (C) 2013 Freescale Semiconductor, Inc.
++ * Copyright (C) 2013-2014 Freescale Semiconductor, Inc.
+ *
+ * Based on stmp3xxx_spdif_dai.c
+ * Vladimir Barinov <vbarinov@embeddedalley.com>
+@@ -20,6 +20,8 @@
+ #include <linux/of_device.h>
+ #include <linux/of_irq.h>
+ #include <linux/regmap.h>
++#include <linux/pm_runtime.h>
++#include <linux/busfreq-imx6.h>
+
+ #include <sound/asoundef.h>
+ #include <sound/dmaengine_pcm.h>
+@@ -106,8 +108,11 @@
+ struct clk *rxclk;
+ struct clk *coreclk;
+ struct clk *sysclk;
++ struct clk *dmaclk;
+ struct snd_dmaengine_dai_dma_data dma_params_tx;
+ struct snd_dmaengine_dai_dma_data dma_params_rx;
++ /* regcache for SRPC */
++ u32 regcache_srpc;
+ };
+
+ /* DPLL locked and lock loss interrupt handler */
+@@ -300,6 +305,8 @@
+ struct regmap *regmap = spdif_priv->regmap;
+ u32 val, cycle = 1000;
+
++ regcache_cache_bypass(regmap, true);
++
+ regmap_write(regmap, REG_SPDIF_SCR, SCR_SOFT_RESET);
+
+ /*
+@@ -310,6 +317,10 @@
+ regmap_read(regmap, REG_SPDIF_SCR, &val);
+ } while ((val & SCR_SOFT_RESET) && cycle--);
+
++ regcache_cache_bypass(regmap, false);
++ regcache_mark_dirty(regmap);
++ regcache_sync(regmap);
++
+ if (cycle)
+ return 0;
+ else
+@@ -371,7 +382,6 @@
+ unsigned long csfs = 0;
+ u32 stc, mask, rate;
+ u8 clk, txclk_df, sysclk_df;
+- int ret;
+
+ switch (sample_rate) {
+ case 32000:
+@@ -413,21 +423,6 @@
+
+ sysclk_df = spdif_priv->sysclk_df[rate];
+
+- /* Don't mess up the clocks from other modules */
+- if (clk != STC_TXCLK_SPDIF_ROOT)
+- goto clk_set_bypass;
+-
+- /*
+- * The S/PDIF block needs a clock of 64 * fs * txclk_df.
+- * So request 64 * fs * (txclk_df + 1) to get rounded.
+- */
+- ret = clk_set_rate(spdif_priv->txclk[rate], 64 * sample_rate * (txclk_df + 1));
+- if (ret) {
+- dev_err(&pdev->dev, "failed to set tx clock rate\n");
+- return ret;
+- }
+-
+-clk_set_bypass:
+ dev_dbg(&pdev->dev, "expected clock rate = %d\n",
+ (64 * sample_rate * txclk_df * sysclk_df));
+ dev_dbg(&pdev->dev, "actual clock rate = %ld\n",
+@@ -459,6 +454,8 @@
+ u32 scr, mask, i;
+ int ret;
+
++ pm_runtime_get_sync(cpu_dai->dev);
++
+ /* Reset module and interrupts only for first initialization */
+ if (!cpu_dai->active) {
+ ret = clk_prepare_enable(spdif_priv->coreclk);
+@@ -467,6 +464,12 @@
+ return ret;
+ }
+
++ ret = clk_prepare_enable(spdif_priv->dmaclk);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to enable dma clock\n");
++ return ret;
++ }
++
+ ret = spdif_softreset(spdif_priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to soft reset\n");
+@@ -533,8 +536,11 @@
+ spdif_intr_status_clear(spdif_priv);
+ regmap_update_bits(regmap, REG_SPDIF_SCR,
+ SCR_LOW_POWER, SCR_LOW_POWER);
++ clk_disable_unprepare(spdif_priv->dmaclk);
+ clk_disable_unprepare(spdif_priv->coreclk);
+ }
++
++ pm_runtime_put_sync(cpu_dai->dev);
+ }
+
+ static int fsl_spdif_hw_params(struct snd_pcm_substream *substream,
+@@ -1015,6 +1021,27 @@
+ }
+ }
+
++static bool fsl_spdif_volatile_reg(struct device *dev, unsigned int reg)
++{
++ switch (reg) {
++ case REG_SPDIF_SRPC:
++ case REG_SPDIF_SIS:
++ case REG_SPDIF_SRL:
++ case REG_SPDIF_SRR:
++ case REG_SPDIF_SRCSH:
++ case REG_SPDIF_SRCSL:
++ case REG_SPDIF_SRU:
++ case REG_SPDIF_SRQ:
++ case REG_SPDIF_STL:
++ case REG_SPDIF_STR:
++ case REG_SPDIF_SRFM:
++ return true;
++ default:
++ return false;
++ }
++
++}
++
+ static bool fsl_spdif_writeable_reg(struct device *dev, unsigned int reg)
+ {
+ switch (reg) {
+@@ -1041,7 +1068,9 @@
+
+ .max_register = REG_SPDIF_STC,
+ .readable_reg = fsl_spdif_readable_reg,
++ .volatile_reg = fsl_spdif_volatile_reg,
+ .writeable_reg = fsl_spdif_writeable_reg,
++ .cache_type = REGCACHE_RBTREE,
+ };
+
+ static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv,
+@@ -1049,8 +1078,8 @@
+ enum spdif_txrate index, bool round)
+ {
+ const u32 rate[] = { 32000, 44100, 48000, 96000, 192000 };
+- bool is_sysclk = clk_is_match(clk, spdif_priv->sysclk);
+- u64 rate_ideal, rate_actual, sub;
++ bool is_sysclk = clk == spdif_priv->sysclk;
++ u64 rate_actual, sub;
+ u32 sysclk_dfmin, sysclk_dfmax;
+ u32 txclk_df, sysclk_df, arate;
+
+@@ -1060,11 +1089,7 @@
+
+ for (sysclk_df = sysclk_dfmin; sysclk_df <= sysclk_dfmax; sysclk_df++) {
+ for (txclk_df = 1; txclk_df <= 128; txclk_df++) {
+- rate_ideal = rate[index] * (txclk_df + 1) * 64;
+- if (round)
+- rate_actual = clk_round_rate(clk, rate_ideal);
+- else
+- rate_actual = clk_get_rate(clk);
++ rate_actual = clk_get_rate(clk);
+
+ arate = rate_actual / 64;
+ arate /= txclk_df * sysclk_df;
+@@ -1214,6 +1239,13 @@
+ return PTR_ERR(spdif_priv->coreclk);
+ }
+
++ /* Get dma clock for dma script operation */
++ spdif_priv->dmaclk = devm_clk_get(&pdev->dev, "dma");
++ if (IS_ERR(spdif_priv->dmaclk)) {
++ dev_err(&pdev->dev, "no dma clock in devicetree\n");
++ return PTR_ERR(spdif_priv->dmaclk);
++ }
++
+ /* Select clock source for rx/tx clock */
+ spdif_priv->rxclk = devm_clk_get(&pdev->dev, "rxtx1");
+ if (IS_ERR(spdif_priv->rxclk)) {
+@@ -1247,6 +1279,8 @@
+ spdif_priv->dma_params_tx.addr = res->start + REG_SPDIF_STL;
+ spdif_priv->dma_params_rx.addr = res->start + REG_SPDIF_SRL;
+
++ pm_runtime_enable(&pdev->dev);
++
+ /* Register with ASoC */
+ dev_set_drvdata(&pdev->dev, spdif_priv);
+
+@@ -1257,13 +1291,62 @@
+ return ret;
+ }
+
+- ret = imx_pcm_dma_init(pdev);
++ ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);
+ if (ret)
+ dev_err(&pdev->dev, "imx_pcm_dma_init failed: %d\n", ret);
+
+ return ret;
+ }
+
++#ifdef CONFIG_PM
++static int fsl_spdif_runtime_resume(struct device *dev)
++{
++ request_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++
++static int fsl_spdif_runtime_suspend(struct device *dev)
++{
++ release_bus_freq(BUS_FREQ_HIGH);
++ return 0;
++}
++#endif
++
++#ifdef CONFIG_PM_SLEEP
++static int fsl_spdif_suspend(struct device *dev)
++{
++ struct fsl_spdif_priv *spdif_priv = dev_get_drvdata(dev);
++
++ regmap_read(spdif_priv->regmap, REG_SPDIF_SRPC,
++ &spdif_priv->regcache_srpc);
++
++ regcache_cache_only(spdif_priv->regmap, true);
++ regcache_mark_dirty(spdif_priv->regmap);
++
++ return 0;
++}
++
++static int fsl_spdif_resume(struct device *dev)
++{
++ struct fsl_spdif_priv *spdif_priv = dev_get_drvdata(dev);
++
++ regcache_cache_only(spdif_priv->regmap, false);
++
++ regmap_update_bits(spdif_priv->regmap, REG_SPDIF_SRPC,
++ SRPC_CLKSRC_SEL_MASK | SRPC_GAINSEL_MASK,
++ spdif_priv->regcache_srpc);
++
++ return regcache_sync(spdif_priv->regmap);
++}
++#endif /* CONFIG_PM_SLEEP */
++
++static const struct dev_pm_ops fsl_spdif_pm = {
++ SET_RUNTIME_PM_OPS(fsl_spdif_runtime_suspend,
++ fsl_spdif_runtime_resume,
++ NULL)
++ SET_SYSTEM_SLEEP_PM_OPS(fsl_spdif_suspend, fsl_spdif_resume)
++};
++
+ static const struct of_device_id fsl_spdif_dt_ids[] = {
+ { .compatible = "fsl,imx35-spdif", },
+ { .compatible = "fsl,vf610-spdif", },
+@@ -1275,6 +1358,7 @@
+ .driver = {
+ .name = "fsl-spdif-dai",
+ .of_match_table = fsl_spdif_dt_ids,
++ .pm = &fsl_spdif_pm,
+ },
+ .probe = fsl_spdif_probe,
+ };
+diff -Nur linux-4.1.3/sound/soc/fsl/fsl_ssi.c linux-xbian-imx6/sound/soc/fsl/fsl_ssi.c
+--- linux-4.1.3/sound/soc/fsl/fsl_ssi.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/sound/soc/fsl/fsl_ssi.c 2015-07-27 23:13:11.153188979 +0200
+@@ -3,7 +3,7 @@
+ *
+ * Author: Timur Tabi <timur@freescale.com>
+ *
+- * Copyright 2007-2010 Freescale Semiconductor, Inc.
++ * Copyright (C) 2007-2013 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+@@ -30,19 +30,21 @@
+ * around this by not polling these bits but only wait a fixed delay.
+ */
+
++#include <linux/busfreq-imx6.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
+ #include <linux/module.h>
+ #include <linux/interrupt.h>
+ #include <linux/clk.h>
++#include <linux/debugfs.h>
+ #include <linux/device.h>
+ #include <linux/delay.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
+-#include <linux/of.h>
+ #include <linux/of_address.h>
+ #include <linux/of_irq.h>
+ #include <linux/of_platform.h>
++#include <linux/pm_runtime.h>
+
+ #include <sound/core.h>
+ #include <sound/pcm.h>
+@@ -54,6 +56,43 @@
+ #include "fsl_ssi.h"
+ #include "imx-pcm.h"
+
++#ifdef PPC
++#define read_ssi(addr) in_be32(addr)
++#define write_ssi(val, addr) out_be32(addr, val)
++#define write_ssi_mask(addr, clear, set) clrsetbits_be32(addr, clear, set)
++#else
++#define read_ssi(addr) readl(addr)
++#define write_ssi(val, addr) writel(val, addr)
++/*
++ * FIXME: Proper locking should be added at write_ssi_mask caller level
++ * to ensure this register read/modify/write sequence is race free.
++ */
++static inline void write_ssi_mask(u32 __iomem *addr, u32 clear, u32 set)
++{
++ u32 val = readl(addr);
++ val = (val & ~clear) | set;
++ writel(val, addr);
++}
++#endif
++
++#ifdef DEBUG
++#define NUM_OF_SSI_REG (sizeof(struct ccsr_ssi) / sizeof(__be32))
++
++void dump_reg(struct ccsr_ssi __iomem *ssi)
++{
++ u32 val, i;
++
++ for (i = 0; i < NUM_OF_SSI_REG; i++) {
++ if (&ssi->stx0 + i == NULL)
++ continue;
++ val = read_ssi(&ssi->stx0 + i);
++ pr_debug("REG %x = %x\n", (u32)(&ssi->stx0 + i) & 0xff, val);
++ }
++}
++#else
++void dump_reg(struct ccsr_ssi __iomem *ssi) {}
++#endif
++
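
The FIXME above leaves the read/modify/write in write_ssi_mask() unserialized. One hedged sketch of caller-level locking, with a hypothetical lock (the patch itself keeps the unlocked helper):

    #include <linux/io.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_ssi_lock);

    /* sketch: serialise the read/modify/write so two callers cannot
     * interleave and lose each other's bit updates */
    static inline void write_ssi_mask_locked(u32 __iomem *addr, u32 clear, u32 set)
    {
            unsigned long flags;
            u32 val;

            spin_lock_irqsave(&example_ssi_lock, flags);
            val = readl(addr);
            val = (val & ~clear) | set;
            writel(val, addr);
            spin_unlock_irqrestore(&example_ssi_lock, flags);
    }
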
+ /**
+ * FSLSSI_I2S_RATES: sample rates supported by the I2S
+ *
+@@ -67,6 +106,8 @@
+ /**
+ * FSLSSI_I2S_FORMATS: audio formats supported by the SSI
+ *
++ * This driver currently only supports the SSI running in I2S slave mode.
++ *
+ * The SSI has a limitation in that the samples must be in the same byte
+ * order as the host CPU. This is because when multiple bytes are written
+ * to the STX register, the bytes and bits must be written in the same
+@@ -92,6 +133,8 @@
+ #define FSLSSI_SIER_DBG_TX_FLAGS (CCSR_SSI_SIER_TFE0_EN | \
+ CCSR_SSI_SIER_TLS_EN | CCSR_SSI_SIER_TFS_EN | \
+ CCSR_SSI_SIER_TUE0_EN | CCSR_SSI_SIER_TFRC_EN)
++#define FSLSSI_SISR_MASK (FSLSSI_SIER_DBG_RX_FLAGS | FSLSSI_SIER_DBG_TX_FLAGS)
++
+
+ enum fsl_ssi_type {
+ FSL_SSI_MCP8610,
+@@ -111,158 +154,105 @@
+ struct fsl_ssi_reg_val rx;
+ struct fsl_ssi_reg_val tx;
+ };
+-static const struct regmap_config fsl_ssi_regconfig = {
+- .max_register = CCSR_SSI_SACCDIS,
+- .reg_bits = 32,
+- .val_bits = 32,
+- .reg_stride = 4,
+- .val_format_endian = REGMAP_ENDIAN_NATIVE,
+-};
+-
+-struct fsl_ssi_soc_data {
+- bool imx;
+- bool offline_config;
+- u32 sisr_write_mask;
+-};
+
+ /**
+ * fsl_ssi_private: per-SSI private data
+ *
+- * @reg: Pointer to the regmap registers
+- * @irq: IRQ of this SSI
+- * @cpu_dai_drv: CPU DAI driver for this device
+- *
+- * @dai_fmt: DAI configuration this device is currently used with
+- * @i2s_mode: i2s and network mode configuration of the device. Is used to
+- * switch between normal and i2s/network mode
+- * mode depending on the number of channels
+- * @use_dma: DMA is used or FIQ with stream filter
+- * @use_dual_fifo: DMA with support for both FIFOs used
+- * @fifo_deph: Depth of the SSI FIFOs
+- * @rxtx_reg_val: Specific register settings for receive/transmit configuration
+- *
+- * @clk: SSI clock
+- * @baudclk: SSI baud clock for master mode
+- * @baudclk_streams: Active streams that are using baudclk
+- * @bitclk_freq: bitclock frequency set by .set_dai_sysclk
+- *
+- * @dma_params_tx: DMA transmit parameters
+- * @dma_params_rx: DMA receive parameters
++ * @ssi: pointer to the SSI's registers
+ * @ssi_phys: physical address of the SSI registers
+- *
+- * @fiq_params: FIQ stream filtering parameters
+- *
+- * @pdev: Pointer to pdev used for deprecated fsl-ssi sound card
+- *
+- * @dbg_stats: Debugging statistics
+- *
+- * @soc: SoC specifc data
++ * @irq: IRQ of this SSI
++ * @playback: the number of playback streams opened
++ * @capture: the number of capture streams opened
++ * @cpu_dai: the CPU DAI for this device
++ * @dev_attr: the sysfs device attribute structure
++ * @stats: SSI statistics
++ * @name: name for this device
+ */
+ struct fsl_ssi_private {
+- struct regmap *regs;
+- int irq;
++ struct ccsr_ssi __iomem *ssi;
++ dma_addr_t ssi_phys;
++ unsigned int irq;
++ unsigned int fifo_depth;
+ struct snd_soc_dai_driver cpu_dai_drv;
++ struct platform_device *pdev;
+
+- unsigned int dai_fmt;
+- u8 i2s_mode;
++ enum fsl_ssi_type hw_type;
++ bool new_binding;
++ bool ssi_on_imx;
++ bool imx_ac97;
+ bool use_dma;
++ bool baudclk_locked;
++ bool irq_stats;
++ bool offline_config;
+ bool use_dual_fifo;
+- bool has_ipg_clk_name;
+- unsigned int fifo_depth;
+- struct fsl_ssi_rxtx_reg_val rxtx_reg_val;
+-
+- struct clk *clk;
++ u8 i2s_mode;
++ spinlock_t baudclk_lock;
+ struct clk *baudclk;
+- unsigned int baudclk_streams;
+- unsigned int bitclk_freq;
+-
+- /* DMA params */
++ struct clk *clk;
+ struct snd_dmaengine_dai_dma_data dma_params_tx;
+ struct snd_dmaengine_dai_dma_data dma_params_rx;
+- dma_addr_t ssi_phys;
+-
+- /* params for non-dma FIQ stream filtered mode */
+ struct imx_pcm_fiq_params fiq_params;
++ /* Register values for rx/tx configuration */
++ struct fsl_ssi_rxtx_reg_val rxtx_reg_val;
+
+- /* Used when using fsl-ssi as sound-card. This is only used by ppc and
+- * should be replaced with simple-sound-card. */
+- struct platform_device *pdev;
+-
+- struct fsl_ssi_dbg dbg_stats;
+-
+- const struct fsl_ssi_soc_data *soc;
+-};
+-
+-/*
+- * imx51 and later SoCs have a slightly different IP that allows the
+- * SSI configuration while the SSI unit is running.
+- *
+- * More important, it is necessary on those SoCs to configure the
+- * sperate TX/RX DMA bits just before starting the stream
+- * (fsl_ssi_trigger). The SDMA unit has to be configured before fsl_ssi
+- * sends any DMA requests to the SDMA unit, otherwise it is not defined
+- * how the SDMA unit handles the DMA request.
+- *
+- * SDMA units are present on devices starting at imx35 but the imx35
+- * reference manual states that the DMA bits should not be changed
+- * while the SSI unit is running (SSIEN). So we support the necessary
+- * online configuration of fsl-ssi starting at imx51.
+- */
++ struct {
++ unsigned int rfrc;
++ unsigned int tfrc;
++ unsigned int cmdau;
++ unsigned int cmddu;
++ unsigned int rxt;
++ unsigned int rdr1;
++ unsigned int rdr0;
++ unsigned int tde1;
++ unsigned int tde0;
++ unsigned int roe1;
++ unsigned int roe0;
++ unsigned int tue1;
++ unsigned int tue0;
++ unsigned int tfs;
++ unsigned int rfs;
++ unsigned int tls;
++ unsigned int rls;
++ unsigned int rff1;
++ unsigned int rff0;
++ unsigned int tfe1;
++ unsigned int tfe0;
++ } stats;
++ struct dentry *dbg_dir;
++ struct dentry *dbg_stats;
+
+-static struct fsl_ssi_soc_data fsl_ssi_mpc8610 = {
+- .imx = false,
+- .offline_config = true,
+- .sisr_write_mask = CCSR_SSI_SISR_RFRC | CCSR_SSI_SISR_TFRC |
+- CCSR_SSI_SISR_ROE0 | CCSR_SSI_SISR_ROE1 |
+- CCSR_SSI_SISR_TUE0 | CCSR_SSI_SISR_TUE1,
++ char name[1];
+ };
+
+-static struct fsl_ssi_soc_data fsl_ssi_imx21 = {
+- .imx = true,
+- .offline_config = true,
+- .sisr_write_mask = 0,
+-};
++#ifdef CONFIG_PM
++static int fsl_ssi_runtime_resume(struct device *dev)
++{
++ request_bus_freq(BUS_FREQ_AUDIO);
++ return 0;
++}
+
+-static struct fsl_ssi_soc_data fsl_ssi_imx35 = {
+- .imx = true,
+- .offline_config = true,
+- .sisr_write_mask = CCSR_SSI_SISR_RFRC | CCSR_SSI_SISR_TFRC |
+- CCSR_SSI_SISR_ROE0 | CCSR_SSI_SISR_ROE1 |
+- CCSR_SSI_SISR_TUE0 | CCSR_SSI_SISR_TUE1,
+-};
++static int fsl_ssi_runtime_suspend(struct device *dev)
++{
++ release_bus_freq(BUS_FREQ_AUDIO);
++ return 0;
++}
++#endif
+
+-static struct fsl_ssi_soc_data fsl_ssi_imx51 = {
+- .imx = true,
+- .offline_config = false,
+- .sisr_write_mask = CCSR_SSI_SISR_ROE0 | CCSR_SSI_SISR_ROE1 |
+- CCSR_SSI_SISR_TUE0 | CCSR_SSI_SISR_TUE1,
++static const struct dev_pm_ops fsl_ssi_pm = {
++ SET_RUNTIME_PM_OPS(fsl_ssi_runtime_suspend,
++ fsl_ssi_runtime_resume,
++ NULL)
+ };
+
+ static const struct of_device_id fsl_ssi_ids[] = {
+- { .compatible = "fsl,mpc8610-ssi", .data = &fsl_ssi_mpc8610 },
+- { .compatible = "fsl,imx51-ssi", .data = &fsl_ssi_imx51 },
+- { .compatible = "fsl,imx35-ssi", .data = &fsl_ssi_imx35 },
+- { .compatible = "fsl,imx21-ssi", .data = &fsl_ssi_imx21 },
++ { .compatible = "fsl,mpc8610-ssi", .data = (void *) FSL_SSI_MCP8610},
++ { .compatible = "fsl,imx51-ssi", .data = (void *) FSL_SSI_MX51},
++ { .compatible = "fsl,imx35-ssi", .data = (void *) FSL_SSI_MX35},
++ { .compatible = "fsl,imx21-ssi", .data = (void *) FSL_SSI_MX21},
+ {}
+ };
+ MODULE_DEVICE_TABLE(of, fsl_ssi_ids);
+
+-static bool fsl_ssi_is_ac97(struct fsl_ssi_private *ssi_private)
+-{
+- return !!(ssi_private->dai_fmt & SND_SOC_DAIFMT_AC97);
+-}
+-
+-static bool fsl_ssi_is_i2s_master(struct fsl_ssi_private *ssi_private)
+-{
+- return (ssi_private->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
+- SND_SOC_DAIFMT_CBS_CFS;
+-}
+-
+-static bool fsl_ssi_is_i2s_cbm_cfs(struct fsl_ssi_private *ssi_private)
+-{
+- return (ssi_private->dai_fmt & SND_SOC_DAIFMT_MASTER_MASK) ==
+- SND_SOC_DAIFMT_CBM_CFS;
+-}
+ /**
+ * fsl_ssi_isr: SSI interrupt handler
+ *
+@@ -278,74 +268,281 @@
+ static irqreturn_t fsl_ssi_isr(int irq, void *dev_id)
+ {
+ struct fsl_ssi_private *ssi_private = dev_id;
+- struct regmap *regs = ssi_private->regs;
++ struct ccsr_ssi __iomem *ssi = ssi_private->ssi;
++ irqreturn_t ret = IRQ_NONE;
+ __be32 sisr;
+ __be32 sisr2;
++ __be32 sisr_write_mask = 0;
++
++ switch (ssi_private->hw_type) {
++ case FSL_SSI_MX21:
++ sisr_write_mask = 0;
++ break;
++
++ case FSL_SSI_MCP8610:
++ case FSL_SSI_MX35:
++ sisr_write_mask = CCSR_SSI_SISR_RFRC | CCSR_SSI_SISR_TFRC |
++ CCSR_SSI_SISR_ROE0 | CCSR_SSI_SISR_ROE1 |
++ CCSR_SSI_SISR_TUE0 | CCSR_SSI_SISR_TUE1;
++ break;
++
++ case FSL_SSI_MX51:
++ sisr_write_mask = CCSR_SSI_SISR_ROE0 | CCSR_SSI_SISR_ROE1 |
++ CCSR_SSI_SISR_TUE0 | CCSR_SSI_SISR_TUE1;
++ break;
++ }
+
+ /* We got an interrupt, so read the status register to see what we
+ were interrupted for. We mask it with the Interrupt Enable register
+ so that we only check for events that we're interested in.
+ */
+- regmap_read(regs, CCSR_SSI_SISR, &sisr);
++ sisr = read_ssi(&ssi->sisr) & FSLSSI_SISR_MASK;
+
+- sisr2 = sisr & ssi_private->soc->sisr_write_mask;
++ if (sisr & CCSR_SSI_SISR_RFRC) {
++ ssi_private->stats.rfrc++;
++ ret = IRQ_HANDLED;
++ }
++
++ if (sisr & CCSR_SSI_SISR_TFRC) {
++ ssi_private->stats.tfrc++;
++ ret = IRQ_HANDLED;
++ }
++
++ if (sisr & CCSR_SSI_SISR_CMDAU) {
++ ssi_private->stats.cmdau++;
++ ret = IRQ_HANDLED;
++ }
++
++ if (sisr & CCSR_SSI_SISR_CMDDU) {
++ ssi_private->stats.cmddu++;
++ ret = IRQ_HANDLED;
++ }
++
++ if (sisr & CCSR_SSI_SISR_RXT) {
++ ssi_private->stats.rxt++;
++ ret = IRQ_HANDLED;
++ }
++
++ if (sisr & CCSR_SSI_SISR_RDR1) {
++ ssi_private->stats.rdr1++;
++ ret = IRQ_HANDLED;
++ }
++
++ if (sisr & CCSR_SSI_SISR_RDR0) {
++ ssi_private->stats.rdr0++;
++ ret = IRQ_HANDLED;
++ }
++
++ if (sisr & CCSR_SSI_SISR_TDE1) {
++ ssi_private->stats.tde1++;
++ ret = IRQ_HANDLED;
++ }
++
++ if (sisr & CCSR_SSI_SISR_TDE0) {
++ ssi_private->stats.tde0++;
++ ret = IRQ_HANDLED;
++ }
++
++ if (sisr & CCSR_SSI_SISR_ROE1) {
++ ssi_private->stats.roe1++;
++ ret = IRQ_HANDLED;
++ }
++
++ if (sisr & CCSR_SSI_SISR_ROE0) {
++ ssi_private->stats.roe0++;
++ ret = IRQ_HANDLED;
++ }
++
++ if (sisr & CCSR_SSI_SISR_TUE1) {
++ ssi_private->stats.tue1++;
++ ret = IRQ_HANDLED;
++ }
++
++ if (sisr & CCSR_SSI_SISR_TUE0) {
++ ssi_private->stats.tue0++;
++ ret = IRQ_HANDLED;
++ }
++
++ if (sisr & CCSR_SSI_SISR_TFS) {
++ ssi_private->stats.tfs++;
++ ret = IRQ_HANDLED;
++ }
++
++ if (sisr & CCSR_SSI_SISR_RFS) {
++ ssi_private->stats.rfs++;
++ ret = IRQ_HANDLED;
++ }
++
++ if (sisr & CCSR_SSI_SISR_TLS) {
++ ssi_private->stats.tls++;
++ ret = IRQ_HANDLED;
++ }
++
++ if (sisr & CCSR_SSI_SISR_RLS) {
++ ssi_private->stats.rls++;
++ ret = IRQ_HANDLED;
++ }
++
++ if (sisr & CCSR_SSI_SISR_RFF1) {
++ ssi_private->stats.rff1++;
++ ret = IRQ_HANDLED;
++ }
++
++ if (sisr & CCSR_SSI_SISR_RFF0) {
++ ssi_private->stats.rff0++;
++ ret = IRQ_HANDLED;
++ }
++
++ if (sisr & CCSR_SSI_SISR_TFE1) {
++ ssi_private->stats.tfe1++;
++ ret = IRQ_HANDLED;
++ }
++
++ if (sisr & CCSR_SSI_SISR_TFE0) {
++ ssi_private->stats.tfe0++;
++ ret = IRQ_HANDLED;
++ }
++
++ sisr2 = sisr & sisr_write_mask;
+ /* Clear the bits that we set */
+ if (sisr2)
+- regmap_write(regs, CCSR_SSI_SISR, sisr2);
++ write_ssi(sisr2, &ssi->sisr);
++
++ return ret;
++}
++
++#if IS_ENABLED(CONFIG_DEBUG_FS)
++/* Show the statistics of a flag only if its interrupt is enabled. The
++ * compiler will optimize this code to a no-op if the interrupt is not
++ * enabled.
++ */
++#define SIER_SHOW(flag, name) \
++ do { \
++ if (FSLSSI_SISR_MASK & CCSR_SSI_SIER_##flag) \
++ seq_printf(s, #name "=%u\n", ssi_private->stats.name); \
++ } while (0)
++
++
++/**
++ * fsl_sysfs_ssi_show: display SSI statistics
++ *
++ * Display the statistics for the current SSI device. To avoid confusion,
++ * we only show those counts that are enabled.
++ */
++static int fsl_ssi_stats_show(struct seq_file *s, void *unused)
++{
++ struct fsl_ssi_private *ssi_private = s->private;
++
++ SIER_SHOW(RFRC_EN, rfrc);
++ SIER_SHOW(TFRC_EN, tfrc);
++ SIER_SHOW(CMDAU_EN, cmdau);
++ SIER_SHOW(CMDDU_EN, cmddu);
++ SIER_SHOW(RXT_EN, rxt);
++ SIER_SHOW(RDR1_EN, rdr1);
++ SIER_SHOW(RDR0_EN, rdr0);
++ SIER_SHOW(TDE1_EN, tde1);
++ SIER_SHOW(TDE0_EN, tde0);
++ SIER_SHOW(ROE1_EN, roe1);
++ SIER_SHOW(ROE0_EN, roe0);
++ SIER_SHOW(TUE1_EN, tue1);
++ SIER_SHOW(TUE0_EN, tue0);
++ SIER_SHOW(TFS_EN, tfs);
++ SIER_SHOW(RFS_EN, rfs);
++ SIER_SHOW(TLS_EN, tls);
++ SIER_SHOW(RLS_EN, rls);
++ SIER_SHOW(RFF1_EN, rff1);
++ SIER_SHOW(RFF0_EN, rff0);
++ SIER_SHOW(TFE1_EN, tfe1);
++ SIER_SHOW(TFE0_EN, tfe0);
++
++ return 0;
++}
++
++static int fsl_ssi_stats_open(struct inode *inode, struct file *file)
++{
++ return single_open(file, fsl_ssi_stats_show, inode->i_private);
++}
+
+- fsl_ssi_dbg_isr(&ssi_private->dbg_stats, sisr);
++static const struct file_operations fsl_ssi_stats_ops = {
++ .open = fsl_ssi_stats_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = single_release,
++};
++
++static int fsl_ssi_debugfs_create(struct fsl_ssi_private *ssi_private,
++ struct device *dev)
++{
++ ssi_private->dbg_dir = debugfs_create_dir(dev_name(dev), NULL);
++ if (!ssi_private->dbg_dir)
++ return -ENOMEM;
++
++ ssi_private->dbg_stats = debugfs_create_file("stats", S_IRUGO,
++ ssi_private->dbg_dir, ssi_private, &fsl_ssi_stats_ops);
++ if (!ssi_private->dbg_stats) {
++ debugfs_remove(ssi_private->dbg_dir);
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static void fsl_ssi_debugfs_remove(struct fsl_ssi_private *ssi_private)
++{
++ debugfs_remove(ssi_private->dbg_stats);
++ debugfs_remove(ssi_private->dbg_dir);
++}
++
++#else
++
++static int fsl_ssi_debugfs_create(struct fsl_ssi_private *ssi_private,
++ struct device *dev)
++{
++ return 0;
++}
+
+- return IRQ_HANDLED;
++static void fsl_ssi_debugfs_remove(struct fsl_ssi_private *ssi_private)
++{
+ }
+
++#endif /* IS_ENABLED(CONFIG_DEBUG_FS) */
++
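
The interrupt handler and SIER_SHOW blocks above enumerate every SISR flag by hand. The same bookkeeping can be expressed as a table walk; a sketch of that alternative (hypothetical, not what the patch does):

    #include <linux/interrupt.h>
    #include <linux/types.h>

    struct example_sisr_stat {
            u32 bit;                  /* one CCSR_SSI_SISR_* flag */
            unsigned int *counter;    /* matching field in the stats struct */
    };

    /* count every asserted flag; any hit means the IRQ was ours */
    static irqreturn_t example_count_sisr(u32 sisr,
                                          const struct example_sisr_stat *tbl,
                                          size_t n)
    {
            irqreturn_t ret = IRQ_NONE;
            size_t i;

            for (i = 0; i < n; i++) {
                    if (sisr & tbl[i].bit) {
                            (*tbl[i].counter)++;
                            ret = IRQ_HANDLED;
                    }
            }
            return ret;
    }
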
+ /*
+ * Enable/Disable all rx/tx config flags at once.
+ */
+ static void fsl_ssi_rxtx_config(struct fsl_ssi_private *ssi_private,
+ bool enable)
+ {
+- struct regmap *regs = ssi_private->regs;
++ struct ccsr_ssi __iomem *ssi = ssi_private->ssi;
+ struct fsl_ssi_rxtx_reg_val *vals = &ssi_private->rxtx_reg_val;
+
+ if (enable) {
+- regmap_update_bits(regs, CCSR_SSI_SIER,
+- vals->rx.sier | vals->tx.sier,
+- vals->rx.sier | vals->tx.sier);
+- regmap_update_bits(regs, CCSR_SSI_SRCR,
+- vals->rx.srcr | vals->tx.srcr,
+- vals->rx.srcr | vals->tx.srcr);
+- regmap_update_bits(regs, CCSR_SSI_STCR,
+- vals->rx.stcr | vals->tx.stcr,
+- vals->rx.stcr | vals->tx.stcr);
++ write_ssi_mask(&ssi->sier, 0, vals->rx.sier | vals->tx.sier);
++ write_ssi_mask(&ssi->srcr, 0, vals->rx.srcr | vals->tx.srcr);
++ write_ssi_mask(&ssi->stcr, 0, vals->rx.stcr | vals->tx.stcr);
+ } else {
+- regmap_update_bits(regs, CCSR_SSI_SRCR,
+- vals->rx.srcr | vals->tx.srcr, 0);
+- regmap_update_bits(regs, CCSR_SSI_STCR,
+- vals->rx.stcr | vals->tx.stcr, 0);
+- regmap_update_bits(regs, CCSR_SSI_SIER,
+- vals->rx.sier | vals->tx.sier, 0);
++ write_ssi_mask(&ssi->srcr, vals->rx.srcr | vals->tx.srcr, 0);
++ write_ssi_mask(&ssi->stcr, vals->rx.stcr | vals->tx.stcr, 0);
++ write_ssi_mask(&ssi->sier, vals->rx.sier | vals->tx.sier, 0);
+ }
+ }
+
+-/*
+- * Calculate the bits that have to be disabled for the current stream that is
+- * getting disabled. This keeps the bits enabled that are necessary for the
+- * second stream to work if 'stream_active' is true.
+- *
+- * Detailed calculation:
+- * These are the values that need to be active after disabling. For non-active
+- * second stream, this is 0:
+- * vals_stream * !!stream_active
+- *
+- * The following computes the overall differences between the setup for the
+- * to-disable stream and the active stream, a simple XOR:
+- * vals_disable ^ (vals_stream * !!(stream_active))
+- *
+- * The full expression adds a mask on all values we care about
+- */
+-#define fsl_ssi_disable_val(vals_disable, vals_stream, stream_active) \
+- ((vals_disable) & \
+- ((vals_disable) ^ ((vals_stream) * (u32)!!(stream_active))))
++static void fsl_ssi_clk_ctrl(struct fsl_ssi_private *ssi_private, bool enable)
++{
++ if (enable) {
++ if (ssi_private->ssi_on_imx) {
++ if (!IS_ERR(ssi_private->baudclk))
++ clk_enable(ssi_private->baudclk);
++ clk_enable(ssi_private->clk);
++ }
++ } else {
++ if (ssi_private->ssi_on_imx) {
++ if (!IS_ERR(ssi_private->baudclk))
++ clk_disable(ssi_private->baudclk);
++ clk_disable(ssi_private->clk);
++ }
++ }
++}
+
+ /*
+ * Enable/Disable a ssi configuration. You have to pass either
+@@ -354,22 +551,12 @@
+ static void fsl_ssi_config(struct fsl_ssi_private *ssi_private, bool enable,
+ struct fsl_ssi_reg_val *vals)
+ {
+- struct regmap *regs = ssi_private->regs;
++ struct ccsr_ssi __iomem *ssi = ssi_private->ssi;
+ struct fsl_ssi_reg_val *avals;
+- int nr_active_streams;
+- u32 scr_val;
+- int keep_active;
+-
+- regmap_read(regs, CCSR_SSI_SCR, &scr_val);
+-
+- nr_active_streams = !!(scr_val & CCSR_SSI_SCR_TE) +
++ u32 scr_val = read_ssi(&ssi->scr);
++ int nr_active_streams = !!(scr_val & CCSR_SSI_SCR_TE) +
+ !!(scr_val & CCSR_SSI_SCR_RE);
+
+- if (nr_active_streams - 1 > 0)
+- keep_active = 1;
+- else
+- keep_active = 0;
+-
+ /* Find the other direction values rx or tx which we do not want to
+ * modify */
+ if (&ssi_private->rxtx_reg_val.rx == vals)
+@@ -377,11 +564,12 @@
+ else
+ avals = &ssi_private->rxtx_reg_val.rx;
+
++ fsl_ssi_clk_ctrl(ssi_private, enable);
++
+ /* If vals should be disabled, start with disabling the unit */
+ if (!enable) {
+- u32 scr = fsl_ssi_disable_val(vals->scr, avals->scr,
+- keep_active);
+- regmap_update_bits(regs, CCSR_SSI_SCR, scr, 0);
++ u32 scr = vals->scr & (vals->scr ^ avals->scr);
++ write_ssi_mask(&ssi->scr, scr, 0);
+ }
+
+ /*
+@@ -389,9 +577,9 @@
+ * reconfiguration, so we have to enable all necessary flags at once
+ * even if we do not use them later (capture and playback configuration)
+ */
+- if (ssi_private->soc->offline_config) {
++ if (ssi_private->offline_config) {
+ if ((enable && !nr_active_streams) ||
+- (!enable && !keep_active))
++ (!enable && nr_active_streams == 1))
+ fsl_ssi_rxtx_config(ssi_private, enable);
+
+ goto config_done;
+@@ -402,9 +590,9 @@
+ * (online configuration)
+ */
+ if (enable) {
+- regmap_update_bits(regs, CCSR_SSI_SIER, vals->sier, vals->sier);
+- regmap_update_bits(regs, CCSR_SSI_SRCR, vals->srcr, vals->srcr);
+- regmap_update_bits(regs, CCSR_SSI_STCR, vals->stcr, vals->stcr);
++ write_ssi_mask(&ssi->sier, 0, vals->sier);
++ write_ssi_mask(&ssi->srcr, 0, vals->srcr);
++ write_ssi_mask(&ssi->stcr, 0, vals->stcr);
+ } else {
+ u32 sier;
+ u32 srcr;
+@@ -420,22 +608,19 @@
+ */
+
+ /* These assignments are simply vals without bits set in avals*/
+- sier = fsl_ssi_disable_val(vals->sier, avals->sier,
+- keep_active);
+- srcr = fsl_ssi_disable_val(vals->srcr, avals->srcr,
+- keep_active);
+- stcr = fsl_ssi_disable_val(vals->stcr, avals->stcr,
+- keep_active);
+-
+- regmap_update_bits(regs, CCSR_SSI_SRCR, srcr, 0);
+- regmap_update_bits(regs, CCSR_SSI_STCR, stcr, 0);
+- regmap_update_bits(regs, CCSR_SSI_SIER, sier, 0);
++ sier = vals->sier & (vals->sier ^ avals->sier);
++ srcr = vals->srcr & (vals->srcr ^ avals->srcr);
++ stcr = vals->stcr & (vals->stcr ^ avals->stcr);
++
++ write_ssi_mask(&ssi->srcr, srcr, 0);
++ write_ssi_mask(&ssi->stcr, stcr, 0);
++ write_ssi_mask(&ssi->sier, sier, 0);
+ }
+
+ config_done:
+ /* Enabling of subunits is done after configuration */
+ if (enable)
+- regmap_update_bits(regs, CCSR_SSI_SCR, vals->scr, vals->scr);
++ write_ssi_mask(&ssi->scr, 0, vals->scr);
+ }
+
+
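
The expressions of the form vals->x & (vals->x ^ avals->x) above compute which bits of the stopping stream may be cleared without disturbing bits shared with the other direction; the replaced mainline helper fsl_ssi_disable_val() additionally scaled the other stream's bits by whether that stream was actually active, a refinement the restored code drops. A worked sketch:

    #include <linux/types.h>

    /* sketch: bits to clear when one direction stops.  vals holds the
     * bits of the stream being disabled, avals the other stream's; the
     * XOR keeps only the bits NOT shared with the other stream. */
    static inline u32 example_disable_bits(u32 vals, u32 avals)
    {
            return vals & (vals ^ avals);
    }

    /* e.g. vals = 0x6, avals = 0x3: vals ^ avals = 0x5, so only 0x4 is
     * cleared and the shared 0x2 bit stays set for the other stream. */
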
+@@ -465,7 +650,7 @@
+ reg->tx.stcr = CCSR_SSI_STCR_TFEN0;
+ reg->tx.scr = 0;
+
+- if (!fsl_ssi_is_ac97(ssi_private)) {
++ if (!ssi_private->imx_ac97) {
+ reg->rx.scr = CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_RE;
+ reg->rx.sier |= CCSR_SSI_SIER_RFF0_EN;
+ reg->tx.scr = CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE;
+@@ -486,197 +671,161 @@
+
+ static void fsl_ssi_setup_ac97(struct fsl_ssi_private *ssi_private)
+ {
+- struct regmap *regs = ssi_private->regs;
++ struct ccsr_ssi __iomem *ssi = ssi_private->ssi;
+
+ /*
+ * Setup the clock control register
+ */
+- regmap_write(regs, CCSR_SSI_STCCR,
+- CCSR_SSI_SxCCR_WL(17) | CCSR_SSI_SxCCR_DC(13));
+- regmap_write(regs, CCSR_SSI_SRCCR,
+- CCSR_SSI_SxCCR_WL(17) | CCSR_SSI_SxCCR_DC(13));
++ write_ssi(CCSR_SSI_SxCCR_WL(17) | CCSR_SSI_SxCCR_DC(13),
++ &ssi->stccr);
++ write_ssi(CCSR_SSI_SxCCR_WL(17) | CCSR_SSI_SxCCR_DC(13),
++ &ssi->srccr);
+
+ /*
+ * Enable AC97 mode and startup the SSI
+ */
+- regmap_write(regs, CCSR_SSI_SACNT,
+- CCSR_SSI_SACNT_AC97EN | CCSR_SSI_SACNT_FV);
+- regmap_write(regs, CCSR_SSI_SACCDIS, 0xff);
+- regmap_write(regs, CCSR_SSI_SACCEN, 0x300);
++ write_ssi(CCSR_SSI_SACNT_AC97EN | CCSR_SSI_SACNT_FV,
++ &ssi->sacnt);
++ write_ssi(0xff, &ssi->saccdis);
++ write_ssi(0x300, &ssi->saccen);
+
+ /*
+ * Enable SSI, Transmit and Receive. AC97 has to communicate with the
+ * codec before a stream is started.
+ */
+- regmap_update_bits(regs, CCSR_SSI_SCR,
+- CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE | CCSR_SSI_SCR_RE,
+- CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE | CCSR_SSI_SCR_RE);
+-
+- regmap_write(regs, CCSR_SSI_SOR, CCSR_SSI_SOR_WAIT(3));
+-}
+-
+-/**
+- * fsl_ssi_startup: create a new substream
+- *
+- * This is the first function called when a stream is opened.
+- *
+- * If this is the first stream open, then grab the IRQ and program most of
+- * the SSI registers.
+- */
+-static int fsl_ssi_startup(struct snd_pcm_substream *substream,
+- struct snd_soc_dai *dai)
+-{
+- struct snd_soc_pcm_runtime *rtd = substream->private_data;
+- struct fsl_ssi_private *ssi_private =
+- snd_soc_dai_get_drvdata(rtd->cpu_dai);
+- int ret;
+-
+- ret = clk_prepare_enable(ssi_private->clk);
+- if (ret)
+- return ret;
+-
+- /* When using dual fifo mode, it is safer to ensure an even period
+- * size. If appearing to an odd number while DMA always starts its
+- * task from fifo0, fifo1 would be neglected at the end of each
+- * period. But SSI would still access fifo1 with an invalid data.
+- */
+- if (ssi_private->use_dual_fifo)
+- snd_pcm_hw_constraint_step(substream->runtime, 0,
+- SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2);
++ write_ssi_mask(&ssi->scr, 0, CCSR_SSI_SCR_SSIEN |
++ CCSR_SSI_SCR_TE | CCSR_SSI_SCR_RE);
+
+- return 0;
++ write_ssi(CCSR_SSI_SOR_WAIT(3), &ssi->sor);
+ }
+
+-/**
+- * fsl_ssi_shutdown: shutdown the SSI
+- *
+- */
+-static void fsl_ssi_shutdown(struct snd_pcm_substream *substream,
+- struct snd_soc_dai *dai)
++static int fsl_ssi_setup(struct fsl_ssi_private *ssi_private)
+ {
+- struct snd_soc_pcm_runtime *rtd = substream->private_data;
+- struct fsl_ssi_private *ssi_private =
+- snd_soc_dai_get_drvdata(rtd->cpu_dai);
+-
+- clk_disable_unprepare(ssi_private->clk);
+-
+-}
++ struct ccsr_ssi __iomem *ssi = ssi_private->ssi;
++ u8 wm;
++ int synchronous = ssi_private->cpu_dai_drv.symmetric_rates;
+
+-/**
+- * fsl_ssi_set_bclk - configure Digital Audio Interface bit clock
+- *
+- * Note: This function can be only called when using SSI as DAI master
+- *
+- * Quick instruction for parameters:
+- * freq: Output BCLK frequency = samplerate * 32 (fixed) * channels
+- * dir: SND_SOC_CLOCK_OUT -> TxBCLK, SND_SOC_CLOCK_IN -> RxBCLK.
+- */
+-static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
+- struct snd_soc_dai *cpu_dai,
+- struct snd_pcm_hw_params *hw_params)
+-{
+- struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
+- struct regmap *regs = ssi_private->regs;
+- int synchronous = ssi_private->cpu_dai_drv.symmetric_rates, ret;
+- u32 pm = 999, div2, psr, stccr, mask, afreq, factor, i;
+- unsigned long clkrate, baudrate, tmprate;
+- u64 sub, savesub = 100000;
+- unsigned int freq;
+- bool baudclk_is_used;
++ fsl_ssi_setup_reg_vals(ssi_private);
+
+- /* Prefer the explicitly set bitclock frequency */
+- if (ssi_private->bitclk_freq)
+- freq = ssi_private->bitclk_freq;
++ if (ssi_private->imx_ac97)
++ ssi_private->i2s_mode = CCSR_SSI_SCR_I2S_MODE_NORMAL | CCSR_SSI_SCR_NET;
+ else
+- freq = params_channels(hw_params) * 32 * params_rate(hw_params);
+-
+- /* Don't apply it to any non-baudclk circumstance */
+- if (IS_ERR(ssi_private->baudclk))
+- return -EINVAL;
+-
+- baudclk_is_used = ssi_private->baudclk_streams & ~(BIT(substream->stream));
++ ssi_private->i2s_mode = CCSR_SSI_SCR_I2S_MODE_SLAVE;
+
+- /* It should be already enough to divide clock by setting pm alone */
+- psr = 0;
+- div2 = 0;
+-
+- factor = (div2 + 1) * (7 * psr + 1) * 2;
++ /*
++ * Section 16.5 of the MPC8610 reference manual says that the SSI needs
++ * to be disabled before updating the registers we set here.
++ */
++ write_ssi_mask(&ssi->scr, CCSR_SSI_SCR_SSIEN, 0);
+
+- for (i = 0; i < 255; i++) {
+- tmprate = freq * factor * (i + 1);
++ /*
++ * Program the SSI into I2S Slave Non-Network Synchronous mode. Also
++ * enable the transmit and receive FIFO.
++ *
++	 * FIXME: Little-endian samples require a different shift direction
++ */
++ write_ssi_mask(&ssi->scr,
++ CCSR_SSI_SCR_I2S_MODE_MASK | CCSR_SSI_SCR_SYN,
++ CCSR_SSI_SCR_TFR_CLK_DIS |
++ ssi_private->i2s_mode |
++ (synchronous ? CCSR_SSI_SCR_SYN : 0));
+
+- if (baudclk_is_used)
+- clkrate = clk_get_rate(ssi_private->baudclk);
+- else
+- clkrate = clk_round_rate(ssi_private->baudclk, tmprate);
++ write_ssi(CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TFSI |
++ CCSR_SSI_STCR_TEFS | CCSR_SSI_STCR_TSCKP, &ssi->stcr);
+
+- /*
+- * Hardware limitation: The bclk rate must be
+- * never greater than 1/5 IPG clock rate
+- */
+- if (clkrate * 5 > clk_get_rate(ssi_private->clk))
+- continue;
++ write_ssi(CCSR_SSI_SRCR_RXBIT0 | CCSR_SSI_SRCR_RFSI |
++ CCSR_SSI_SRCR_REFS | CCSR_SSI_SRCR_RSCKP, &ssi->srcr);
+
+- clkrate /= factor;
+- afreq = clkrate / (i + 1);
++ /*
++ * The DC and PM bits are only used if the SSI is the clock master.
++ */
+
+- if (freq == afreq)
+- sub = 0;
+- else if (freq / afreq == 1)
+- sub = freq - afreq;
+- else if (afreq / freq == 1)
+- sub = afreq - freq;
+- else
+- continue;
++ /*
++	 * Set the watermark for transmit FIFO 0 and receive FIFO 0. We don't
++	 * use FIFO 1. We program the transmit watermark to signal a DMA transfer
++ * if there are only two (or fewer) elements left in the FIFO. Two
++ * elements equals one frame (left channel, right channel). This value,
++ * however, depends on the depth of the transmit buffer.
++ *
++ * We set the watermark on the same level as the DMA burstsize. For
++ * fiq it is probably better to use the biggest possible watermark
++ * size.
++ */
++ if (ssi_private->use_dma)
++ wm = ssi_private->fifo_depth - 2;
++ else
++ wm = ssi_private->fifo_depth;
+
+- /* Calculate the fraction */
+- sub *= 100000;
+- do_div(sub, freq);
++ write_ssi(CCSR_SSI_SFCSR_TFWM0(wm) | CCSR_SSI_SFCSR_RFWM0(wm) |
++ CCSR_SSI_SFCSR_TFWM1(wm) | CCSR_SSI_SFCSR_RFWM1(wm),
++ &ssi->sfcsr);
+
+- if (sub < savesub) {
+- baudrate = tmprate;
+- savesub = sub;
+- pm = i;
+- }
++ /*
++ * For ac97 interrupts are enabled with the startup of the substream
++ * because it is also running without an active substream. Normally SSI
++ * is only enabled when there is a substream.
++ */
++ if (ssi_private->imx_ac97)
++ fsl_ssi_setup_ac97(ssi_private);
+
+- /* We are lucky */
+- if (savesub == 0)
+- break;
++ /*
++ * Set a default slot number so that there is no need for those common
++ * cases like I2S mode to call the extra set_tdm_slot() any more.
++ */
++ if (!ssi_private->imx_ac97) {
++ write_ssi_mask(&ssi->stccr, CCSR_SSI_SxCCR_DC_MASK,
++ CCSR_SSI_SxCCR_DC(2));
++ write_ssi_mask(&ssi->srccr, CCSR_SSI_SxCCR_DC_MASK,
++ CCSR_SSI_SxCCR_DC(2));
+ }
+
+- /* No proper pm found if it is still remaining the initial value */
+- if (pm == 999) {
+- dev_err(cpu_dai->dev, "failed to handle the required sysclk\n");
+- return -EINVAL;
++ if (ssi_private->use_dual_fifo) {
++ write_ssi_mask(&ssi->srcr, 0, CCSR_SSI_SRCR_RFEN1);
++ write_ssi_mask(&ssi->stcr, 0, CCSR_SSI_STCR_TFEN1);
++ write_ssi_mask(&ssi->scr, 0, CCSR_SSI_SCR_TCH_EN);
+ }
+
+- stccr = CCSR_SSI_SxCCR_PM(pm + 1) | (div2 ? CCSR_SSI_SxCCR_DIV2 : 0) |
+- (psr ? CCSR_SSI_SxCCR_PSR : 0);
+- mask = CCSR_SSI_SxCCR_PM_MASK | CCSR_SSI_SxCCR_DIV2 |
+- CCSR_SSI_SxCCR_PSR;
++ return 0;
++}
+
+- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK || synchronous)
+- regmap_update_bits(regs, CCSR_SSI_STCCR, mask, stccr);
+- else
+- regmap_update_bits(regs, CCSR_SSI_SRCCR, mask, stccr);
+
+- if (!baudclk_is_used) {
+- ret = clk_set_rate(ssi_private->baudclk, baudrate);
+- if (ret) {
+- dev_err(cpu_dai->dev, "failed to set baudclk rate\n");
+- return -EINVAL;
+- }
+- }
++/**
++ * fsl_ssi_startup: create a new substream
++ *
++ * This is the first function called when a stream is opened.
++ *
++ * If this is the first stream open, then grab the IRQ and program most of
++ * the SSI registers.
++ */
++static int fsl_ssi_startup(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct fsl_ssi_private *ssi_private =
++ snd_soc_dai_get_drvdata(rtd->cpu_dai);
++ unsigned long flags;
+
+- return 0;
+-}
++ pm_runtime_get_sync(dai->dev);
+
+-static int fsl_ssi_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
+- int clk_id, unsigned int freq, int dir)
+-{
+- struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
++ /* First, we only do fsl_ssi_setup() when SSI is going to be active.
++ * Second, fsl_ssi_setup was already called by ac97_init earlier if
++ * the driver is in ac97 mode.
++ */
++ if (!dai->active && !ssi_private->imx_ac97) {
++ fsl_ssi_setup(ssi_private);
++ spin_lock_irqsave(&ssi_private->baudclk_lock, flags);
++ ssi_private->baudclk_locked = false;
++ spin_unlock_irqrestore(&ssi_private->baudclk_lock, flags);
++ }
+
+- ssi_private->bitclk_freq = freq;
++ /* When using dual fifo mode, it is safer to ensure an even period
++	 * size. If the period size is odd, then because DMA always starts its
++	 * task from fifo0, fifo1 would be neglected at the end of each
++	 * period, while the SSI would still access fifo1 with invalid data.
++ */
++ if (ssi_private->use_dual_fifo)
++ snd_pcm_hw_constraint_step(substream->runtime, 0,
++ SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2);
+
+ return 0;
+ }
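
Per the comment above, dual-FIFO DMA alternates fifo0/fifo1 and each period restarts on fifo0, so an odd period size would leave fifo1 one sample short every period while the SSI keeps draining it. The constraint, isolated as a sketch:

    #include <sound/pcm.h>

    /* sketch: force even period sizes so both FIFOs stay in lock-step */
    static int example_dual_fifo_constraint(struct snd_pcm_runtime *runtime)
    {
            return snd_pcm_hw_constraint_step(runtime, 0,
                                              SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2);
    }
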
+@@ -698,17 +847,12 @@
+ struct snd_pcm_hw_params *hw_params, struct snd_soc_dai *cpu_dai)
+ {
+ struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
+- struct regmap *regs = ssi_private->regs;
++ struct ccsr_ssi __iomem *ssi = ssi_private->ssi;
+ unsigned int channels = params_channels(hw_params);
+ unsigned int sample_size =
+ snd_pcm_format_width(params_format(hw_params));
+ u32 wl = CCSR_SSI_SxCCR_WL(sample_size);
+- int ret;
+- u32 scr_val;
+- int enabled;
+-
+- regmap_read(regs, CCSR_SSI_SCR, &scr_val);
+- enabled = scr_val & CCSR_SSI_SCR_SSIEN;
++ int enabled = read_ssi(&ssi->scr) & CCSR_SSI_SCR_SSIEN;
+
+ /*
+ * If we're in synchronous mode, and the SSI is already enabled,
+@@ -717,38 +861,6 @@
+ if (enabled && ssi_private->cpu_dai_drv.symmetric_rates)
+ return 0;
+
+- if (fsl_ssi_is_i2s_master(ssi_private)) {
+- ret = fsl_ssi_set_bclk(substream, cpu_dai, hw_params);
+- if (ret)
+- return ret;
+-
+- /* Do not enable the clock if it is already enabled */
+- if (!(ssi_private->baudclk_streams & BIT(substream->stream))) {
+- ret = clk_prepare_enable(ssi_private->baudclk);
+- if (ret)
+- return ret;
+-
+- ssi_private->baudclk_streams |= BIT(substream->stream);
+- }
+- }
+-
+- if (!fsl_ssi_is_ac97(ssi_private)) {
+- u8 i2smode;
+- /*
+- * Switch to normal net mode in order to have a frame sync
+- * signal every 32 bits instead of 16 bits
+- */
+- if (fsl_ssi_is_i2s_cbm_cfs(ssi_private) && sample_size == 16)
+- i2smode = CCSR_SSI_SCR_I2S_MODE_NORMAL |
+- CCSR_SSI_SCR_NET;
+- else
+- i2smode = ssi_private->i2s_mode;
+-
+- regmap_update_bits(regs, CCSR_SSI_SCR,
+- CCSR_SSI_SCR_NET | CCSR_SSI_SCR_I2S_MODE_MASK,
+- channels == 1 ? 0 : i2smode);
+- }
+-
+ /*
+ * FIXME: The documentation says that SxCCR[WL] should not be
+ * modified while the SSI is enabled. The only time this can
+@@ -762,80 +874,49 @@
+ /* In synchronous mode, the SSI uses STCCR for capture */
+ if ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ||
+ ssi_private->cpu_dai_drv.symmetric_rates)
+- regmap_update_bits(regs, CCSR_SSI_STCCR, CCSR_SSI_SxCCR_WL_MASK,
+- wl);
++ write_ssi_mask(&ssi->stccr, CCSR_SSI_SxCCR_WL_MASK, wl);
+ else
+- regmap_update_bits(regs, CCSR_SSI_SRCCR, CCSR_SSI_SxCCR_WL_MASK,
+- wl);
++ write_ssi_mask(&ssi->srccr, CCSR_SSI_SxCCR_WL_MASK, wl);
+
+- return 0;
+-}
+-
+-static int fsl_ssi_hw_free(struct snd_pcm_substream *substream,
+- struct snd_soc_dai *cpu_dai)
+-{
+- struct snd_soc_pcm_runtime *rtd = substream->private_data;
+- struct fsl_ssi_private *ssi_private =
+- snd_soc_dai_get_drvdata(rtd->cpu_dai);
+-
+- if (fsl_ssi_is_i2s_master(ssi_private) &&
+- ssi_private->baudclk_streams & BIT(substream->stream)) {
+- clk_disable_unprepare(ssi_private->baudclk);
+- ssi_private->baudclk_streams &= ~BIT(substream->stream);
+- }
++ if (!ssi_private->imx_ac97)
++ write_ssi_mask(&ssi->scr,
++ CCSR_SSI_SCR_NET | CCSR_SSI_SCR_I2S_MODE_MASK,
++ channels == 1 ? 0 : ssi_private->i2s_mode);
+
+ return 0;
+ }
+
+-static int _fsl_ssi_set_dai_fmt(struct device *dev,
+- struct fsl_ssi_private *ssi_private,
+- unsigned int fmt)
++/**
++ * fsl_ssi_set_dai_fmt - configure Digital Audio Interface Format.
++ */
++static int fsl_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
+ {
+- struct regmap *regs = ssi_private->regs;
++ struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
++ struct ccsr_ssi __iomem *ssi = ssi_private->ssi;
+ u32 strcr = 0, stcr, srcr, scr, mask;
+- u8 wm;
+
+- ssi_private->dai_fmt = fmt;
+-
+- if (fsl_ssi_is_i2s_master(ssi_private) && IS_ERR(ssi_private->baudclk)) {
+- dev_err(dev, "baudclk is missing which is necessary for master mode\n");
+- return -EINVAL;
+- }
+-
+- fsl_ssi_setup_reg_vals(ssi_private);
+-
+- regmap_read(regs, CCSR_SSI_SCR, &scr);
+- scr &= ~(CCSR_SSI_SCR_SYN | CCSR_SSI_SCR_I2S_MODE_MASK);
+- scr |= CCSR_SSI_SCR_SYNC_TX_FS;
++ scr = read_ssi(&ssi->scr) & ~(CCSR_SSI_SCR_SYN | CCSR_SSI_SCR_I2S_MODE_MASK);
++ scr |= CCSR_SSI_SCR_NET;
+
+ mask = CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TFDIR | CCSR_SSI_STCR_TXDIR |
+ CCSR_SSI_STCR_TSCKP | CCSR_SSI_STCR_TFSI | CCSR_SSI_STCR_TFSL |
+ CCSR_SSI_STCR_TEFS;
+- regmap_read(regs, CCSR_SSI_STCR, &stcr);
+- regmap_read(regs, CCSR_SSI_SRCR, &srcr);
+- stcr &= ~mask;
+- srcr &= ~mask;
++ stcr = read_ssi(&ssi->stcr) & ~mask;
++ srcr = read_ssi(&ssi->srcr) & ~mask;
+
+- ssi_private->i2s_mode = CCSR_SSI_SCR_NET;
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+- case SND_SOC_DAIFMT_CBM_CFS:
+ case SND_SOC_DAIFMT_CBS_CFS:
+- ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_MASTER;
+- regmap_update_bits(regs, CCSR_SSI_STCCR,
+- CCSR_SSI_SxCCR_DC_MASK,
+- CCSR_SSI_SxCCR_DC(2));
+- regmap_update_bits(regs, CCSR_SSI_SRCCR,
+- CCSR_SSI_SxCCR_DC_MASK,
+- CCSR_SSI_SxCCR_DC(2));
++ ssi_private->i2s_mode = CCSR_SSI_SCR_I2S_MODE_MASTER;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+- ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_SLAVE;
++ ssi_private->i2s_mode = CCSR_SSI_SCR_I2S_MODE_SLAVE;
+ break;
+ default:
+ return -EINVAL;
+ }
++ scr |= ssi_private->i2s_mode;
+
+ /* Data on rising edge of bclk, frame low, 1clk before data */
+ strcr |= CCSR_SSI_STCR_TFSI | CCSR_SSI_STCR_TSCKP |
+@@ -855,13 +936,9 @@
+ strcr |= CCSR_SSI_STCR_TFSL | CCSR_SSI_STCR_TSCKP |
+ CCSR_SSI_STCR_TXBIT0;
+ break;
+- case SND_SOC_DAIFMT_AC97:
+- ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_NORMAL;
+- break;
+ default:
+ return -EINVAL;
+ }
+- scr |= ssi_private->i2s_mode;
+
+ /* DAI clock inversion */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+@@ -894,11 +971,6 @@
+ case SND_SOC_DAIFMT_CBM_CFM:
+ scr &= ~CCSR_SSI_SCR_SYS_CLK_EN;
+ break;
+- case SND_SOC_DAIFMT_CBM_CFS:
+- strcr &= ~CCSR_SSI_STCR_TXDIR;
+- strcr |= CCSR_SSI_STCR_TFDIR;
+- scr &= ~CCSR_SSI_SCR_SYS_CLK_EN;
+- break;
+ default:
+ return -EINVAL;
+ }
+@@ -912,54 +984,105 @@
+ scr |= CCSR_SSI_SCR_SYN;
+ }
+
+- regmap_write(regs, CCSR_SSI_STCR, stcr);
+- regmap_write(regs, CCSR_SSI_SRCR, srcr);
+- regmap_write(regs, CCSR_SSI_SCR, scr);
+-
+- /*
+- * Set the watermark for transmit FIFI 0 and receive FIFO 0. We don't
+- * use FIFO 1. We program the transmit water to signal a DMA transfer
+- * if there are only two (or fewer) elements left in the FIFO. Two
+- * elements equals one frame (left channel, right channel). This value,
+- * however, depends on the depth of the transmit buffer.
+- *
+- * We set the watermark on the same level as the DMA burstsize. For
+- * fiq it is probably better to use the biggest possible watermark
+- * size.
+- */
+- if (ssi_private->use_dma)
+- wm = ssi_private->fifo_depth - 2;
+- else
+- wm = ssi_private->fifo_depth;
+-
+- regmap_write(regs, CCSR_SSI_SFCSR,
+- CCSR_SSI_SFCSR_TFWM0(wm) | CCSR_SSI_SFCSR_RFWM0(wm) |
+- CCSR_SSI_SFCSR_TFWM1(wm) | CCSR_SSI_SFCSR_RFWM1(wm));
+-
+- if (ssi_private->use_dual_fifo) {
+- regmap_update_bits(regs, CCSR_SSI_SRCR, CCSR_SSI_SRCR_RFEN1,
+- CCSR_SSI_SRCR_RFEN1);
+- regmap_update_bits(regs, CCSR_SSI_STCR, CCSR_SSI_STCR_TFEN1,
+- CCSR_SSI_STCR_TFEN1);
+- regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_TCH_EN,
+- CCSR_SSI_SCR_TCH_EN);
+- }
+-
+- if (fmt & SND_SOC_DAIFMT_AC97)
+- fsl_ssi_setup_ac97(ssi_private);
++ write_ssi(stcr, &ssi->stcr);
++ write_ssi(srcr, &ssi->srcr);
++ write_ssi(scr, &ssi->scr);
+
+ return 0;
+-
+ }
+
+ /**
+- * fsl_ssi_set_dai_fmt - configure Digital Audio Interface Format.
++ * fsl_ssi_set_dai_sysclk - configure Digital Audio Interface bit clock
++ *
++ * Note: This function can be only called when using SSI as DAI master
++ *
++ * Quick instruction for parameters:
++ * freq: Output BCLK frequency = samplerate * 32 (fixed) * channels
++ * dir: SND_SOC_CLOCK_OUT -> TxBCLK, SND_SOC_CLOCK_IN -> RxBCLK.
+ */
+-static int fsl_ssi_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
++static int fsl_ssi_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
++ int clk_id, unsigned int freq, int dir)
+ {
+ struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
++ struct ccsr_ssi __iomem *ssi = ssi_private->ssi;
++ int synchronous = ssi_private->cpu_dai_drv.symmetric_rates, ret;
++ u32 pm = 999, div2, psr, stccr, mask, afreq, factor, i;
++ unsigned long flags, clkrate, baudrate, tmprate;
++ u64 sub, savesub = 100000;
++
++ /* Don't apply it to any non-baudclk circumstance */
++ if (IS_ERR(ssi_private->baudclk))
++ return -EINVAL;
++
++ /* It should be already enough to divide clock by setting pm alone */
++ psr = 0;
++ div2 = 0;
++
++ factor = (div2 + 1) * (7 * psr + 1) * 2;
++
++ for (i = 0; i < 255; i++) {
++ /* The bclk rate must be smaller than 1/5 sysclk rate */
++ if (factor * (i + 1) < 5)
++ continue;
++
++ tmprate = freq * factor * (i + 2);
++ clkrate = clk_round_rate(ssi_private->baudclk, tmprate);
++
++ do_div(clkrate, factor);
++ afreq = (u32)clkrate / (i + 1);
++
++ if (freq == afreq)
++ sub = 0;
++ else if (freq / afreq == 1)
++ sub = freq - afreq;
++ else if (afreq / freq == 1)
++ sub = afreq - freq;
++ else
++ continue;
++
++ /* Calculate the fraction */
++ sub *= 100000;
++ do_div(sub, freq);
++
++ if (sub < savesub) {
++ baudrate = tmprate;
++ savesub = sub;
++ pm = i;
++ }
++
++ /* We are lucky */
++ if (savesub == 0)
++ break;
++ }
++
++ /* No proper pm found if it is still remaining the initial value */
++ if (pm == 999) {
++ dev_err(cpu_dai->dev, "failed to handle the required sysclk\n");
++ return -EINVAL;
++ }
++
++ stccr = CCSR_SSI_SxCCR_PM(pm + 1) | (div2 ? CCSR_SSI_SxCCR_DIV2 : 0) |
++ (psr ? CCSR_SSI_SxCCR_PSR : 0);
++ mask = CCSR_SSI_SxCCR_PM_MASK | CCSR_SSI_SxCCR_DIV2 | CCSR_SSI_SxCCR_PSR;
++
++ if (dir == SND_SOC_CLOCK_OUT || synchronous)
++ write_ssi_mask(&ssi->stccr, mask, stccr);
++ else
++ write_ssi_mask(&ssi->srccr, mask, stccr);
++
++ spin_lock_irqsave(&ssi_private->baudclk_lock, flags);
++ if (!ssi_private->baudclk_locked) {
++ ret = clk_set_rate(ssi_private->baudclk, baudrate);
++ if (ret) {
++ spin_unlock_irqrestore(&ssi_private->baudclk_lock, flags);
++ dev_err(cpu_dai->dev, "failed to set baudclk rate\n");
++ return -EINVAL;
++ }
++ ssi_private->baudclk_locked = true;
++ }
++ spin_unlock_irqrestore(&ssi_private->baudclk_lock, flags);
+
+- return _fsl_ssi_set_dai_fmt(cpu_dai->dev, ssi_private, fmt);
++ return 0;
+ }
+
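
fsl_ssi_set_dai_sysclk() above searches PM = 0..254 for the prescaler whose divided rate lands closest to the requested bit clock, scoring candidates by fractional error in units of 1/100000. A simplified standalone sketch of that search; it skips the clk_round_rate() round-trip and the within-2x filter of the original and assumes freq and sysclk are nonzero:

    /* returns the best PM index for bclk = sysclk / (factor * (pm + 1)) */
    static int example_pick_pm(unsigned long sysclk, unsigned int freq,
                               unsigned int factor)
    {
            unsigned long long best_err = ~0ULL;
            int best_pm = -1;
            unsigned int i;

            for (i = 0; i < 255; i++) {
                    unsigned int afreq = sysclk / (factor * (i + 1));
                    unsigned long long err;

                    err = afreq > freq ? afreq - freq : freq - afreq;
                    err = err * 100000ULL / freq;   /* fractional error */

                    if (err < best_err) {
                            best_err = err;
                            best_pm = i;            /* set on first pass at latest */
                    }
                    if (best_err == 0)
                            break;                  /* exact divider found */
            }
            return best_pm;
    }
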
+ /**
+@@ -971,34 +1094,31 @@
+ u32 rx_mask, int slots, int slot_width)
+ {
+ struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(cpu_dai);
+- struct regmap *regs = ssi_private->regs;
++ struct ccsr_ssi __iomem *ssi = ssi_private->ssi;
+ u32 val;
+
+ /* The slot number should be >= 2 if using Network mode or I2S mode */
+- regmap_read(regs, CCSR_SSI_SCR, &val);
+- val &= CCSR_SSI_SCR_I2S_MODE_MASK | CCSR_SSI_SCR_NET;
++ val = read_ssi(&ssi->scr) & (CCSR_SSI_SCR_I2S_MODE_MASK | CCSR_SSI_SCR_NET);
+ if (val && slots < 2) {
+ dev_err(cpu_dai->dev, "slot number should be >= 2 in I2S or NET\n");
+ return -EINVAL;
+ }
+
+- regmap_update_bits(regs, CCSR_SSI_STCCR, CCSR_SSI_SxCCR_DC_MASK,
++ write_ssi_mask(&ssi->stccr, CCSR_SSI_SxCCR_DC_MASK,
+ CCSR_SSI_SxCCR_DC(slots));
+- regmap_update_bits(regs, CCSR_SSI_SRCCR, CCSR_SSI_SxCCR_DC_MASK,
++ write_ssi_mask(&ssi->srccr, CCSR_SSI_SxCCR_DC_MASK,
+ CCSR_SSI_SxCCR_DC(slots));
+
+ /* The register SxMSKs needs SSI to provide essential clock due to
+ * hardware design. So we here temporarily enable SSI to set them.
+ */
+- regmap_read(regs, CCSR_SSI_SCR, &val);
+- val &= CCSR_SSI_SCR_SSIEN;
+- regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_SSIEN,
+- CCSR_SSI_SCR_SSIEN);
++ val = read_ssi(&ssi->scr) & CCSR_SSI_SCR_SSIEN;
++ write_ssi_mask(&ssi->scr, 0, CCSR_SSI_SCR_SSIEN);
+
+- regmap_write(regs, CCSR_SSI_STMSK, ~tx_mask);
+- regmap_write(regs, CCSR_SSI_SRMSK, ~rx_mask);
++ write_ssi(tx_mask, &ssi->stmsk);
++ write_ssi(rx_mask, &ssi->srmsk);
+
+- regmap_update_bits(regs, CCSR_SSI_SCR, CCSR_SSI_SCR_SSIEN, val);
++ write_ssi_mask(&ssi->scr, CCSR_SSI_SCR_SSIEN, val);
+
+ return 0;
+ }
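
fsl_ssi_set_dai_tdm_slot() above programs SxCCR[DC] with the slot count and the SxMSK registers with the slot masks. Note the two versions differ in mask polarity: the replaced regmap code wrote ~tx_mask/~rx_mask, inverting ALSA's active-slot convention into the SSI's masked-slot convention, while the restored code writes the masks through unchanged. A sketch of the inverted convention:

    #include <linux/types.h>

    /* sketch: ALSA's set_tdm_slot() masks have bit N set when slot N
     * carries data; the SSI's STMSK/SRMSK have bit N set when slot N is
     * masked out, hence the complement in the regmap-era code. */
    static inline u32 example_ssi_mask_from_alsa(u32 active_slots)
    {
            return ~active_slots;
    }
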
+@@ -1017,7 +1137,8 @@
+ {
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+- struct regmap *regs = ssi_private->regs;
++ struct ccsr_ssi __iomem *ssi = ssi_private->ssi;
++ unsigned long flags;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+@@ -1027,6 +1148,7 @@
+ fsl_ssi_tx_config(ssi_private, true);
+ else
+ fsl_ssi_rx_config(ssi_private, true);
++ dump_reg(ssi);
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+@@ -1036,27 +1158,40 @@
+ fsl_ssi_tx_config(ssi_private, false);
+ else
+ fsl_ssi_rx_config(ssi_private, false);
++
++ if (!ssi_private->imx_ac97 && (read_ssi(&ssi->scr) &
++ (CCSR_SSI_SCR_TE | CCSR_SSI_SCR_RE)) == 0) {
++ spin_lock_irqsave(&ssi_private->baudclk_lock, flags);
++ ssi_private->baudclk_locked = false;
++ spin_unlock_irqrestore(&ssi_private->baudclk_lock, flags);
++ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+- if (fsl_ssi_is_ac97(ssi_private)) {
++ if (ssi_private->imx_ac97) {
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+- regmap_write(regs, CCSR_SSI_SOR, CCSR_SSI_SOR_TX_CLR);
++ write_ssi(CCSR_SSI_SOR_TX_CLR, &ssi->sor);
+ else
+- regmap_write(regs, CCSR_SSI_SOR, CCSR_SSI_SOR_RX_CLR);
++ write_ssi(CCSR_SSI_SOR_RX_CLR, &ssi->sor);
+ }
+
+ return 0;
+ }
+
++static void fsl_ssi_shutdown(struct snd_pcm_substream *substream,
++ struct snd_soc_dai *dai)
++{
++ pm_runtime_put_sync(dai->dev);
++}
++
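
startup() and shutdown() in this version bracket every substream with pm_runtime_get_sync()/pm_runtime_put_sync(), which is what drives the busfreq runtime hooks registered in fsl_ssi_pm. The pairing in isolation, with hypothetical example_* names:

    #include <linux/pm_runtime.h>
    #include <sound/soc.h>

    static int example_startup(struct snd_pcm_substream *substream,
                               struct snd_soc_dai *dai)
    {
            /* usage count 0 -> 1 runs the runtime_resume callback */
            pm_runtime_get_sync(dai->dev);
            return 0;
    }

    static void example_shutdown(struct snd_pcm_substream *substream,
                                 struct snd_soc_dai *dai)
    {
            /* usage count 1 -> 0 allows runtime_suspend to run */
            pm_runtime_put_sync(dai->dev);
    }
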
+ static int fsl_ssi_dai_probe(struct snd_soc_dai *dai)
+ {
+ struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(dai);
+
+- if (ssi_private->soc->imx && ssi_private->use_dma) {
++ if (ssi_private->ssi_on_imx && ssi_private->use_dma) {
+ dai->playback_dma_data = &ssi_private->dma_params_tx;
+ dai->capture_dma_data = &ssi_private->dma_params_rx;
+ }
+@@ -1066,27 +1201,24 @@
+
+ static const struct snd_soc_dai_ops fsl_ssi_dai_ops = {
+ .startup = fsl_ssi_startup,
+- .shutdown = fsl_ssi_shutdown,
+ .hw_params = fsl_ssi_hw_params,
+- .hw_free = fsl_ssi_hw_free,
+ .set_fmt = fsl_ssi_set_dai_fmt,
+ .set_sysclk = fsl_ssi_set_dai_sysclk,
+ .set_tdm_slot = fsl_ssi_set_dai_tdm_slot,
+ .trigger = fsl_ssi_trigger,
++ .shutdown = fsl_ssi_shutdown,
+ };
+
+ /* Template for the CPU dai driver structure */
+ static struct snd_soc_dai_driver fsl_ssi_dai_template = {
+ .probe = fsl_ssi_dai_probe,
+ .playback = {
+- .stream_name = "CPU-Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = FSLSSI_I2S_RATES,
+ .formats = FSLSSI_I2S_FORMATS,
+ },
+ .capture = {
+- .stream_name = "CPU-Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = FSLSSI_I2S_RATES,
+@@ -1100,7 +1232,7 @@
+ };
+
+ static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
+- .bus_control = true,
++// .ac97_control = 1,
+ .playback = {
+ .stream_name = "AC97 Playback",
+ .channels_min = 2,
+@@ -1121,10 +1253,15 @@
+
+ static struct fsl_ssi_private *fsl_ac97_data;
+
++static void fsl_ssi_ac97_init(void)
++{
++ fsl_ssi_setup(fsl_ac97_data);
++}
++
+ static void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
+ unsigned short val)
+ {
+- struct regmap *regs = fsl_ac97_data->regs;
++ struct ccsr_ssi *ssi = fsl_ac97_data->ssi;
+ unsigned int lreg;
+ unsigned int lval;
+
+@@ -1133,12 +1270,12 @@
+
+
+ lreg = reg << 12;
+- regmap_write(regs, CCSR_SSI_SACADD, lreg);
++ write_ssi(lreg, &ssi->sacadd);
+
+ lval = val << 4;
+- regmap_write(regs, CCSR_SSI_SACDAT, lval);
++ write_ssi(lval , &ssi->sacdat);
+
+- regmap_update_bits(regs, CCSR_SSI_SACNT, CCSR_SSI_SACNT_RDWR_MASK,
++ write_ssi_mask(&ssi->sacnt, CCSR_SSI_SACNT_RDWR_MASK,
+ CCSR_SSI_SACNT_WR);
+ udelay(100);
+ }
+@@ -1146,21 +1283,19 @@
+ static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
+ unsigned short reg)
+ {
+- struct regmap *regs = fsl_ac97_data->regs;
++ struct ccsr_ssi *ssi = fsl_ac97_data->ssi;
+
+ unsigned short val = -1;
+- u32 reg_val;
+ unsigned int lreg;
+
+ lreg = (reg & 0x7f) << 12;
+- regmap_write(regs, CCSR_SSI_SACADD, lreg);
+- regmap_update_bits(regs, CCSR_SSI_SACNT, CCSR_SSI_SACNT_RDWR_MASK,
++ write_ssi(lreg, &ssi->sacadd);
++ write_ssi_mask(&ssi->sacnt, CCSR_SSI_SACNT_RDWR_MASK,
+ CCSR_SSI_SACNT_RD);
+
+ udelay(100);
+
+- regmap_read(regs, CCSR_SSI_SACDAT, &reg_val);
+- val = (reg_val >> 4) & 0xffff;
++ val = (read_ssi(&ssi->sacdat) >> 4) & 0xffff;
+
+ return val;
+ }
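
The two helpers above implement the AC'97 slow-control channel: the 7-bit codec register index is shifted into SACADD (reg << 12), the 16-bit data sits in SACDAT bits 19:4 (hence the >> 4 on read), and SACNT_RD/WR kicks the transaction with a 100 us settle. A hedged usage sketch, assuming a live snd_ac97 handle; AC97_MASTER is the standard master-volume register:

    #include <sound/ac97_codec.h>

    static void example_ac97_roundtrip(struct snd_ac97 *ac97)
    {
            unsigned short vol;

            /* unmute master volume at 0 dB attenuation */
            fsl_ssi_ac97_write(ac97, AC97_MASTER, 0x0000);
            vol = fsl_ssi_ac97_read(ac97, AC97_MASTER);
            (void)vol;
    }
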
+@@ -1185,112 +1320,20 @@
+ }
+ }
+
+-static int fsl_ssi_imx_probe(struct platform_device *pdev,
+- struct fsl_ssi_private *ssi_private, void __iomem *iomem)
+-{
+- struct device_node *np = pdev->dev.of_node;
+- u32 dmas[4];
+- int ret;
+-
+- if (ssi_private->has_ipg_clk_name)
+- ssi_private->clk = devm_clk_get(&pdev->dev, "ipg");
+- else
+- ssi_private->clk = devm_clk_get(&pdev->dev, NULL);
+- if (IS_ERR(ssi_private->clk)) {
+- ret = PTR_ERR(ssi_private->clk);
+- dev_err(&pdev->dev, "could not get clock: %d\n", ret);
+- return ret;
+- }
+-
+- if (!ssi_private->has_ipg_clk_name) {
+- ret = clk_prepare_enable(ssi_private->clk);
+- if (ret) {
+- dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
+- return ret;
+- }
+- }
+-
+- /* For those SLAVE implementations, we ingore non-baudclk cases
+- * and, instead, abandon MASTER mode that needs baud clock.
+- */
+- ssi_private->baudclk = devm_clk_get(&pdev->dev, "baud");
+- if (IS_ERR(ssi_private->baudclk))
+- dev_dbg(&pdev->dev, "could not get baud clock: %ld\n",
+- PTR_ERR(ssi_private->baudclk));
+-
+- /*
+- * We have burstsize be "fifo_depth - 2" to match the SSI
+- * watermark setting in fsl_ssi_startup().
+- */
+- ssi_private->dma_params_tx.maxburst = ssi_private->fifo_depth - 2;
+- ssi_private->dma_params_rx.maxburst = ssi_private->fifo_depth - 2;
+- ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0;
+- ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0;
+-
+- ret = of_property_read_u32_array(np, "dmas", dmas, 4);
+- if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
+- ssi_private->use_dual_fifo = true;
+- /* When using dual fifo mode, we need to keep watermark
+- * as even numbers due to dma script limitation.
+- */
+- ssi_private->dma_params_tx.maxburst &= ~0x1;
+- ssi_private->dma_params_rx.maxburst &= ~0x1;
+- }
+-
+- if (!ssi_private->use_dma) {
+-
+- /*
+- * Some boards use an incompatible codec. To get it
+- * working, we are using imx-fiq-pcm-audio, that
+- * can handle those codecs. DMA is not possible in this
+- * situation.
+- */
+-
+- ssi_private->fiq_params.irq = ssi_private->irq;
+- ssi_private->fiq_params.base = iomem;
+- ssi_private->fiq_params.dma_params_rx =
+- &ssi_private->dma_params_rx;
+- ssi_private->fiq_params.dma_params_tx =
+- &ssi_private->dma_params_tx;
+-
+- ret = imx_pcm_fiq_init(pdev, &ssi_private->fiq_params);
+- if (ret)
+- goto error_pcm;
+- } else {
+- ret = imx_pcm_dma_init(pdev);
+- if (ret)
+- goto error_pcm;
+- }
+-
+- return 0;
+-
+-error_pcm:
+-
+- if (!ssi_private->has_ipg_clk_name)
+- clk_disable_unprepare(ssi_private->clk);
+- return ret;
+-}
+-
+-static void fsl_ssi_imx_clean(struct platform_device *pdev,
+- struct fsl_ssi_private *ssi_private)
+-{
+- if (!ssi_private->use_dma)
+- imx_pcm_fiq_exit(pdev);
+- if (!ssi_private->has_ipg_clk_name)
+- clk_disable_unprepare(ssi_private->clk);
+-}
+-
+ static int fsl_ssi_probe(struct platform_device *pdev)
+ {
+ struct fsl_ssi_private *ssi_private;
+ int ret = 0;
++ struct device_attribute *dev_attr = NULL;
+ struct device_node *np = pdev->dev.of_node;
++ u32 dmas[4];
+ const struct of_device_id *of_id;
++ enum fsl_ssi_type hw_type;
+ const char *p, *sprop;
+ const uint32_t *iprop;
+- struct resource *res;
+- void __iomem *iomem;
++ struct resource res;
+ char name[64];
++ bool ac97 = false;
+
+ /* SSIs that are not connected on the board should have a
+ * status = "disabled"
+@@ -1300,32 +1343,39 @@
+ return -ENODEV;
+
+ of_id = of_match_device(fsl_ssi_ids, &pdev->dev);
+- if (!of_id || !of_id->data)
++ if (!of_id)
++ return -EINVAL;
++ hw_type = (enum fsl_ssi_type) of_id->data;
++
++ sprop = of_get_property(np, "fsl,mode", NULL);
++ if (!sprop) {
++ dev_err(&pdev->dev, "fsl,mode property is necessary\n");
+ return -EINVAL;
++ }
++ if (!strcmp(sprop, "ac97-slave"))
++ ac97 = true;
+
+- ssi_private = devm_kzalloc(&pdev->dev, sizeof(*ssi_private),
+- GFP_KERNEL);
++ /* The DAI name is the last part of the full name of the node. */
++ p = strrchr(np->full_name, '/') + 1;
++ ssi_private = devm_kzalloc(&pdev->dev, sizeof(*ssi_private) + strlen(p),
++ GFP_KERNEL);
+ if (!ssi_private) {
+ dev_err(&pdev->dev, "could not allocate DAI object\n");
+ return -ENOMEM;
+ }
+
+- ssi_private->soc = of_id->data;
+-
+- sprop = of_get_property(np, "fsl,mode", NULL);
+- if (sprop) {
+- if (!strcmp(sprop, "ac97-slave"))
+- ssi_private->dai_fmt = SND_SOC_DAIFMT_AC97;
+- }
++ strcpy(ssi_private->name, p);
+
+ ssi_private->use_dma = !of_property_read_bool(np,
+ "fsl,fiq-stream-filter");
++ ssi_private->hw_type = hw_type;
+
+- if (fsl_ssi_is_ac97(ssi_private)) {
++ if (ac97) {
+ memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_ac97_dai,
+ sizeof(fsl_ssi_ac97_dai));
+
+ fsl_ac97_data = ssi_private;
++ ssi_private->imx_ac97 = true;
+
+ snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
+ } else {
+@@ -1333,33 +1383,25 @@
+ memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
+ sizeof(fsl_ssi_dai_template));
+ }
+- ssi_private->cpu_dai_drv.name = dev_name(&pdev->dev);
++ ssi_private->cpu_dai_drv.name = ssi_private->name;
+
+- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- iomem = devm_ioremap_resource(&pdev->dev, res);
+- if (IS_ERR(iomem))
+- return PTR_ERR(iomem);
+- ssi_private->ssi_phys = res->start;
+-
+- ret = of_property_match_string(np, "clock-names", "ipg");
+- if (ret < 0) {
+- ssi_private->has_ipg_clk_name = false;
+- ssi_private->regs = devm_regmap_init_mmio(&pdev->dev, iomem,
+- &fsl_ssi_regconfig);
+- } else {
+- ssi_private->has_ipg_clk_name = true;
+- ssi_private->regs = devm_regmap_init_mmio_clk(&pdev->dev,
+- "ipg", iomem, &fsl_ssi_regconfig);
+- }
+- if (IS_ERR(ssi_private->regs)) {
+- dev_err(&pdev->dev, "Failed to init register map\n");
+- return PTR_ERR(ssi_private->regs);
++ /* Get the addresses and IRQ */
++ ret = of_address_to_resource(np, 0, &res);
++ if (ret) {
++ dev_err(&pdev->dev, "could not determine device resources\n");
++ return ret;
++ }
++ ssi_private->ssi = of_iomap(np, 0);
++ if (!ssi_private->ssi) {
++ dev_err(&pdev->dev, "could not map device resources\n");
++ return -ENOMEM;
+ }
++ ssi_private->ssi_phys = res.start;
+
+- ssi_private->irq = platform_get_irq(pdev, 0);
+- if (ssi_private->irq < 0) {
+- dev_err(&pdev->dev, "no irq for node %s\n", pdev->name);
+- return ssi_private->irq;
++ ssi_private->irq = irq_of_parse_and_map(np, 0);
++ if (!ssi_private->irq) {
++ dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
++ return -ENXIO;
+ }
+
+ /* Are the RX and the TX clocks locked? */
+@@ -1377,43 +1419,153 @@
+ /* Older 8610 DTs didn't have the fifo-depth property */
+ ssi_private->fifo_depth = 8;
+
+- dev_set_drvdata(&pdev->dev, ssi_private);
++ ssi_private->baudclk_locked = false;
++ spin_lock_init(&ssi_private->baudclk_lock);
+
+- if (ssi_private->soc->imx) {
+- ret = fsl_ssi_imx_probe(pdev, ssi_private, iomem);
+- if (ret)
+- return ret;
++ /*
++ * imx51 and later SoCs have a slightly different IP that allows the
++ * SSI configuration while the SSI unit is running.
++ *
++ * More important, it is necessary on those SoCs to configure the
++	 * separate TX/RX DMA bits just before starting the stream
++ * (fsl_ssi_trigger). The SDMA unit has to be configured before fsl_ssi
++ * sends any DMA requests to the SDMA unit, otherwise it is not defined
++ * how the SDMA unit handles the DMA request.
++ *
++ * SDMA units are present on devices starting at imx35 but the imx35
++ * reference manual states that the DMA bits should not be changed
++ * while the SSI unit is running (SSIEN). So we support the necessary
++ * online configuration of fsl-ssi starting at imx51.
++ */
++ switch (hw_type) {
++ case FSL_SSI_MCP8610:
++ case FSL_SSI_MX21:
++ case FSL_SSI_MX35:
++ ssi_private->offline_config = true;
++ break;
++ case FSL_SSI_MX51:
++ ssi_private->offline_config = false;
++ break;
+ }
+
+- ret = devm_snd_soc_register_component(&pdev->dev, &fsl_ssi_component,
+- &ssi_private->cpu_dai_drv, 1);
+- if (ret) {
+- dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
+- goto error_asoc_register;
++ if (hw_type == FSL_SSI_MX21 || hw_type == FSL_SSI_MX51 ||
++ hw_type == FSL_SSI_MX35) {
++ ssi_private->ssi_on_imx = true;
++
++ ssi_private->clk = devm_clk_get(&pdev->dev, NULL);
++ if (IS_ERR(ssi_private->clk)) {
++ ret = PTR_ERR(ssi_private->clk);
++ dev_err(&pdev->dev, "could not get clock: %d\n", ret);
++ goto error_irqmap;
++ }
++ ret = clk_prepare(ssi_private->clk);
++ if (ret) {
++ dev_err(&pdev->dev, "clk_prepare failed: %d\n",
++ ret);
++ goto error_irqmap;
++ }
++
++		/* For those SLAVE implementations, we ignore non-baudclk cases
++ * and, instead, abandon MASTER mode that needs baud clock.
++ */
++ ssi_private->baudclk = devm_clk_get(&pdev->dev, "baud");
++ if (IS_ERR(ssi_private->baudclk))
++ dev_dbg(&pdev->dev, "could not get baud clock: %ld\n",
++ PTR_ERR(ssi_private->baudclk));
++ else
++ clk_prepare(ssi_private->baudclk);
++
++ /*
++ * We have burstsize be "fifo_depth - 2" to match the SSI
++ * watermark setting in fsl_ssi_startup().
++ */
++ ssi_private->dma_params_tx.maxburst = ssi_private->fifo_depth - 2;
++ ssi_private->dma_params_rx.maxburst = ssi_private->fifo_depth - 2;
++ ssi_private->dma_params_tx.addr =
++ ssi_private->ssi_phys + offsetof(struct ccsr_ssi, stx0);
++ ssi_private->dma_params_rx.addr =
++ ssi_private->ssi_phys + offsetof(struct ccsr_ssi, srx0);
++
++		ret = of_property_read_u32_array(np, "dmas", dmas, 4);
++ if (ssi_private->use_dma && !ret && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
++ ssi_private->use_dual_fifo = true;
++ /* When using dual fifo mode, we need to keep watermark
++ * as even numbers due to dma script limitation.
++ */
++ ssi_private->dma_params_tx.maxburst &= ~0x1;
++ ssi_private->dma_params_rx.maxburst &= ~0x1;
++ }
+ }
+
++ /*
++ * Enable interrupts only for MCP8610 and MX51. The other MXs have
++ * different writeable interrupt status registers.
++ */
+ if (ssi_private->use_dma) {
++ /* The 'name' should not have any slashes in it. */
+ ret = devm_request_irq(&pdev->dev, ssi_private->irq,
+- fsl_ssi_isr, 0, dev_name(&pdev->dev),
++ fsl_ssi_isr, 0, ssi_private->name,
+ ssi_private);
++ ssi_private->irq_stats = true;
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not claim irq %u\n",
+ ssi_private->irq);
+- goto error_asoc_register;
++ goto error_clk;
+ }
+ }
+
+- ret = fsl_ssi_debugfs_create(&ssi_private->dbg_stats, &pdev->dev);
++ pm_runtime_enable(&pdev->dev);
++
++ /* Register with ASoC */
++ dev_set_drvdata(&pdev->dev, ssi_private);
++
++ ret = snd_soc_register_component(&pdev->dev, &fsl_ssi_component,
++ &ssi_private->cpu_dai_drv, 1);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
++ goto error_dev;
++ }
++
++ ret = fsl_ssi_debugfs_create(ssi_private, &pdev->dev);
+ if (ret)
+- goto error_asoc_register;
++ goto error_dbgfs;
++
++ if (ssi_private->ssi_on_imx) {
++ if (!ssi_private->use_dma) {
++
++ /*
++ * Some boards use an incompatible codec. To get it
++ * working, we are using imx-fiq-pcm-audio, that
++ * can handle those codecs. DMA is not possible in this
++ * situation.
++ */
++
++ ssi_private->fiq_params.irq = ssi_private->irq;
++ ssi_private->fiq_params.base = ssi_private->ssi;
++ ssi_private->fiq_params.dma_params_rx =
++ &ssi_private->dma_params_rx;
++ ssi_private->fiq_params.dma_params_tx =
++ &ssi_private->dma_params_tx;
++
++ ret = imx_pcm_fiq_init(pdev, &ssi_private->fiq_params);
++ if (ret)
++ goto error_pcm;
++ } else {
++ ret = imx_pcm_dma_init(pdev, IMX_SSI_DMABUF_SIZE);
++ if (ret)
++ goto error_pcm;
++ }
++ }
+
+ /*
+ * If codec-handle property is missing from SSI node, we assume
+ * that the machine driver uses new binding which does not require
+ * SSI driver to trigger machine driver's probe.
+ */
+- if (!of_get_property(np, "codec-handle", NULL))
++ if (!of_get_property(np, "codec-handle", NULL)) {
++ ssi_private->new_binding = true;
+ goto done;
++ }
+
+ /* Trigger the machine driver's probe function. The platform driver
+ * name of the machine driver is taken from /compatible property of the
+@@ -1433,22 +1585,42 @@
+ if (IS_ERR(ssi_private->pdev)) {
+ ret = PTR_ERR(ssi_private->pdev);
+ dev_err(&pdev->dev, "failed to register platform: %d\n", ret);
+- goto error_sound_card;
++ goto error_dai;
+ }
+
+ done:
+- if (ssi_private->dai_fmt)
+- _fsl_ssi_set_dai_fmt(&pdev->dev, ssi_private,
+- ssi_private->dai_fmt);
++ if (ssi_private->imx_ac97)
++ fsl_ssi_ac97_init();
+
+ return 0;
+
+-error_sound_card:
+- fsl_ssi_debugfs_remove(&ssi_private->dbg_stats);
++error_dai:
++ if (ssi_private->ssi_on_imx && !ssi_private->use_dma)
++ imx_pcm_fiq_exit(pdev);
++
++error_pcm:
++ fsl_ssi_debugfs_remove(ssi_private);
++
++error_dbgfs:
++ snd_soc_unregister_component(&pdev->dev);
++
++error_dev:
++ device_remove_file(&pdev->dev, dev_attr);
+
+-error_asoc_register:
+- if (ssi_private->soc->imx)
+- fsl_ssi_imx_clean(pdev, ssi_private);
++ if (ssi_private->ssi_on_imx) {
++ if (!IS_ERR(ssi_private->baudclk))
++ clk_unprepare(ssi_private->baudclk);
++ clk_unprepare(ssi_private->clk);
++ }
++error_clk:
++ if (!IS_ERR(ssi_private->baudclk))
++ clk_unprepare(ssi_private->baudclk);
++ if (!IS_ERR(ssi_private->clk))
++ clk_unprepare(ssi_private->clk);
++
++error_irqmap:
++ if (ssi_private->irq_stats)
++ irq_dispose_mapping(ssi_private->irq);
+
+ return ret;
+ }
+@@ -1457,13 +1629,18 @@
+ {
+ struct fsl_ssi_private *ssi_private = dev_get_drvdata(&pdev->dev);
+
+- fsl_ssi_debugfs_remove(&ssi_private->dbg_stats);
++ fsl_ssi_debugfs_remove(ssi_private);
+
+- if (ssi_private->pdev)
++ if (!ssi_private->new_binding)
+ platform_device_unregister(ssi_private->pdev);
+-
+- if (ssi_private->soc->imx)
+- fsl_ssi_imx_clean(pdev, ssi_private);
++ snd_soc_unregister_component(&pdev->dev);
++ if (ssi_private->ssi_on_imx) {
++ if (!IS_ERR(ssi_private->baudclk))
++ clk_unprepare(ssi_private->baudclk);
++ clk_unprepare(ssi_private->clk);
++ }
++ if (ssi_private->irq_stats)
++ irq_dispose_mapping(ssi_private->irq);
+
+ return 0;
+ }
+@@ -1471,7 +1648,9 @@
+ static struct platform_driver fsl_ssi_driver = {
+ .driver = {
+ .name = "fsl-ssi-dai",
++ .owner = THIS_MODULE,
+ .of_match_table = fsl_ssi_ids,
++ .pm = &fsl_ssi_pm,
+ },
+ .probe = fsl_ssi_probe,
+ .remove = fsl_ssi_remove,
+diff -Nur linux-4.1.3/sound/soc/fsl/fsl_ssi.h linux-xbian-imx6/sound/soc/fsl/fsl_ssi.h
+--- linux-4.1.3/sound/soc/fsl/fsl_ssi.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/sound/soc/fsl/fsl_ssi.h 2015-07-27 23:13:11.153188979 +0200
+@@ -12,32 +12,33 @@
+ #ifndef _MPC8610_I2S_H
+ #define _MPC8610_I2S_H
+
+-/* SSI registers */
+-#define CCSR_SSI_STX0 0x00
+-#define CCSR_SSI_STX1 0x04
+-#define CCSR_SSI_SRX0 0x08
+-#define CCSR_SSI_SRX1 0x0c
+-#define CCSR_SSI_SCR 0x10
+-#define CCSR_SSI_SISR 0x14
+-#define CCSR_SSI_SIER 0x18
+-#define CCSR_SSI_STCR 0x1c
+-#define CCSR_SSI_SRCR 0x20
+-#define CCSR_SSI_STCCR 0x24
+-#define CCSR_SSI_SRCCR 0x28
+-#define CCSR_SSI_SFCSR 0x2c
+-#define CCSR_SSI_STR 0x30
+-#define CCSR_SSI_SOR 0x34
+-#define CCSR_SSI_SACNT 0x38
+-#define CCSR_SSI_SACADD 0x3c
+-#define CCSR_SSI_SACDAT 0x40
+-#define CCSR_SSI_SATAG 0x44
+-#define CCSR_SSI_STMSK 0x48
+-#define CCSR_SSI_SRMSK 0x4c
+-#define CCSR_SSI_SACCST 0x50
+-#define CCSR_SSI_SACCEN 0x54
+-#define CCSR_SSI_SACCDIS 0x58
++/* SSI Register Map */
++struct ccsr_ssi {
++ __be32 stx0; /* 0x.0000 - SSI Transmit Data Register 0 */
++ __be32 stx1; /* 0x.0004 - SSI Transmit Data Register 1 */
++ __be32 srx0; /* 0x.0008 - SSI Receive Data Register 0 */
++ __be32 srx1; /* 0x.000C - SSI Receive Data Register 1 */
++ __be32 scr; /* 0x.0010 - SSI Control Register */
++ __be32 sisr; /* 0x.0014 - SSI Interrupt Status Register Mixed */
++ __be32 sier; /* 0x.0018 - SSI Interrupt Enable Register */
++ __be32 stcr; /* 0x.001C - SSI Transmit Configuration Register */
++ __be32 srcr; /* 0x.0020 - SSI Receive Configuration Register */
++ __be32 stccr; /* 0x.0024 - SSI Transmit Clock Control Register */
++ __be32 srccr; /* 0x.0028 - SSI Receive Clock Control Register */
++ __be32 sfcsr; /* 0x.002C - SSI FIFO Control/Status Register */
++ __be32 str; /* 0x.0030 - SSI Test Register */
++ __be32 sor; /* 0x.0034 - SSI Option Register */
++ __be32 sacnt; /* 0x.0038 - SSI AC97 Control Register */
++ __be32 sacadd; /* 0x.003C - SSI AC97 Command Address Register */
++ __be32 sacdat; /* 0x.0040 - SSI AC97 Command Data Register */
++ __be32 satag; /* 0x.0044 - SSI AC97 Tag Register */
++ __be32 stmsk; /* 0x.0048 - SSI Transmit Time Slot Mask Register */
++ __be32 srmsk; /* 0x.004C - SSI Receive Time Slot Mask Register */
++ __be32 saccst; /* 0x.0050 - SSI AC97 Channel Status Register */
++ __be32 saccen; /* 0x.0054 - SSI AC97 Channel Enable Register */
++ __be32 saccdis; /* 0x.0058 - SSI AC97 Channel Disable Register */
++};
+
+-#define CCSR_SSI_SCR_SYNC_TX_FS 0x00001000
+ #define CCSR_SSI_SCR_RFR_CLK_DIS 0x00000800
+ #define CCSR_SSI_SCR_TFR_CLK_DIS 0x00000400
+ #define CCSR_SSI_SCR_TCH_EN 0x00000100
+@@ -205,64 +206,5 @@
+ #define CCSR_SSI_SACNT_FV 0x00000002
+ #define CCSR_SSI_SACNT_AC97EN 0x00000001
+
+-
+-struct device;
+-
+-#if IS_ENABLED(CONFIG_DEBUG_FS)
+-
+-struct fsl_ssi_dbg {
+- struct dentry *dbg_dir;
+- struct dentry *dbg_stats;
+-
+- struct {
+- unsigned int rfrc;
+- unsigned int tfrc;
+- unsigned int cmdau;
+- unsigned int cmddu;
+- unsigned int rxt;
+- unsigned int rdr1;
+- unsigned int rdr0;
+- unsigned int tde1;
+- unsigned int tde0;
+- unsigned int roe1;
+- unsigned int roe0;
+- unsigned int tue1;
+- unsigned int tue0;
+- unsigned int tfs;
+- unsigned int rfs;
+- unsigned int tls;
+- unsigned int rls;
+- unsigned int rff1;
+- unsigned int rff0;
+- unsigned int tfe1;
+- unsigned int tfe0;
+- } stats;
+-};
+-
+-void fsl_ssi_dbg_isr(struct fsl_ssi_dbg *ssi_dbg, u32 sisr);
+-
+-int fsl_ssi_debugfs_create(struct fsl_ssi_dbg *ssi_dbg, struct device *dev);
+-
+-void fsl_ssi_debugfs_remove(struct fsl_ssi_dbg *ssi_dbg);
+-
+-#else
+-
+-struct fsl_ssi_dbg {
+-};
+-
+-static inline void fsl_ssi_dbg_isr(struct fsl_ssi_dbg *stats, u32 sisr)
+-{
+-}
+-
+-static inline int fsl_ssi_debugfs_create(struct fsl_ssi_dbg *ssi_dbg,
+- struct device *dev)
+-{
+- return 0;
+-}
+-
+-static inline void fsl_ssi_debugfs_remove(struct fsl_ssi_dbg *ssi_dbg)
+-{
+-}
+-#endif /* ! IS_ENABLED(CONFIG_DEBUG_FS) */
+-
+ #endif
+diff -Nur linux-4.1.3/sound/soc/fsl/hdmi_pcm.S linux-xbian-imx6/sound/soc/fsl/hdmi_pcm.S
+--- linux-4.1.3/sound/soc/fsl/hdmi_pcm.S 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/sound/soc/fsl/hdmi_pcm.S 2015-07-27 23:13:11.153188979 +0200
+@@ -0,0 +1,246 @@
++/**
++ * Copyright (C) 2010-2014 Freescale Semiconductor, Inc. All Rights Reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++.section .text
++
++.global hdmi_dma_copy_16_neon_lut
++.global hdmi_dma_copy_16_neon_fast
++.global hdmi_dma_copy_24_neon_lut
++.global hdmi_dma_copy_24_neon_fast
++
++
++/**
++ * hdmi_dma_copy_16_neon_lut
++ * Convert pcm sample to iec sample. Pcm sample is 16 bits.
++ * Frame indexes are between 0 and 47 inclusive. Channel count can be 1, 2, 4 or 8.
++ * Frame count should be a multiple of 4, and sample count a multiple of 8.
++ *
++ * C Prototype
++ * void hdmi_dma_copy_16_neon_lut(unsigned short *src, unsigned int *dst,
++ * int samples, unsigned char *lookup_table);
++ * Return value
++ * None
++ * Parameters
++ * src Source PCM16 samples
++ * dst Dest buffer to store pcm with header
++ * samples Contains sample count (=frame_count * channel_count)
++ * lookup_table Preconstructed header table. Channels interleaved.
++ */
++
++hdmi_dma_copy_16_neon_lut:
++ mov r12, #1 /* construct vector(1) */
++ vdup.8 d6, r12
++
++hdmi_dma_copy_16_neon_lut_start:
++
++ /* get 8 samples to q0 */
++ vld1.16 {d0, d1}, [r0]! /* TODO: aligned */
++
++ /* pld [r1, #(64*4)] */
++
++ /* xor every bit */
++ vcnt.8 q1, q0 /* count of 1s */
++ vpadd.i8 d2, d2, d3 /* only care about the LSB in every element */
++ vand d2, d2, d6 /* clear other bits while keeping the least significant bit */
++ vshl.u8 d2, d2, #3 /* bit p: d2 = d2 << 3 */
++
++ /* get packet header */
++ vld1.8 {d5}, [r3]!
++ veor d4, d5, d2 /* xor bit c */
++
++ /* store: (d4 << 16 | q0) << 8 */
++ vmovl.u8 q2, d4 /* expand from char to short */
++ vzip.16 q0, q2
++ vshl.u32 q0, q0, #8
++ vshl.u32 q1, q2, #8
++ vst1.32 {d0, d1, d2, d3}, [r1]!
++
++ /* decrease sample count */
++ subs r2, r2, #8
++ bne hdmi_dma_copy_16_neon_lut_start
++
++ mov pc, lr
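As an editorial aid, here is a minimal C model of what one iteration of the routine above computes for a single 16-bit sample; the helper name is hypothetical, and the logic mirrors the hdmi_dma_copy_16_c_lut() fallback added later in this patch:

/* Sketch only: build one IEC-60958 subframe from a 16-bit PCM sample.
 * head is the precomputed packet-header byte from the lookup table. */
static inline unsigned int iec_subframe_16(unsigned short sample,
					   unsigned char head)
{
	/* bit p (bit 3 of the header) is the parity of the sample */
	unsigned int p = __builtin_parity(sample) << 3;

	/* the vst1.32 above stores (header << 16 | sample) << 8 */
	return ((unsigned int)(head ^ p) << 24) | ((unsigned int)sample << 8);
}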
++
++/**
++ * hdmi_dma_copy_16_neon_fast
++ * Convert pcm sample to iec sample. Pcm sample is 16 bits.
++ * Frame indexes are between 48 and 191 inclusive.
++ * Channel count can be 1, 2, 4 or 8.
++ * Frame count should be a multiple of 4, and sample count a multiple of 8.
++ *
++ * C Prototype
++ * void hdmi_dma_copy_16_neon_fast(unsigned short *src,
++ * unsigned int *dst, int samples);
++ * Return value
++ * None
++ * Parameters
++ * src Source PCM16 samples
++ * dst Dest buffer to store pcm with header
++ * samples Contains sample count (=frame_count * channel_count)
++ */
++
++hdmi_dma_copy_16_neon_fast:
++ mov r12, #1 /* construct vector(1) */
++ vdup.8 d6, r12
++
++hdmi_dma_copy_16_neon_fast_start:
++ /* get 8 samples to q0 */
++ vld1.16 {d0, d1}, [r0]! /* TODO: aligned */
++
++ /* pld [r1, #(64*4)] */
++
++ /* xor every bit */
++ vcnt.8 q1, q0 /* count of 1s */
++ vpadd.i8 d2, d2, d3
++ vand d2, d2, d6 /* clear other bits while keeping the LSB */
++ /* finally we construct packet header */
++ vshl.u8 d4, d2, #3 /* bit p: d2 = d2 << 3 */
++
++ /* get packet header: always 0 */
++
++ /* store: (d4 << 16 | q0) << 8 */
++ vmovl.u8 q2, d4 /* expand from char to short */
++ vzip.16 q0, q2
++ vshl.u32 q0, q0, #8
++ vshl.u32 q1, q2, #8
++ vst1.32 {d0, d1, d2, d3}, [r1]!
++
++ /* decrease sample count */
++ subs r2, r2, #8
++ bne hdmi_dma_copy_16_neon_fast_start
++
++ mov pc, lr
++
++
++
++/**
++ * hdmi_dma_copy_24_neon_lut
++ * Convert pcm sample to iec sample. Pcm sample is 24 bits.
++ * Frame indexes are between 0 and 47 inclusive. Channel count can be 1, 2, 4 or 8.
++ * Frame count should be a multiple of 4, and sample count a multiple of 8.
++ *
++ * C Prototype
++ * void hdmi_dma_copy_24_neon_lut(unsigned int *src, unsigned int *dst,
++ * int samples, unsigned char *lookup_table);
++ * Return value
++ * None
++ * Parameters
++ * src Source PCM24 samples
++ * dst Dest buffer to store pcm with header
++ * samples Contains sample count (=frame_count * channel_count)
++ * lookup_table Preconstructed header table. Channels interleaved.
++ */
++
++hdmi_dma_copy_24_neon_lut:
++ vpush {d8}
++
++ mov r12, #1 /* construct vector(1) */
++ vdup.8 d8, r12
++
++hdmi_dma_copy_24_neon_lut_start:
++
++ /* get 8 samples to q0 and q1 */
++ vld1.32 {d0, d1, d2, d3}, [r0]! /* TODO: aligned */
++
++ /* pld [r1, #(64*4)] */
++
++ /* xor every bit */
++ vcnt.8 q2, q0 /* count of 1s */
++ vpadd.i8 d4, d4, d5 /* only care about the LSB in every element */
++ vcnt.8 q3, q1
++ vpadd.i8 d6, d6, d7
++ vpadd.i8 d4, d4, d6 /* d4: contains xor result and other dirty bits */
++ vand d4, d4, d8 /* clear other bits while keeping the least significant bit */
++ vshl.u8 d4, d4, #3 /* bit p: d4 = d4 << 3 */
++
++ /* get packet header */
++ vld1.8 {d5}, [r3]!/* d5: original header */
++ veor d5, d5, d4 /* fix bit p */
++
++ /* store: (d5 << 24 | q0) */
++ vmovl.u8 q3, d5 /* expand from char to short */
++ vmovl.u16 q2, d6 /* expand from short to int */
++ vmovl.u16 q3, d7
++ vshl.u32 q2, q2, #24
++ vshl.u32 q3, q3, #24
++ vorr q0, q0, q2
++ vorr q1, q1, q3
++ vst1.32 {d0, d1, d2, d3}, [r1]!
++
++ /* decrease sample count */
++ subs r2, r2, #8
++ bne hdmi_dma_copy_24_neon_lut_start
++
++ vpop {d8}
++ mov pc, lr
++
++/**
++ * hdmi_dma_copy_24_neon_fast
++ * Convert pcm sample to iec sample. Pcm sample is 24 bits.
++ * Frame indexes are between 48 and 191 inclusive.
++ * Channel count can be 1, 2, 4 or 8.
++ * Frame count should be a multiple of 4, and sample count a multiple of 8.
++ *
++ * C Prototype
++ * void hdmi_dma_copy_24_neon_fast(unsigned int *src,
++ * unsigned int *dst, int samples);
++ * Return value
++ * None
++ * Parameters
++ * src Source PCM24 samples
++ * dst Dest buffer to store pcm with header
++ * samples Contains sample count (=frame_count * channel_count)
++ */
++
++hdmi_dma_copy_24_neon_fast:
++ vpush {d8}
++
++ mov r12, #1 /* construct vector(1) */
++ vdup.8 d8, r12
++
++hdmi_dma_copy_24_neon_fast_start:
++ /* get 8 samples to q0 and q1 */
++ vld1.32 {d0, d1, d2, d3}, [r0]! /* TODO: aligned */
++
++ /* pld [r1, #(64*4)] */
++
++ /* xor every bit */
++ vcnt.8 q2, q0 /* count of 1s */
++ vpadd.i8 d4, d4, d5 /* only care about the LSB in every element */
++ vcnt.8 q3, q1
++ vpadd.i8 d6, d6, d7
++ vpadd.i8 d4, d4, d6 /* d4: contains xor result and other dirty bits */
++ vand d4, d4, d8 /* clear other bits while keeping the least significant bit */
++ vshl.u8 d4, d4, #3 /* bit p: d4 = d4 << 3 */
++
++ /* store: (d4 << 24 | q0) */
++ vmovl.u8 q3, d4 /* expand from char to short */
++ vmovl.u16 q2, d6 /* expand from short to int */
++ vmovl.u16 q3, d7
++ vshl.u32 q2, q2, #24
++ vshl.u32 q3, q3, #24
++ vorr q0, q0, q2
++ vorr q1, q1, q3
++ vst1.32 {d0, d1, d2, d3}, [r1]!
++
++ /* decrease sample count */
++ subs r2, r2, #8
++ bne hdmi_dma_copy_24_neon_fast_start
++
++ vpop {d8}
++ mov pc, lr
+diff -Nur linux-4.1.3/sound/soc/fsl/imx-audmux.c linux-xbian-imx6/sound/soc/fsl/imx-audmux.c
+--- linux-4.1.3/sound/soc/fsl/imx-audmux.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/sound/soc/fsl/imx-audmux.c 2015-07-27 23:13:11.153188979 +0200
+@@ -67,7 +67,7 @@
+ {
+ ssize_t ret;
+ char *buf;
+- uintptr_t port = (uintptr_t)file->private_data;
++ int port = (int)file->private_data;
+ u32 pdcr, ptcr;
+
+ if (audmux_clk) {
+@@ -145,9 +145,9 @@
+ .llseek = default_llseek,
+ };
+
+-static void audmux_debugfs_init(void)
++static void __init audmux_debugfs_init(void)
+ {
+- uintptr_t i;
++ int i;
+ char buf[20];
+
+ audmux_debugfs_root = debugfs_create_dir("audmux", NULL);
+@@ -157,10 +157,10 @@
+ }
+
+ for (i = 0; i < MX31_AUDMUX_PORT7_SSI_PINS_7 + 1; i++) {
+- snprintf(buf, sizeof(buf), "ssi%lu", i);
++ snprintf(buf, sizeof(buf), "ssi%d", i);
+ if (!debugfs_create_file(buf, 0444, audmux_debugfs_root,
+ (void *)i, &audmux_debugfs_fops))
+- pr_warning("Failed to create AUDMUX port %lu debugfs file\n",
++ pr_warning("Failed to create AUDMUX port %d debugfs file\n",
+ i);
+ }
+ }
+@@ -356,6 +356,7 @@
+ .id_table = imx_audmux_ids,
+ .driver = {
+ .name = DRIVER_NAME,
++ .owner = THIS_MODULE,
+ .of_match_table = imx_audmux_dt_ids,
+ }
+ };
+diff -Nur linux-4.1.3/sound/soc/fsl/imx-hdmi.c linux-xbian-imx6/sound/soc/fsl/imx-hdmi.c
+--- linux-4.1.3/sound/soc/fsl/imx-hdmi.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/sound/soc/fsl/imx-hdmi.c 2015-07-27 23:13:11.153188979 +0200
+@@ -0,0 +1,114 @@
++/*
++ * ASoC HDMI Transmitter driver for IMX development boards
++ *
++ * Copyright (C) 2011-2014 Freescale Semiconductor, Inc.
++ *
++ * based on stmp3780_devb_hdmi.c
++ *
++ * Vladimir Barinov <vbarinov@embeddedalley.com>
++ *
++ * Copyright 2008 SigmaTel, Inc
++ * Copyright 2008 Embedded Alley Solutions, Inc
++ *
++ * This file is licensed under the terms of the GNU General Public License
++ * version 2. This program is licensed "as is" without any warranty of any
++ * kind, whether express or implied.
++ */
++
++#include <linux/module.h>
++#include <linux/of_platform.h>
++#include <linux/mfd/mxc-hdmi-core.h>
++#include <sound/soc.h>
++
++#include "imx-hdmi.h"
++
++/* imx digital audio interface glue - connects codec <--> CPU */
++static struct snd_soc_dai_link imx_hdmi_dai_link = {
++ .name = "i.MX HDMI Audio Tx",
++ .stream_name = "i.MX HDMI Audio Tx",
++ .codec_dai_name = "hdmi-hifi",
++ .codec_name = "hdmi-audio-codec",
++ .platform_name = "imx-hdmi-audio",
++ .ignore_pmdown_time = 1,
++};
++
++static struct snd_soc_card snd_soc_card_imx_hdmi = {
++ .name = "imx-hdmi-soc",
++ .dai_link = &imx_hdmi_dai_link,
++ .num_links = 1,
++};
++
++static int imx_hdmi_audio_probe(struct platform_device *pdev)
++{
++ struct device_node *hdmi_np, *np = pdev->dev.of_node;
++ struct snd_soc_card *card = &snd_soc_card_imx_hdmi;
++ struct platform_device *hdmi_pdev;
++ int ret = 0;
++
++ if (!hdmi_get_registered()) {
++ dev_err(&pdev->dev, "initialize HDMI-audio failed. load HDMI-video first!\n");
++ return -ENODEV;
++ }
++
++ hdmi_np = of_parse_phandle(np, "hdmi-controller", 0);
++ if (!hdmi_np) {
++ dev_err(&pdev->dev, "failed to find hdmi-audio cpudai\n");
++ ret = -EINVAL;
++ goto end;
++ }
++
++ hdmi_pdev = of_find_device_by_node(hdmi_np);
++ if (!hdmi_pdev) {
++ dev_err(&pdev->dev, "failed to find SSI platform device\n");
++ ret = -EINVAL;
++ goto end;
++ }
++
++ card->dev = &pdev->dev;
++ card->dai_link->cpu_dai_name = dev_name(&hdmi_pdev->dev);
++
++ platform_set_drvdata(pdev, card);
++
++ ret = snd_soc_register_card(card);
++ if (ret)
++ dev_err(&pdev->dev, "failed to register card: %d\n", ret);
++
++end:
++ if (hdmi_np)
++ of_node_put(hdmi_np);
++
++ return ret;
++}
++
++static int imx_hdmi_audio_remove(struct platform_device *pdev)
++{
++ struct snd_soc_card *card = platform_get_drvdata(pdev);
++
++ snd_soc_unregister_card(card);
++
++ return 0;
++}
++
++static const struct of_device_id imx_hdmi_dt_ids[] = {
++ { .compatible = "fsl,imx-audio-hdmi", },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, imx_hdmi_dt_ids);
++
++static struct platform_driver imx_hdmi_audio_driver = {
++ .probe = imx_hdmi_audio_probe,
++ .remove = imx_hdmi_audio_remove,
++ .driver = {
++ .of_match_table = imx_hdmi_dt_ids,
++ .name = "imx-audio-hdmi",
++ .owner = THIS_MODULE,
++ .pm = &snd_soc_pm_ops,
++ },
++};
++
++module_platform_driver(imx_hdmi_audio_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("IMX HDMI TX ASoC driver");
++MODULE_LICENSE("GPL");
++MODULE_ALIAS("platform:imx-audio-hdmi");
+diff -Nur linux-4.1.3/sound/soc/fsl/imx-hdmi-dma.c linux-xbian-imx6/sound/soc/fsl/imx-hdmi-dma.c
+--- linux-4.1.3/sound/soc/fsl/imx-hdmi-dma.c 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/sound/soc/fsl/imx-hdmi-dma.c 2015-07-27 23:13:11.153188979 +0200
+@@ -0,0 +1,1256 @@
++/*
++ * imx-hdmi-dma.c -- HDMI DMA driver for ALSA Soc Audio Layer
++ *
++ * Copyright (C) 2011-2014 Freescale Semiconductor, Inc.
++ *
++ * based on imx-pcm-dma-mx2.c
++ * Copyright 2009 Sascha Hauer <s.hauer@pengutronix.de>
++ *
++ * This code is based on code copyrighted by Freescale,
++ * Liam Girdwood, Javier Martin and probably others.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2 of the License, or (at your
++ * option) any later version.
++ */
++
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <linux/mfd/mxc-hdmi-core.h>
++#include <linux/platform_data/dma-imx.h>
++
++#include <video/mxc_hdmi.h>
++
++#include "imx-hdmi.h"
++
++#define HDMI_DMA_BURST_UNSPECIFIED_LENGTH 0
++#define HDMI_DMA_BURST_INCR4 1
++#define HDMI_DMA_BURST_INCR8 2
++#define HDMI_DMA_BURST_INCR16 3
++
++#define HDMI_BASE_ADDR 0x00120000
++
++struct hdmi_sdma_script {
++ int control_reg_addr;
++ int status_reg_addr;
++ int dma_start_addr;
++ u32 buffer[20];
++};
++
++struct hdmi_dma_priv {
++ struct snd_pcm_substream *substream;
++ struct platform_device *pdev;
++
++ struct snd_dma_buffer hw_buffer;
++ unsigned long buffer_bytes;
++ unsigned long appl_bytes;
++
++ int periods;
++ int period_time;
++ int period_bytes;
++ int dma_period_bytes;
++ int buffer_ratio;
++
++ unsigned long offset;
++
++ snd_pcm_format_t format;
++ int sample_align;
++ int sample_bits;
++ int channels;
++ int rate;
++
++ int frame_idx;
++
++ bool tx_active;
++ spinlock_t irq_lock;
++
++ /* SDMA part */
++ dma_addr_t phy_hdmi_sdma_t;
++ struct hdmi_sdma_script *hdmi_sdma_t;
++ struct dma_chan *dma_channel;
++ struct imx_dma_data dma_data;
++ struct dma_async_tx_descriptor *desc;
++ struct imx_hdmi_sdma_params sdma_params;
++};
++
++/* packet head byte, MSB to LSB: 0:0:0:b:p(preset 0):c:u(0):v(0) */
++/* max 8 channels supported; channels are interleaved */
++static u8 g_packet_head_table[48 * 8];
++
++/* channel remapping for hdmi_dma_copy_xxxx() */
++static u8 g_channel_remap_table[24];
++
++/* default mapping tables */
++static const u8 channel_maps_alsa_cea[5][8] = {
++ { 0, 1, 2, 3, 4, 5, 6, 7 }, /* 0CH: no remapping */
++ { 0, 1, 2, 3, 4, 5, 6, 7 }, /* 2CH: no remapping */
++ { 0, 1, 2, 3, 4, 5, 6, 7 }, /* 4CH: no remapping */
++ { 0, 1, 4, 5, 3, 2, 6, 7 }, /* 6CH: ALSA5.1 to CEA */
++ { 0, 1, 6, 7, 3, 2, 4, 5 } /* 8CH: ALSA7.1 to CEA */
++};
++
++static const u8 channel_maps_cea_alsa[5][8] = {
++ { 0, 1, 2, 3, 4, 5, 6, 7 }, /* 0CH: no remapping */
++ { 0, 1, 2, 3, 4, 5, 6, 7 }, /* 2CH: no remapping */
++ { 0, 1, 2, 3, 4, 5, 6, 7 }, /* 4CH: no remapping */
++ { 0, 1, 5, 4, 2, 3, 6, 7 }, /* 6CH: CEA to ALSA5.1 */
++ { 0, 1, 5, 4, 6, 7, 2, 3 } /* 8CH: CEA to ALSA7.1 */
++};
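For orientation, a worked example of the 6CH rows above, assuming the usual speaker orders (ALSA 5.1: FL FR RL RR FC LFE; CEA: FL FR LFE FC RL RR):

/* channel_maps_alsa_cea[3] = { 0, 1, 4, 5, 3, 2, 6, 7 }: index = ALSA slot,
 * value = CEA slot, so ALSA slot 2 (RL) lands in CEA slot 4 and ALSA slot 5
 * (LFE) in CEA slot 2. channel_maps_cea_alsa[3] is the inverse permutation,
 * giving for each CEA slot the ALSA slot it is filled from. */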
++
++union hdmi_audio_header_t iec_header;
++EXPORT_SYMBOL(iec_header);
++
++/*
++ * Note that the period size for DMA != period size for ALSA because the
++ * driver adds iec frame info to the audio samples (in hdmi_dma_copy).
++ *
++ * Each 4 byte subframe = 1 byte of iec data + 3 byte audio sample.
++ *
++ * A 16 bit audio sample becomes 32 bits including the frame info. Ratio=2
++ * A 24 bit audio sample becomes 32 bits including the frame info. Ratio=3:4
++ * If the 24 bit raw audio is already in 32 bit words, the ratio is 1.
++ *
++ * Original   Packed into   subframe   Ratio of size    Format
++ * sample     how many      size       of DMA buffer
++ * (bits)     bits                     to ALSA buffer
++ * --------   -----------   --------   --------------   ------------------------
++ * 16         16            32         2                SNDRV_PCM_FORMAT_S16_LE
++ * 24         24            32         1.33             SNDRV_PCM_FORMAT_S24_3LE*
++ * 24         32            32         1                SNDRV_PCM_FORMAT_S24_LE
++ *
++ * *so SNDRV_PCM_FORMAT_S24_3LE is not supported.
++ */
++
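The ratios in the table above are what hdmi_dma_hw_params() later stores in priv->buffer_ratio; a minimal standalone sketch (hypothetical helper, not part of the patch):

static int hdmi_buffer_ratio(snd_pcm_format_t format)
{
	switch (format) {
	case SNDRV_PCM_FORMAT_S16_LE:
		return 2;	/* 2-byte sample -> 4-byte subframe */
	case SNDRV_PCM_FORMAT_S24_LE:
		return 1;	/* 24 bits already in a 4-byte word */
	default:
		return -EINVAL;	/* e.g. S24_3LE is not supported */
	}
}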
++/*
++ * The minimum dma period is one IEC audio frame (192 * 4 * channels).
++ * The maximum dma period for the HDMI DMA is 8K.
++ *
++ * channels   minimum              maximum
++ *            dma period           dma period
++ * --------   ------------------   ----------
++ * 2          192 * 4 * 2 = 1536   * 4 = 6144
++ * 4          192 * 4 * 4 = 3072   * 2 = 6144
++ * 6          192 * 4 * 6 = 4608   * 1 = 4608
++ * 8          192 * 4 * 8 = 6144   * 1 = 6144
++ *
++ * Bottom line:
++ * 1. Must keep the ratio of DMA buffer to ALSA buffer consistent.
++ * 2. frame_idx is saved in the private data, so even if a frame cannot be
++ * transmitted in a period, it can be continued in the next period. This
++ * is necessary for 6 ch.
++ */
++#define HDMI_DMA_PERIOD_BYTES (12288)
++#define HDMI_DMA_BUF_SIZE (1280 * 1024)
++#define HDMI_PCM_BUF_SIZE (1280 * 1024)
++
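For illustration, the minimum-period column of the table above works out as (macro names hypothetical, not part of the patch):

#define IEC_FRAMES_PER_BLOCK	192	/* frames per IEC-60958 block */
#define IEC_SUBFRAME_BYTES	4	/* header bits + 24 bits of payload */
#define MIN_DMA_PERIOD(ch)	(IEC_FRAMES_PER_BLOCK * IEC_SUBFRAME_BYTES * (ch))
/* MIN_DMA_PERIOD(2) == 1536, MIN_DMA_PERIOD(8) == 6144 */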
++#define hdmi_audio_debug(dev, reg) \
++ dev_dbg(dev, #reg ": 0x%02x\n", hdmi_readb(reg))
++
++#ifdef DEBUG
++static void dumpregs(struct device *dev)
++{
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_CONF0);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_START);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_STOP);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_THRSLD);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_STRADDR0);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_STPADDR0);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_BSTADDR0);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_MBLENGTH0);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_MBLENGTH1);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_STAT);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_INT);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_MASK);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_POL);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_CONF1);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_BUFFSTAT);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_BUFFINT);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_BUFFMASK);
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_BUFFPOL);
++ hdmi_audio_debug(dev, HDMI_IH_MUTE_AHBDMAAUD_STAT0);
++ hdmi_audio_debug(dev, HDMI_IH_AHBDMAAUD_STAT0);
++ hdmi_audio_debug(dev, HDMI_IH_MUTE);
++}
++
++static void dumppriv(struct device *dev, struct hdmi_dma_priv *priv)
++{
++ dev_dbg(dev, "channels = %d\n", priv->channels);
++ dev_dbg(dev, "periods = %d\n", priv->periods);
++ dev_dbg(dev, "period_bytes = %d\n", priv->period_bytes);
++ dev_dbg(dev, "dma period_bytes = %d\n", priv->dma_period_bytes);
++ dev_dbg(dev, "buffer_ratio = %d\n", priv->buffer_ratio);
++ dev_dbg(dev, "hw dma buffer = 0x%08x\n", (int)priv->hw_buffer.addr);
++ dev_dbg(dev, "dma buf size = %d\n", (int)priv->buffer_bytes);
++ dev_dbg(dev, "sample_rate = %d\n", (int)priv->rate);
++}
++#else
++static void dumpregs(struct device *dev) {}
++static void dumppriv(struct device *dev, struct hdmi_dma_priv *priv) {}
++#endif
++
++/*
++ * Conditions for DMA to work:
++ * (((final_addr - initial_addr) >> 2) + 1) < 2k. So max period is 8k.
++ * (initial_addr & 0x3) == 0
++ * (final_addr & 0x3) == 0x3
++ *
++ * The DMA Period should be an integer multiple of the IEC 60958 audio
++ * frame size, which is 768 bytes (192 * 4).
++ */
++static void hdmi_dma_set_addr(int start_addr, int dma_period_bytes)
++{
++ int final_addr = start_addr + dma_period_bytes - 1;
++
++ hdmi_write4(start_addr, HDMI_AHB_DMA_STRADDR0);
++ hdmi_write4(final_addr, HDMI_AHB_DMA_STPADDR0);
++}
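A hedged sanity check of the conditions listed above, written out as standalone C (not part of the patch):

static inline bool hdmi_dma_addr_ok(u32 start, u32 bytes)
{
	u32 final = start + bytes - 1;

	return (start & 0x3) == 0 &&			/* word-aligned start */
	       (final & 0x3) == 0x3 &&			/* end fills its word */
	       (((final - start) >> 2) + 1) < 2048 &&	/* < 2k words: max 8k period */
	       (bytes % (192 * 4)) == 0;		/* whole IEC-60958 frames */
}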
++
++static void hdmi_dma_irq_set(bool set)
++{
++ u8 val = hdmi_readb(HDMI_AHB_DMA_MASK);
++
++ if (set)
++ val |= HDMI_AHB_DMA_DONE;
++ else
++ val &= (u8)~HDMI_AHB_DMA_DONE;
++
++ hdmi_writeb(val, HDMI_AHB_DMA_MASK);
++}
++
++static void hdmi_mask(int mask)
++{
++ u8 regval = hdmi_readb(HDMI_AHB_DMA_MASK);
++
++ if (mask)
++ regval |= HDMI_AHB_DMA_ERROR | HDMI_AHB_DMA_FIFO_EMPTY;
++ else
++ regval &= (u8)~(HDMI_AHB_DMA_ERROR | HDMI_AHB_DMA_FIFO_EMPTY);
++
++ hdmi_writeb(regval, HDMI_AHB_DMA_MASK);
++}
++
++static inline int odd_ones(unsigned a)
++{
++ a ^= a >> 16;
++ a ^= a >> 8;
++ a ^= a >> 4;
++ a ^= a >> 2;
++ a ^= a >> 1;
++
++ return a & 1;
++}
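A quick check of the xor-fold above: odd_ones(0x7) collapses three set bits to 1 and odd_ones(0xff) collapses eight set bits to 0, i.e. the function returns the parity of the word, which is exactly what the IEC p bit needs.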
++
++/* Add frame information for one pcm subframe */
++static u32 hdmi_dma_add_frame_info(struct hdmi_dma_priv *priv,
++ u32 pcm_data, int subframe_idx)
++{
++ union hdmi_audio_dma_data_t subframe;
++ union hdmi_audio_header_t tmp_header;
++
++ subframe.U = 0;
++
++ if (priv->frame_idx < 42) {
++ tmp_header = iec_header;
++
++ /* fill v (validity) */
++ subframe.B.v = tmp_header.B.linear_pcm;
++
++ /* fill c (channel status) */
++ if (tmp_header.B.linear_pcm == 0)
++ tmp_header.B.channel = subframe_idx + 1;
++ subframe.B.c = tmp_header.U >> priv->frame_idx;
++ } else {
++ /* fill v (validity), c is always zero */
++ subframe.B.v = iec_header.B.linear_pcm;
++ }
++
++ /* fill data */
++ if (priv->sample_bits == 16)
++ pcm_data <<= 8;
++ subframe.B.data = pcm_data;
++
++ /* fill p (parity) Note: Do not include b ! */
++ subframe.B.p = odd_ones(subframe.U);
++
++ /* fill b (start-of-block) */
++ if (priv->frame_idx == 0)
++ subframe.B.b = 1;
++
++ return subframe.U;
++}
++
++static void init_table(int channels)
++{
++ int i, map_sel, ch;
++ unsigned char *p = g_packet_head_table;
++ union hdmi_audio_header_t tmp_header = iec_header;
++
++ for (i = 0; i < 48; i++) {
++ int b = 0;
++ if (i == 0)
++ b = 1;
++
++ for (ch = 0; ch < channels; ch++) {
++ int c = 0;
++ if (i < 42) {
++ tmp_header.B.channel = ch + 1;
++ c = (tmp_header.U >> i) & 0x1;
++ }
++ /* preset bit p as c */
++ *p++ = (b << 4) | (c << 2) | (c << 3);
++ }
++ }
++
++ map_sel = channels / 2;
++ for (i = 0; i < 24; i++) {
++ g_channel_remap_table[i] = (i / channels) * channels +
++ channel_maps_cea_alsa[map_sel][i % channels];
++ }
++}
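The table entries pack the start-of-block flag b into bit 4 and the channel-status bit c into bits 2 and 3; presetting bit 3 (the p bit) to c lets the copy loops finish the parity with a single XOR of the sample's parity. A worked entry (editorial example):

/* frame 0, channel-status bit c = 1:
 *   entry = (1 << 4) | (1 << 2) | (1 << 3) = 0x1c
 * a sample with odd parity then gives head = 0x1c ^ (1 << 3) = 0x14,
 * i.e. b = 1, c = 1, p = c ^ parity(sample) = 0. */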
++
++/* Optimization for IEC head */
++static void hdmi_dma_copy_16_c_lut(u16 *src, u32 *dst, int samples,
++ u8 *lookup_table)
++{
++ u32 sample, head;
++ int i = 0;
++
++ while (samples--) {
++ /* get source sample */
++ sample = src[g_channel_remap_table[i]];
++
++ /* get packet header and p-bit */
++ head = *lookup_table++ ^ (odd_ones(sample) << 3);
++
++ /* store sample and header */
++ *dst++ = (head << 24) | (sample << 8);
++
++ if (++i == 24) {
++ src += 24;
++ i = 0;
++ }
++ }
++}
++
++static void hdmi_dma_copy_16_c_fast(u16 *src, u32 *dst, int samples)
++{
++ u32 sample;
++ int i = 0;
++
++ while (samples--) {
++ /* get source sample */
++ sample = src[g_channel_remap_table[i]];
++
++ /* store sample and p-bit */
++ *dst++ = (odd_ones(sample) << (3+24)) | (sample << 8);
++
++ if (++i == 24) {
++ src += 24;
++ i = 0;
++ }
++ }
++}
++
++static void hdmi_dma_copy_24_c_lut(u32 *src, u32 *dst, int samples,
++ u8 *lookup_table)
++{
++ u32 sample, head;
++ int i = 0;
++
++ while (samples--) {
++ /* get source sample */
++ sample = src[g_channel_remap_table[i]] & 0x00ffffff;
++
++ /* get packet header and p-bit */
++ head = *lookup_table++ ^ (odd_ones(sample) << 3);
++
++ /* store sample and header */
++ *dst++ = (head << 24) | sample;
++
++ if (++i == 24) {
++ src += 24;
++ i = 0;
++ }
++ }
++}
++
++static void hdmi_dma_copy_24_c_fast(u32 *src, u32 *dst, int samples)
++{
++ u32 sample;
++ int i = 0;
++
++ while (samples--) {
++ /* get source sample */
++ sample = src[g_channel_remap_table[i]] & 0x00ffffff;
++
++ /* store sample and p-bit */
++ *dst++ = (odd_ones(sample) << (3+24)) | sample;
++
++ if (++i == 24) {
++ src += 24;
++ i = 0;
++ }
++ }
++}
++
++static void hdmi_mmap_copy(u8 *src, int samplesize, u32 *dst, int framecnt, int channelcnt)
++{
++ /* split input frames into 192-frame each */
++ int count_in_192 = (framecnt + 191) / 192;
++ int i;
++
++ typedef void (*fn_copy_lut)(u8 *src, u32 *dst, int samples, u8 *lookup_table);
++ typedef void (*fn_copy_fast)(u8 *src, u32 *dst, int samples);
++ fn_copy_lut copy_lut;
++ fn_copy_fast copy_fast;
++
++ if (samplesize == 4) {
++ copy_lut = (fn_copy_lut)hdmi_dma_copy_24_c_lut;
++ copy_fast = (fn_copy_fast)hdmi_dma_copy_24_c_fast;
++ } else {
++ copy_lut = (fn_copy_lut)hdmi_dma_copy_16_c_lut;
++ copy_fast = (fn_copy_fast)hdmi_dma_copy_16_c_fast;
++ }
++
++ for (i = 0; i < count_in_192; i++) {
++ int count, samples;
++
++ /* handles frame index [0, 48) */
++ count = (framecnt < 48) ? framecnt : 48;
++ samples = count * channelcnt;
++ copy_lut(src, dst, samples, g_packet_head_table);
++ framecnt -= count;
++ if (framecnt == 0)
++ break;
++
++ src += samples * samplesize;
++ dst += samples;
++
++ /* handles frame index [48, 192) */
++ count = (framecnt < 192 - 48) ? framecnt : 192 - 48;
++ samples = count * channelcnt;
++ copy_fast(src, dst, samples);
++ framecnt -= count;
++ src += samples * samplesize;
++ dst += samples;
++ }
++}
++
++static void hdmi_dma_mmap_copy(struct snd_pcm_substream *substream,
++ int offset, int count)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++ struct device *dev = rtd->platform->dev;
++ u32 framecount, *dst;
++
++ framecount = count / (priv->sample_align * priv->channels);
++
++ /* hw_buffer is the destination for pcm data plus frame info. */
++ dst = (u32 *)(priv->hw_buffer.area + (offset * priv->buffer_ratio));
++
++ switch (priv->format) {
++ case SNDRV_PCM_FORMAT_S16_LE:
++ case SNDRV_PCM_FORMAT_S24_LE:
++ /* dma_buffer is the mmapped buffer we are copying pcm from. */
++ hdmi_mmap_copy(runtime->dma_area + offset,
++ priv->sample_align, dst, framecount, priv->channels);
++ break;
++ default:
++ dev_err(dev, "unsupported sample format %s\n",
++ snd_pcm_format_name(priv->format));
++ return;
++ }
++}
++
++static void hdmi_dma_data_copy(struct snd_pcm_substream *substream,
++ struct hdmi_dma_priv *priv, char type)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ unsigned long offset, count, appl_bytes, space_to_end;
++
++ if (runtime->access != SNDRV_PCM_ACCESS_MMAP_INTERLEAVED)
++ return;
++
++ appl_bytes = frames_to_bytes(runtime, runtime->status->hw_ptr);
++
++ switch (type) {
++ case 'p':
++ offset = (appl_bytes + 2 * priv->period_bytes) % priv->buffer_bytes;
++ count = priv->period_bytes;
++ space_to_end = priv->period_bytes;
++ break;
++ case 'b':
++ offset = appl_bytes % priv->buffer_bytes;
++ count = priv->buffer_bytes;
++ space_to_end = priv->buffer_bytes - offset;
++ break;
++ default:
++ return;
++ }
++
++ if (count <= space_to_end) {
++ hdmi_dma_mmap_copy(substream, offset, count);
++ } else {
++ hdmi_dma_mmap_copy(substream, offset, space_to_end);
++ hdmi_dma_mmap_copy(substream, 0, count - space_to_end);
++ }
++}
++
++static void hdmi_sdma_callback(void *data)
++{
++ struct hdmi_dma_priv *priv = (struct hdmi_dma_priv *)data;
++ struct snd_pcm_substream *substream = priv->substream;
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ unsigned long flags;
++
++ spin_lock_irqsave(&priv->irq_lock, flags);
++
++ if (runtime && runtime->dma_area && priv->tx_active) {
++ priv->offset += priv->period_bytes;
++ priv->offset %= priv->period_bytes * priv->periods;
++
++ /* Copy data by period_bytes */
++ hdmi_dma_data_copy(substream, priv, 'p');
++
++ snd_pcm_period_elapsed(substream);
++ }
++
++ spin_unlock_irqrestore(&priv->irq_lock, flags);
++
++ return;
++}
++
++static int hdmi_dma_set_thrsld_incrtype(struct device *dev, int channels)
++{
++ u8 mask = HDMI_AHB_DMA_CONF0_BURST_MODE | HDMI_AHB_DMA_CONF0_INCR_TYPE_MASK;
++ u8 val = hdmi_readb(HDMI_AHB_DMA_CONF0) & ~mask;
++ int incr_type, threshold;
++
++ switch (hdmi_readb(HDMI_REVISION_ID)) {
++ case 0x0a:
++ incr_type = HDMI_DMA_BURST_INCR4;
++ if (channels == 2)
++ threshold = 126;
++ else
++ threshold = 124;
++ break;
++ case 0x1a:
++ incr_type = HDMI_DMA_BURST_INCR8;
++ threshold = 128;
++ break;
++ default:
++ dev_err(dev, "unknown hdmi controller!\n");
++ return -ENODEV;
++ }
++
++ hdmi_writeb(threshold, HDMI_AHB_DMA_THRSLD);
++
++ switch (incr_type) {
++ case HDMI_DMA_BURST_UNSPECIFIED_LENGTH:
++ break;
++ case HDMI_DMA_BURST_INCR4:
++ val |= HDMI_AHB_DMA_CONF0_BURST_MODE;
++ break;
++ case HDMI_DMA_BURST_INCR8:
++ val |= HDMI_AHB_DMA_CONF0_BURST_MODE |
++ HDMI_AHB_DMA_CONF0_INCR8;
++ break;
++ case HDMI_DMA_BURST_INCR16:
++ val |= HDMI_AHB_DMA_CONF0_BURST_MODE |
++ HDMI_AHB_DMA_CONF0_INCR16;
++ break;
++ default:
++ dev_err(dev, "invalid increment type: %d!", incr_type);
++ return -EINVAL;
++ }
++
++ hdmi_writeb(val, HDMI_AHB_DMA_CONF0);
++
++ hdmi_audio_debug(dev, HDMI_AHB_DMA_THRSLD);
++
++ return 0;
++}
++
++static void hdmi_dma_set_hbr(struct device *dev, int channels)
++{
++ u8 value = hdmi_readb(HDMI_AHB_DMA_CONF0) & (~HDMI_AHB_DMA_CONF0_HBR_MASK);
++
++ /* non audio AES and 8 channels means we have to enable HBR */
++ if ((channels == 8) &&
++ (iec_header.B.linear_pcm == 0)) {
++ value |= HDMI_AHB_DMA_CONF0_HBR_MASK;
++ dev_info(dev, "Enabling HBR");
++ }
++
++ hdmi_writeb(value, HDMI_AHB_DMA_CONF0);
++}
++
++static int hdmi_dma_configure_dma(struct device *dev, int channels)
++{
++ int ret;
++ static u8 chan_enable[] = { 0x00, 0x03, 0x33, 0x3f, 0xff };
++
++ if (channels <= 0 || channels > 8 || channels % 2 != 0) {
++ dev_err(dev, "unsupported channel number: %d\n", channels);
++ return -EINVAL;
++ }
++
++ hdmi_audio_writeb(AHB_DMA_CONF0, EN_HLOCK, 0x1);
++
++ ret = hdmi_dma_set_thrsld_incrtype(dev, channels);
++ if (ret)
++ return ret;
++
++ hdmi_writeb(chan_enable[channels / 2], HDMI_AHB_DMA_CONF1);
++
++ /* Handle HBR */
++ hdmi_dma_set_hbr(dev, channels);
++
++ return 0;
++}
++
++static void hdmi_dma_init_iec_header(void)
++{
++ iec_header.U = 0;
++
++ iec_header.B.consumer = 0; /* Consumer use */
++ iec_header.B.linear_pcm = 0; /* linear pcm audio */
++ iec_header.B.copyright = 1; /* no copyright */
++ iec_header.B.pre_emphasis = 0; /* 2 channels without pre-emphasis */
++ iec_header.B.mode = 0; /* Mode 0 */
++
++ iec_header.B.category_code = 0;
++
++ iec_header.B.source = 2; /* stereo */
++ iec_header.B.channel = 0;
++
++ iec_header.B.sample_freq = 0x02; /* 48 KHz */
++ iec_header.B.clock_acc = 0; /* Level II */
++
++ iec_header.B.word_length = 0x02; /* 16 bits */
++ iec_header.B.org_sample_freq = 0x0D; /* 48 KHz */
++
++ iec_header.B.cgms_a = 0; /* Copying is permitted without restriction */
++}
++
++static int hdmi_dma_update_iec_header(struct snd_pcm_substream *substream)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++ struct device *dev = rtd->platform->dev;
++
++ iec_header.B.source = priv->channels;
++
++ switch (priv->rate) {
++ case 32000:
++ iec_header.B.sample_freq = 0x03;
++ iec_header.B.org_sample_freq = 0x0C;
++ break;
++ case 44100:
++ iec_header.B.sample_freq = 0x00;
++ iec_header.B.org_sample_freq = 0x0F;
++ break;
++ case 48000:
++ iec_header.B.sample_freq = 0x02;
++ iec_header.B.org_sample_freq = 0x0D;
++ break;
++ case 88200:
++ iec_header.B.sample_freq = 0x08;
++ iec_header.B.org_sample_freq = 0x07;
++ break;
++ case 96000:
++ iec_header.B.sample_freq = 0x0A;
++ iec_header.B.org_sample_freq = 0x05;
++ break;
++ case 176400:
++ iec_header.B.sample_freq = 0x0C;
++ iec_header.B.org_sample_freq = 0x03;
++ break;
++ case 192000:
++ iec_header.B.sample_freq = 0x0E;
++ iec_header.B.org_sample_freq = 0x01;
++ break;
++ default:
++ dev_err(dev, "unsupported sample rate\n");
++ return -EFAULT;
++ }
++
++ switch (priv->format) {
++ case SNDRV_PCM_FORMAT_S16_LE:
++ iec_header.B.word_length = 0x02;
++ break;
++ case SNDRV_PCM_FORMAT_S24_LE:
++ iec_header.B.word_length = 0x0b;
++ break;
++ default:
++ return -EFAULT;
++ }
++
++ return 0;
++}
++
++/*
++ * The HDMI block transmits the audio data without adding any of the audio
++ * frame bits. So we have to copy the raw dma data from the ALSA buffer
++ * to the DMA buffer, adding the frame information.
++ */
++static int hdmi_dma_copy(struct snd_pcm_substream *substream, int channel,
++ snd_pcm_uframes_t pos, void __user *buf,
++ snd_pcm_uframes_t frames)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++ unsigned int count = frames_to_bytes(runtime, frames);
++ unsigned int pos_bytes = frames_to_bytes(runtime, pos);
++ int channel_no, pcm_idx, subframe_idx, bits_left, sample_bits, map_sel;
++ u32 pcm_data[8], pcm_temp, *hw_buf, sample_block, inc_mask;
++
++ /* Add frame info to the pcm data from userspace and copy it to hw_buffer */
++ hw_buf = (u32 *)(priv->hw_buffer.area + (pos_bytes * priv->buffer_ratio));
++
++ sample_bits = priv->sample_align * 8;
++ sample_block = priv->sample_align * priv->channels;
++
++ if (iec_header.B.linear_pcm == 0) {
++ map_sel = priv->channels / 2;
++ inc_mask = 1 << (priv->channels - 1);
++ } else {
++ map_sel = 0;
++ inc_mask = 0xaa;
++ }
++
++ while (count > 0) {
++ if (copy_from_user(pcm_data, buf, sample_block))
++ return -EFAULT;
++
++ buf += sample_block;
++ count -= sample_block;
++
++ channel_no = pcm_idx = 0;
++ do {
++ pcm_temp = pcm_data[pcm_idx++];
++ bits_left = 32;
++ for (;;) {
++ /* re-map channels */
++ subframe_idx = channel_maps_alsa_cea[map_sel][channel_no];
++
++ /* Save the header info to the audio dma buffer */
++ hw_buf[subframe_idx] = hdmi_dma_add_frame_info(
++ priv, pcm_temp, subframe_idx);
++
++ if (inc_mask & (1 << channel_no)) {
++ if (++priv->frame_idx == 192)
++ priv->frame_idx = 0;
++ }
++
++ channel_no++;
++
++ if (bits_left <= sample_bits)
++ break;
++
++ bits_left -= sample_bits;
++ pcm_temp >>= sample_bits;
++ }
++ } while (channel_no < priv->channels);
++
++ hw_buf += priv->channels;
++ }
++
++ return 0;
++}
++
++static int hdmi_sdma_initbuf(struct device *dev, struct hdmi_dma_priv *priv)
++{
++ struct hdmi_sdma_script *hdmi_sdma_t = priv->hdmi_sdma_t;
++ u32 *head, *tail, i;
++
++ if (!hdmi_sdma_t) {
++ dev_err(dev, "hdmi private addr invalid!!!\n");
++ return -EINVAL;
++ }
++
++ hdmi_sdma_t->control_reg_addr = HDMI_BASE_ADDR + HDMI_AHB_DMA_START;
++ hdmi_sdma_t->status_reg_addr = HDMI_BASE_ADDR + HDMI_IH_AHBDMAAUD_STAT0;
++ hdmi_sdma_t->dma_start_addr = HDMI_BASE_ADDR + HDMI_AHB_DMA_STRADDR0;
++
++ head = &hdmi_sdma_t->buffer[0];
++ tail = &hdmi_sdma_t->buffer[1];
++
++ for (i = 0; i < priv->sdma_params.buffer_num; i++) {
++ *head = priv->hw_buffer.addr + i * priv->period_bytes * priv->buffer_ratio;
++ *tail = *head + priv->dma_period_bytes - 1;
++ head += 2;
++ tail += 2;
++ }
++
++ return 0;
++}
++
++static int hdmi_sdma_config(struct snd_pcm_substream *substream,
++ struct hdmi_dma_priv *priv)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct device *dai_dev = &priv->pdev->dev;
++ struct device *dev = rtd->platform->dev;
++ struct dma_slave_config slave_config;
++ int ret;
++
++ priv->dma_channel = dma_request_slave_channel(dai_dev, "tx");
++ if (priv->dma_channel == NULL) {
++ dev_err(dev, "failed to alloc dma channel\n");
++ return -EBUSY;
++ }
++
++ priv->dma_data.data_addr1 = &priv->sdma_params.buffer_num;
++ priv->dma_data.data_addr2 = &priv->sdma_params.phyaddr;
++ //priv->dma_data.peripheral_type = IMX_DMATYPE_HDMI;
++ priv->dma_channel->private = &priv->dma_data;
++
++ slave_config.direction = DMA_TRANS_NONE;
++
++ ret = dmaengine_slave_config(priv->dma_channel, &slave_config);
++ if (ret) {
++ dev_err(dev, "failed to config slave dma, (%d)\n", ret);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int hdmi_dma_hw_free(struct snd_pcm_substream *substream)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++
++ if (priv->dma_channel) {
++ dma_release_channel(priv->dma_channel);
++ priv->dma_channel = NULL;
++ }
++
++ return 0;
++}
++
++static int hdmi_dma_hw_params(struct snd_pcm_substream *substream,
++ struct snd_pcm_hw_params *params)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct device *dev = rtd->platform->dev;
++ int ret;
++
++ priv->buffer_bytes = params_buffer_bytes(params);
++ priv->periods = params_periods(params);
++ priv->period_bytes = params_period_bytes(params);
++ priv->channels = params_channels(params);
++ priv->format = params_format(params);
++ priv->rate = params_rate(params);
++
++ priv->offset = 0;
++ priv->period_time = HZ / (priv->rate / params_period_size(params));
++
++ switch (priv->format) {
++ case SNDRV_PCM_FORMAT_S16_LE:
++ priv->buffer_ratio = 2;
++ priv->sample_align = 2;
++ priv->sample_bits = 16;
++ break;
++ case SNDRV_PCM_FORMAT_S24_LE:
++ /* 24 bit audio in 32 bit word */
++ priv->buffer_ratio = 1;
++ priv->sample_align = 4;
++ priv->sample_bits = 24;
++ break;
++ default:
++ dev_err(dev, "unsupported sample format: %d\n", priv->format);
++ return -EINVAL;
++ }
++
++ priv->dma_period_bytes = priv->period_bytes * priv->buffer_ratio;
++ priv->sdma_params.buffer_num = priv->periods;
++ priv->sdma_params.phyaddr = priv->phy_hdmi_sdma_t;
++
++ ret = hdmi_sdma_initbuf(dev, priv);
++ if (ret)
++ return ret;
++
++ ret = hdmi_sdma_config(substream, priv);
++ if (ret)
++ return ret;
++
++ snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
++
++ ret = hdmi_dma_configure_dma(dev, priv->channels);
++ if (ret)
++ return ret;
++
++ hdmi_dma_set_addr(priv->hw_buffer.addr, priv->dma_period_bytes);
++
++ dumppriv(dev, priv);
++
++ hdmi_dma_update_iec_header(substream);
++
++ /* Init parameters for the mmap copy optimization */
++ init_table(priv->channels);
++
++ priv->appl_bytes = 0;
++ priv->frame_idx = 0;
++
++ return 0;
++}
++
++static void hdmi_dma_trigger_init(struct snd_pcm_substream *substream,
++ struct hdmi_dma_priv *priv)
++{
++ unsigned long status;
++ bool hbr;
++
++ /*
++ * Set HBR mode (>192kHz IEC-61937 HD audio bitstreaming).
++ * This is done this late because userspace may alter the AESx
++ * parameters until the stream is finally prepared.
++ */
++ hbr = (iec_header.B.linear_pcm != 0 && priv->channels == 8);
++ hdmi_audio_writeb(AHB_DMA_CONF0, HBR, !!hbr);
++
++ /*
++ * Override AES3 - parameter: This is a temporary hack for
++ * callers that provide incorrect information when opening
++ * the device. 0x09 (i.e. 768K) is the only acceptable value.
++ */
++ if (hbr) {
++ iec_header.B.sample_freq = 0x09;
++ iec_header.B.org_sample_freq = 0x00;
++ }
++
++ priv->offset = 0;
++
++ /* Copy data by buffer_bytes */
++ hdmi_dma_data_copy(substream, priv, 'b');
++
++ hdmi_audio_writeb(AHB_DMA_CONF0, SW_FIFO_RST, 0x1);
++
++ /* Delay after reset */
++ udelay(1);
++
++ status = hdmi_readb(HDMI_IH_AHBDMAAUD_STAT0);
++ hdmi_writeb(status, HDMI_IH_AHBDMAAUD_STAT0);
++}
++
++static int hdmi_dma_prepare_and_submit(struct snd_pcm_substream *substream,
++ struct hdmi_dma_priv *priv)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct device *dev = rtd->platform->dev;
++
++ priv->desc = dmaengine_prep_dma_cyclic(priv->dma_channel, 0, 0, 0,
++ DMA_TRANS_NONE, 0);
++ if (!priv->desc) {
++ dev_err(dev, "failed to prepare slave dma\n");
++ return -EINVAL;
++ }
++
++ priv->desc->callback = hdmi_sdma_callback;
++ priv->desc->callback_param = (void *)priv;
++ dmaengine_submit(priv->desc);
++
++ return 0;
++}
++
++static int hdmi_dma_trigger(struct snd_pcm_substream *substream, int cmd)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++ struct device *dev = rtd->platform->dev;
++ int ret;
++
++ switch (cmd) {
++ case SNDRV_PCM_TRIGGER_START:
++ case SNDRV_PCM_TRIGGER_RESUME:
++ if (!check_hdmi_state())
++ return 0;
++ hdmi_dma_trigger_init(substream, priv);
++
++ dumpregs(dev);
++
++ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++ priv->tx_active = true;
++ hdmi_audio_writeb(AHB_DMA_START, START, 0x1);
++ hdmi_dma_irq_set(false);
++ hdmi_set_dma_mode(1);
++ ret = hdmi_dma_prepare_and_submit(substream, priv);
++ if (ret)
++ return ret;
++ dma_async_issue_pending(priv->desc->chan);
++ break;
++ case SNDRV_PCM_TRIGGER_STOP:
++ case SNDRV_PCM_TRIGGER_SUSPEND:
++ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
++ dmaengine_terminate_all(priv->dma_channel);
++ hdmi_set_dma_mode(0);
++ hdmi_dma_irq_set(true);
++ hdmi_audio_writeb(AHB_DMA_STOP, STOP, 0x1);
++ priv->tx_active = false;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static snd_pcm_uframes_t hdmi_dma_pointer(struct snd_pcm_substream *substream)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++
++ return bytes_to_frames(runtime, priv->offset);
++}
++
++static struct snd_pcm_hardware snd_imx_hardware = {
++ .info = SNDRV_PCM_INFO_INTERLEAVED |
++ SNDRV_PCM_INFO_BLOCK_TRANSFER |
++ SNDRV_PCM_INFO_MMAP |
++ SNDRV_PCM_INFO_MMAP_VALID |
++ SNDRV_PCM_INFO_PAUSE |
++ SNDRV_PCM_INFO_RESUME,
++ .formats = MXC_HDMI_FORMATS_PLAYBACK,
++ .rate_min = 32000,
++ .channels_min = 2,
++ .channels_max = 8,
++ .buffer_bytes_max = HDMI_PCM_BUF_SIZE,
++ .period_bytes_min = HDMI_DMA_PERIOD_BYTES / 2,
++ .period_bytes_max = HDMI_DMA_PERIOD_BYTES / 2,
++ .periods_min = 2, //8
++ .periods_max = HDMI_DMA_BUF_SIZE / HDMI_DMA_PERIOD_BYTES,
++ .fifo_size = 0,
++};
++
++static void hdmi_dma_irq_enable(struct hdmi_dma_priv *priv)
++{
++ unsigned long flags;
++
++ hdmi_writeb(0xff, HDMI_AHB_DMA_POL);
++ hdmi_writeb(0xff, HDMI_AHB_DMA_BUFFPOL);
++
++ spin_lock_irqsave(&priv->irq_lock, flags);
++
++ hdmi_writeb(0xff, HDMI_IH_AHBDMAAUD_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_MUTE_AHBDMAAUD_STAT0);
++ hdmi_dma_irq_set(false);
++ hdmi_mask(0);
++
++ spin_unlock_irqrestore(&priv->irq_lock, flags);
++}
++
++static void hdmi_dma_irq_disable(struct hdmi_dma_priv *priv)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&priv->irq_lock, flags);
++
++ hdmi_dma_irq_set(true);
++ hdmi_writeb(0x0, HDMI_IH_MUTE_AHBDMAAUD_STAT0);
++ hdmi_writeb(0xff, HDMI_IH_AHBDMAAUD_STAT0);
++ hdmi_mask(1);
++
++ spin_unlock_irqrestore(&priv->irq_lock, flags);
++}
++
++static int hdmi_dma_open(struct snd_pcm_substream *substream)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct device *dev = rtd->platform->dev;
++ struct hdmi_dma_priv *priv = dev_get_drvdata(dev);
++ int ret;
++
++ runtime->private_data = priv;
++
++ ret = mxc_hdmi_register_audio(substream);
++ if (ret < 0) {
++ dev_err(dev, "HDMI Video is not ready!\n");
++ return ret;
++ }
++
++ hdmi_audio_writeb(AHB_DMA_CONF0, SW_FIFO_RST, 0x1);
++
++ ret = snd_pcm_hw_constraint_integer(substream->runtime,
++ SNDRV_PCM_HW_PARAM_PERIODS);
++ if (ret < 0)
++ return ret;
++
++ snd_soc_set_runtime_hwparams(substream, &snd_imx_hardware);
++
++ hdmi_dma_irq_enable(priv);
++
++ return 0;
++}
++
++static int hdmi_dma_close(struct snd_pcm_substream *substream)
++{
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ struct hdmi_dma_priv *priv = runtime->private_data;
++
++ hdmi_dma_irq_disable(priv);
++ mxc_hdmi_unregister_audio(substream);
++
++ return 0;
++}
++
++static struct snd_pcm_ops imx_hdmi_dma_pcm_ops = {
++ .open = hdmi_dma_open,
++ .close = hdmi_dma_close,
++ .ioctl = snd_pcm_lib_ioctl,
++ .hw_params = hdmi_dma_hw_params,
++ .hw_free = hdmi_dma_hw_free,
++ .trigger = hdmi_dma_trigger,
++ .pointer = hdmi_dma_pointer,
++ .copy = hdmi_dma_copy,
++};
++
++static int imx_hdmi_dma_pcm_new(struct snd_soc_pcm_runtime *rtd)
++{
++ struct hdmi_dma_priv *priv = dev_get_drvdata(rtd->platform->dev);
++ struct snd_card *card = rtd->card->snd_card;
++ struct snd_pcm_substream *substream;
++ struct snd_pcm *pcm = rtd->pcm;
++ u64 dma_mask = DMA_BIT_MASK(32);
++ int ret = 0;
++
++ if (!card->dev->dma_mask)
++ card->dev->dma_mask = &dma_mask;
++ if (!card->dev->coherent_dma_mask)
++ card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
++
++ substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
++
++ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
++ HDMI_PCM_BUF_SIZE, &substream->dma_buffer);
++ if (ret) {
++ dev_err(card->dev, "failed to alloc playback dma buffer\n");
++ return ret;
++ }
++
++ priv->substream = substream;
++
++ /* Alloc the hw_buffer */
++ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
++ HDMI_DMA_BUF_SIZE, &priv->hw_buffer);
++ if (ret) {
++ dev_err(card->dev, "failed to alloc hw dma buffer\n");
++ return ret;
++ }
++
++ return ret;
++}
++
++static void imx_hdmi_dma_pcm_free(struct snd_pcm *pcm)
++{
++ int stream = SNDRV_PCM_STREAM_PLAYBACK;
++ struct snd_pcm_substream *substream = pcm->streams[stream].substream;
++ struct snd_soc_pcm_runtime *rtd = pcm->private_data;
++ struct hdmi_dma_priv *priv = dev_get_drvdata(rtd->platform->dev);
++
++ if (substream) {
++ snd_dma_free_pages(&substream->dma_buffer);
++ substream->dma_buffer.area = NULL;
++ substream->dma_buffer.addr = 0;
++ }
++
++ /* Free the hw_buffer */
++ snd_dma_free_pages(&priv->hw_buffer);
++ priv->hw_buffer.area = NULL;
++ priv->hw_buffer.addr = 0;
++}
++
++static struct snd_soc_platform_driver imx_hdmi_platform = {
++ .ops = &imx_hdmi_dma_pcm_ops,
++ .pcm_new = imx_hdmi_dma_pcm_new,
++ .pcm_free = imx_hdmi_dma_pcm_free,
++};
++
++static int imx_soc_platform_probe(struct platform_device *pdev)
++{
++ struct imx_hdmi *hdmi_drvdata = platform_get_drvdata(pdev);
++ struct hdmi_dma_priv *priv;
++ int ret = 0;
++
++ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
++ if (!priv) {
++ dev_err(&pdev->dev, "Failed to alloc hdmi_dma\n");
++ return -ENOMEM;
++ }
++
++ priv->hdmi_sdma_t = dma_alloc_coherent(NULL,
++ sizeof(struct hdmi_sdma_script),
++ &priv->phy_hdmi_sdma_t, GFP_KERNEL);
++ if (!priv->hdmi_sdma_t) {
++ dev_err(&pdev->dev, "Failed to alloc hdmi_sdma_t\n");
++ return -ENOMEM;
++ }
++
++ priv->tx_active = false;
++ spin_lock_init(&priv->irq_lock);
++
++ priv->pdev = hdmi_drvdata->pdev;
++
++ hdmi_dma_init_iec_header();
++
++ dev_set_drvdata(&pdev->dev, priv);
++
++ switch (hdmi_readb(HDMI_REVISION_ID)) {
++ case 0x0a:
++ snd_imx_hardware.period_bytes_max = HDMI_DMA_PERIOD_BYTES / 4;
++ snd_imx_hardware.period_bytes_min = HDMI_DMA_PERIOD_BYTES / 4;
++ snd_imx_hardware.periods_max = HDMI_DMA_BUF_SIZE / (HDMI_DMA_PERIOD_BYTES / 2);
++ break;
++ default:
++ break;
++ }
++
++ ret = snd_soc_register_platform(&pdev->dev, &imx_hdmi_platform);
++ if (ret)
++ goto err_plat;
++
++ return 0;
++
++err_plat:
++ dma_free_coherent(NULL, sizeof(struct hdmi_sdma_script),
++ priv->hdmi_sdma_t, priv->phy_hdmi_sdma_t);
++
++ return ret;
++}
++
++static int imx_soc_platform_remove(struct platform_device *pdev)
++{
++ struct hdmi_dma_priv *priv = dev_get_drvdata(&pdev->dev);
++
++ dma_free_coherent(NULL, sizeof(struct hdmi_sdma_script),
++ priv->hdmi_sdma_t, priv->phy_hdmi_sdma_t);
++
++ snd_soc_unregister_platform(&pdev->dev);
++
++ return 0;
++}
++
++static struct platform_driver imx_hdmi_dma_driver = {
++ .driver = {
++ .name = "imx-hdmi-audio",
++ .owner = THIS_MODULE,
++ },
++ .probe = imx_soc_platform_probe,
++ .remove = imx_soc_platform_remove,
++};
++
++module_platform_driver(imx_hdmi_dma_driver);
++
++MODULE_AUTHOR("Freescale Semiconductor, Inc.");
++MODULE_DESCRIPTION("i.MX HDMI audio DMA");
++MODULE_LICENSE("GPL");
+diff -Nur linux-4.1.3/sound/soc/fsl/imx-hdmi.h linux-xbian-imx6/sound/soc/fsl/imx-hdmi.h
+--- linux-4.1.3/sound/soc/fsl/imx-hdmi.h 1970-01-01 01:00:00.000000000 +0100
++++ linux-xbian-imx6/sound/soc/fsl/imx-hdmi.h 2015-07-27 23:13:11.153188979 +0200
+@@ -0,0 +1,106 @@
++/*
++ * Copyright (C) 2011-2014 Freescale Semiconductor, Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
++ */
++
++#ifndef __IMX_HDMI_H
++#define __IMX_HDMI_H
++
++struct imx_hdmi_sdma_params {
++ dma_addr_t phyaddr;
++ u32 buffer_num;
++ int dma;
++};
++
++struct imx_hdmi {
++ struct snd_soc_dai_driver cpu_dai_drv;
++ struct platform_device *codec_dev;
++ struct platform_device *dma_dev;
++ struct platform_device *pdev;
++ struct clk *isfr_clk;
++ struct clk *iahb_clk;
++ struct clk *mipi_core_clk;
++};
++
++#define HDMI_MAX_RATES 7
++#define HDMI_MAX_SAMPLE_SIZE 3
++#define HDMI_MAX_CHANNEL_CONSTRAINTS 4
++
++#define MXC_HDMI_RATES_PLAYBACK \
++ (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | \
++ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | \
++ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
++
++#define MXC_HDMI_FORMATS_PLAYBACK \
++ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
++
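++/*
++ * IEC 60958 channel status block: the 64-bit word (U), the bit-field
++ * view (B) and the raw byte view (status) alias the same eight bytes.
++ */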
++union hdmi_audio_header_t {
++ uint64_t U;
++ struct {
++ unsigned consumer:1;
++ unsigned linear_pcm:1;
++ unsigned copyright:1;
++ unsigned pre_emphasis:3;
++ unsigned mode:2;
++
++ unsigned category_code:8;
++
++ unsigned source:4;
++ unsigned channel:4;
++
++ unsigned sample_freq:4;
++ unsigned clock_acc:2;
++ unsigned reserved0:2;
++
++ unsigned word_length:4;
++ unsigned org_sample_freq:4;
++
++ unsigned cgms_a:2;
++ unsigned reserved1:6;
++
++ unsigned reserved2:8;
++
++ unsigned reserved3:8;
++ } B;
++ unsigned char status[8];
++};
++
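++/*
++ * One 32-bit sample word as fed to the HDMI audio DMA: 24 data bits
++ * plus the IEC 60958 V/U/C/P flags and a block-start bit (b).
++ */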
++union hdmi_audio_dma_data_t {
++ uint32_t U;
++ struct {
++ unsigned data:24;
++ unsigned v:1;
++ unsigned u:1;
++ unsigned c:1;
++ unsigned p:1;
++ unsigned b:1;
++ unsigned reserved:3;
++ } B;
++};
++
++extern union hdmi_audio_header_t iec_header;
++
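++/*
++ * Write one named bit-field of an HDMI register and log the access;
++ * relies on the HDMI_<reg>_<bit>_OFFSET/_MASK naming convention of
++ * the register definitions.
++ */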
++#define hdmi_audio_writeb(reg, bit, val) \
++ do { \
++ hdmi_mask_writeb(val, HDMI_ ## reg, \
++ HDMI_ ## reg ## _ ## bit ## _OFFSET, \
++ HDMI_ ## reg ## _ ## bit ## _MASK); \
++ pr_debug("Set reg: HDMI_" #reg " (0x%x) "\
++ "bit: HDMI_" #reg "_" #bit " (%d) to val: %x\n", \
++ HDMI_ ## reg, HDMI_ ## reg ## _ ## bit ## _OFFSET, val); \
++ } while (0)
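++/*
++ * Example (hypothetical register/bit names):
++ *
++ *	hdmi_audio_writeb(AHB_DMA_CONF0, SW_RST, 0x1);
++ *
++ * expands to a masked write of HDMI_AHB_DMA_CONF0 using the
++ * HDMI_AHB_DMA_CONF0_SW_RST_{OFFSET,MASK} constants.
++ */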
++
++#endif /* __IMX_HDMI_H */
+diff -Nur linux-4.1.3/sound/soc/fsl/imx-pcm-dma.c linux-xbian-imx6/sound/soc/fsl/imx-pcm-dma.c
+--- linux-4.1.3/sound/soc/fsl/imx-pcm-dma.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/sound/soc/fsl/imx-pcm-dma.c 2015-07-27 23:13:11.153188979 +0200
+@@ -43,7 +43,7 @@
+ .buffer_bytes_max = IMX_SSI_DMABUF_SIZE,
+ .period_bytes_min = 128,
+ .period_bytes_max = 65535, /* Limited by SDMA engine */
+- .periods_min = 2,
++ .periods_min = 4,
+ .periods_max = 255,
+ .fifo_size = 0,
+ };
+@@ -55,10 +55,27 @@
+ .prealloc_buffer_size = IMX_SSI_DMABUF_SIZE,
+ };
+
+-int imx_pcm_dma_init(struct platform_device *pdev)
++int imx_pcm_dma_init(struct platform_device *pdev, size_t size)
+ {
++ struct snd_dmaengine_pcm_config *config;
++ struct snd_pcm_hardware *pcm_hardware;
++
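++	/*
++	 * Clone the default dmaengine PCM config and hardware template
++	 * so each caller can override the preallocated buffer size
++	 * without touching the shared defaults.
++	 */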
++	config = devm_kzalloc(&pdev->dev,
++			sizeof(struct snd_dmaengine_pcm_config), GFP_KERNEL);
++	if (!config)
++		return -ENOMEM;
++	*config = imx_dmaengine_pcm_config;
++	if (size)
++		config->prealloc_buffer_size = size;
++
++	pcm_hardware = devm_kzalloc(&pdev->dev,
++			sizeof(struct snd_pcm_hardware), GFP_KERNEL);
++	if (!pcm_hardware)
++		return -ENOMEM;
++	*pcm_hardware = imx_pcm_hardware;
++	if (size)
++		pcm_hardware->buffer_bytes_max = size;
++
++ config->pcm_hardware = pcm_hardware;
++
+ return devm_snd_dmaengine_pcm_register(&pdev->dev,
+- &imx_dmaengine_pcm_config,
++ config,
+ SND_DMAENGINE_PCM_FLAG_COMPAT);
+ }
+ EXPORT_SYMBOL_GPL(imx_pcm_dma_init);
+diff -Nur linux-4.1.3/sound/soc/fsl/imx-pcm.h linux-xbian-imx6/sound/soc/fsl/imx-pcm.h
+--- linux-4.1.3/sound/soc/fsl/imx-pcm.h 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/sound/soc/fsl/imx-pcm.h 2015-07-27 23:13:11.153188979 +0200
+@@ -19,6 +19,10 @@
+ * Do not change this as the FIQ handler depends on this size
+ */
+ #define IMX_SSI_DMABUF_SIZE (64 * 1024)
++#define IMX_SAI_DMABUF_SIZE (64 * 1024)
++#define IMX_SPDIF_DMABUF_SIZE (64 * 1024)
++#define IMX_ESAI_DMABUF_SIZE (256 * 1024)
++#define IMX_ASRC_DMABUF_SIZE (256 * 1024)
+
+ static inline void
+ imx_pcm_dma_params_init_data(struct imx_dma_data *dma_data,
+@@ -39,9 +43,9 @@
+ };
+
+ #if IS_ENABLED(CONFIG_SND_SOC_IMX_PCM_DMA)
+-int imx_pcm_dma_init(struct platform_device *pdev);
++int imx_pcm_dma_init(struct platform_device *pdev, size_t size);
+ #else
+-static inline int imx_pcm_dma_init(struct platform_device *pdev)
++static inline int imx_pcm_dma_init(struct platform_device *pdev, size_t size)
+ {
+ return -ENODEV;
+ }
+diff -Nur linux-4.1.3/sound/soc/fsl/imx-sgtl5000.c linux-xbian-imx6/sound/soc/fsl/imx-sgtl5000.c
+--- linux-4.1.3/sound/soc/fsl/imx-sgtl5000.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/sound/soc/fsl/imx-sgtl5000.c 2015-07-27 23:13:11.153188979 +0200
+@@ -175,8 +175,10 @@
+ fail:
+ if (data && !IS_ERR(data->codec_clk))
+ clk_put(data->codec_clk);
+- of_node_put(ssi_np);
+- of_node_put(codec_np);
++ if (ssi_np)
++ of_node_put(ssi_np);
++ if (codec_np)
++ of_node_put(codec_np);
+
+ return ret;
+ }
+@@ -200,6 +202,7 @@
+ static struct platform_driver imx_sgtl5000_driver = {
+ .driver = {
+ .name = "imx-sgtl5000",
++ .owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
+ .of_match_table = imx_sgtl5000_dt_ids,
+ },
+diff -Nur linux-4.1.3/sound/soc/fsl/imx-spdif.c linux-xbian-imx6/sound/soc/fsl/imx-spdif.c
+--- linux-4.1.3/sound/soc/fsl/imx-spdif.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/sound/soc/fsl/imx-spdif.c 2015-07-27 23:13:11.153188979 +0200
+@@ -66,16 +66,18 @@
+ if (ret)
+ goto end;
+
++ platform_set_drvdata(pdev, &data->card);
++ snd_soc_card_set_drvdata(&data->card, data);
++
+ ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card failed: %d\n", ret);
+ goto end;
+ }
+
+- platform_set_drvdata(pdev, data);
+-
+ end:
+- of_node_put(spdif_np);
++ if (spdif_np)
++ of_node_put(spdif_np);
+
+ return ret;
+ }
+@@ -89,6 +91,8 @@
+ static struct platform_driver imx_spdif_driver = {
+ .driver = {
+ .name = "imx-spdif",
++ .owner = THIS_MODULE,
++ .pm = &snd_soc_pm_ops,
+ .of_match_table = imx_spdif_dt_ids,
+ },
+ .probe = imx_spdif_audio_probe,
+diff -Nur linux-4.1.3/sound/soc/fsl/imx-ssi.c linux-xbian-imx6/sound/soc/fsl/imx-ssi.c
+--- linux-4.1.3/sound/soc/fsl/imx-ssi.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/sound/soc/fsl/imx-ssi.c 2015-07-27 23:13:11.153188979 +0200
+@@ -603,7 +603,7 @@
+ ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx;
+
+ ssi->fiq_init = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
+- ssi->dma_init = imx_pcm_dma_init(pdev);
++ ssi->dma_init = imx_pcm_dma_init(pdev, IMX_SSI_DMABUF_SIZE);
+
+ if (ssi->fiq_init && ssi->dma_init) {
+ ret = ssi->fiq_init;
+diff -Nur linux-4.1.3/sound/soc/fsl/Kconfig linux-xbian-imx6/sound/soc/fsl/Kconfig
+--- linux-4.1.3/sound/soc/fsl/Kconfig 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/sound/soc/fsl/Kconfig 2015-07-27 23:13:11.149203200 +0200
+@@ -55,6 +55,9 @@
+ This option is only useful for out-of-tree drivers since
+ in-tree drivers select it automatically.
+
++config SND_SOC_FSL_HDMI
++ tristate
++
+ config SND_SOC_FSL_UTILS
+ tristate
+
+@@ -62,6 +65,11 @@
+ tristate
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+
++config SND_SOC_IMX_HDMI_DMA
++ bool
++ select SND_SOC_GENERIC_DMAENGINE_PCM
++ select SND_SOC_IMX_PCM_DMA
++
+ config SND_SOC_IMX_AUDMUX
+ tristate "Digital Audio Mux module support"
+ help
+@@ -235,7 +243,9 @@
+ select SND_SOC_WM8962
+ select SND_SOC_IMX_PCM_DMA
+ select SND_SOC_IMX_AUDMUX
++ select SND_SOC_FSL_SAI
+ select SND_SOC_FSL_SSI
++ select SND_SOC_FSL_UTILS
+ help
+ Say Y if you want to add support for SoC audio on an i.MX board with
+ a wm8962 codec.
+@@ -258,7 +268,9 @@
+ select SND_SOC_SGTL5000
+ select SND_SOC_IMX_PCM_DMA
+ select SND_SOC_IMX_AUDMUX
++ select SND_SOC_FSL_SAI
+ select SND_SOC_FSL_SSI
++ select SND_SOC_FSL_UTILS
+ help
+ Say Y if you want to add support for SoC audio on an i.MX board with
+ a sgtl5000 codec.
+@@ -280,6 +292,17 @@
+ select SND_SOC_MC13783
+ select SND_SOC_IMX_PCM_DMA
+
++config SND_SOC_IMX_HDMI
++ tristate "SoC Audio support for i.MX boards with HDMI port"
++ depends on MFD_MXC_HDMI
++ select SND_SOC_IMX_HDMI_DMA
++ select SND_SOC_FSL_HDMI
++ select SND_SOC_OMAP_HDMI_CODEC
++ help
++	  SoC audio support for i.MX boards with an HDMI audio port.
++	  Say Y if you want to add support for SoC audio on an i.MX
++	  board with i.MX HDMI.
++
+ config SND_SOC_FSL_ASOC_CARD
+ tristate "Generic ASoC Sound Card with ASRC support"
+ depends on OF && I2C
+diff -Nur linux-4.1.3/sound/soc/fsl/Makefile linux-xbian-imx6/sound/soc/fsl/Makefile
+--- linux-4.1.3/sound/soc/fsl/Makefile 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/sound/soc/fsl/Makefile 2015-07-27 23:13:11.149203200 +0200
+@@ -15,7 +15,8 @@
+ snd-soc-fsl-asrc-objs := fsl_asrc.o fsl_asrc_dma.o
+ snd-soc-fsl-sai-objs := fsl_sai.o
+ snd-soc-fsl-ssi-y := fsl_ssi.o
+-snd-soc-fsl-ssi-$(CONFIG_DEBUG_FS) += fsl_ssi_dbg.o
++##snd-soc-fsl-ssi-$(CONFIG_DEBUG_FS) += fsl_ssi_dbg.o
++snd-soc-fsl-hdmi-objs := fsl_hdmi.o
+ snd-soc-fsl-spdif-objs := fsl_spdif.o
+ snd-soc-fsl-esai-objs := fsl_esai.o
+ snd-soc-fsl-utils-objs := fsl_utils.o
+@@ -26,6 +27,7 @@
+ obj-$(CONFIG_SND_SOC_FSL_SSI) += snd-soc-fsl-ssi.o
+ obj-$(CONFIG_SND_SOC_FSL_SPDIF) += snd-soc-fsl-spdif.o
+ obj-$(CONFIG_SND_SOC_FSL_ESAI) += snd-soc-fsl-esai.o
++obj-$(CONFIG_SND_SOC_FSL_HDMI) += snd-soc-fsl-hdmi.o
+ obj-$(CONFIG_SND_SOC_FSL_UTILS) += snd-soc-fsl-utils.o
+ obj-$(CONFIG_SND_SOC_POWERPC_DMA) += snd-soc-fsl-dma.o
+
+@@ -46,7 +48,7 @@
+
+ obj-$(CONFIG_SND_SOC_IMX_PCM_FIQ) += imx-pcm-fiq.o
+ obj-$(CONFIG_SND_SOC_IMX_PCM_DMA) += imx-pcm-dma.o
+-
++obj-$(CONFIG_SND_SOC_IMX_HDMI_DMA) += imx-hdmi-dma.o hdmi_pcm.o
+ # i.MX Machine Support
+ snd-soc-eukrea-tlv320-objs := eukrea-tlv320.o
+ snd-soc-phycore-ac97-objs := phycore-ac97.o
+@@ -57,6 +59,7 @@
+ snd-soc-imx-wm8962-objs := imx-wm8962.o
+ snd-soc-imx-spdif-objs := imx-spdif.o
+ snd-soc-imx-mc13783-objs := imx-mc13783.o
++snd-soc-imx-hdmi-objs := imx-hdmi.o
+
+ obj-$(CONFIG_SND_SOC_EUKREA_TLV320) += snd-soc-eukrea-tlv320.o
+ obj-$(CONFIG_SND_SOC_PHYCORE_AC97) += snd-soc-phycore-ac97.o
+@@ -67,3 +70,6 @@
+ obj-$(CONFIG_SND_SOC_IMX_WM8962) += snd-soc-imx-wm8962.o
+ obj-$(CONFIG_SND_SOC_IMX_SPDIF) += snd-soc-imx-spdif.o
+ obj-$(CONFIG_SND_SOC_IMX_MC13783) += snd-soc-imx-mc13783.o
++obj-$(CONFIG_SND_SOC_IMX_HDMI) += snd-soc-imx-hdmi.o
++
++AFLAGS_hdmi_pcm.o := -march=armv7-a -mtune=cortex-a9 -mfpu=neon -mfloat-abi=softfp
+diff -Nur linux-4.1.3/sound/soc/soc-io.c linux-xbian-imx6/sound/soc/soc-io.c
+--- linux-4.1.3/sound/soc/soc-io.c 2015-07-21 19:10:33.000000000 +0200
++++ linux-xbian-imx6/sound/soc/soc-io.c 2015-07-27 23:13:11.344506380 +0200
+@@ -17,6 +17,8 @@
+ #include <linux/export.h>
+ #include <sound/soc.h>
+
++#include <trace/events/asoc.h>
++
+ /**
+ * snd_soc_component_read() - Read register value
+ * @component: Component to read from
+@@ -271,3 +273,233 @@
+ return snd_soc_component_write(&platform->component, reg, val);
+ }
+ EXPORT_SYMBOL_GPL(snd_soc_platform_write);
++
++#ifdef CONFIG_REGMAP
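++/* Flat register-cache helpers: the cache is an array of 8- or 16-bit
++ * words indexed by register number. */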
++static bool snd_soc_set_cache_val(void *base, unsigned int idx,
++ unsigned int val, unsigned int word_size)
++{
++ switch (word_size) {
++ case 1: {
++ u8 *cache = base;
++ if (cache[idx] == val)
++ return true;
++ cache[idx] = val;
++ break;
++ }
++ case 2: {
++ u16 *cache = base;
++ if (cache[idx] == val)
++ return true;
++ cache[idx] = val;
++ break;
++ }
++ default:
++		WARN(1, "Invalid word_size %u\n", word_size);
++ break;
++ }
++ return false;
++}
++
++static unsigned int snd_soc_get_cache_val(const void *base, unsigned int idx,
++ unsigned int word_size)
++{
++ if (!base)
++ return -1;
++
++ switch (word_size) {
++ case 1: {
++ const u8 *cache = base;
++ return cache[idx];
++ }
++ case 2: {
++ const u16 *cache = base;
++ return cache[idx];
++ }
++ default:
++		WARN(1, "Invalid word_size %u\n", word_size);
++ break;
++ }
++ /* unreachable */
++ return -1;
++}
++
++/**
++ * snd_soc_cache_read: Fetch the value of a given register from the cache.
++ *
++ * @codec: CODEC to configure.
++ * @reg: The register index.
++ * @value: The value to be returned.
++ */
++int snd_soc_cache_read(struct snd_soc_codec *codec,
++ unsigned int reg, unsigned int *value)
++{
++ if (!value)
++ return -EINVAL;
++
++ mutex_lock(&codec->cache_rw_mutex);
++ *value = snd_soc_get_cache_val(codec->reg_cache, reg,
++ codec->driver->reg_word_size);
++ mutex_unlock(&codec->cache_rw_mutex);
++
++ return 0;
++}
++
++/**
++ * snd_soc_cache_write: Set the value of a given register in the cache.
++ *
++ * @codec: CODEC to configure.
++ * @reg: The register index.
++ * @value: The new register value.
++ */
++int snd_soc_cache_write(struct snd_soc_codec *codec,
++ unsigned int reg, unsigned int value)
++{
++ mutex_lock(&codec->cache_rw_mutex);
++ snd_soc_set_cache_val(codec->reg_cache, reg, value,
++ codec->driver->reg_word_size);
++ mutex_unlock(&codec->cache_rw_mutex);
++
++ return 0;
++}
++
++/**
++ * snd_soc_codec_volatile_register: Report if a register is volatile.
++ *
++ * @codec: CODEC to query.
++ * @reg: Register to query.
++ *
++ * Boolean function indicating if a CODEC register is volatile.
++ */
++int snd_soc_codec_volatile_register(struct snd_soc_codec *codec,
++ unsigned int reg)
++{
++ if (codec->volatile_register)
++ return codec->volatile_register(codec, reg);
++ else
++ return 0;
++}
++
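++/* Write through the cache: non-volatile, cacheable registers are
++ * mirrored first; in cache-only mode the hardware write is deferred
++ * and a sync is flagged. */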
++static int hw_write(struct snd_soc_codec *codec, unsigned int reg,
++ unsigned int value)
++{
++ int ret;
++
++ if (!snd_soc_codec_volatile_register(codec, reg) &&
++ reg < codec->driver->reg_cache_size &&
++ !codec->cache_bypass) {
++ ret = snd_soc_cache_write(codec, reg, value);
++ if (ret < 0)
++ return -1;
++ }
++
++ if (codec->cache_only) {
++ codec->cache_sync = 1;
++ return 0;
++ }
++
++ return regmap_write(codec->control_data, reg, value);
++}
++
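++/* Read volatile, out-of-range or bypassed registers from hardware;
++ * serve everything else from the register cache. */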
++static unsigned int hw_read(struct snd_soc_codec *codec, unsigned int reg)
++{
++ int ret;
++ unsigned int val;
++
++ if (reg >= codec->driver->reg_cache_size ||
++ snd_soc_codec_volatile_register(codec, reg) ||
++ codec->cache_bypass) {
++ if (codec->cache_only)
++ return -1;
++
++ ret = regmap_read(codec->control_data, reg, &val);
++ if (ret == 0)
++ return val;
++ else
++ return -1;
++ }
++
++ ret = snd_soc_cache_read(codec, reg, &val);
++ if (ret < 0)
++ return -1;
++ return val;
++}
++
++/**
++ * snd_soc_codec_set_cache_io: Set up standard I/O functions.
++ *
++ * @codec: CODEC to configure.
++ * @addr_bits: Number of bits of register address data.
++ * @data_bits: Number of bits of data per register.
++ * @control: Control bus used.
++ *
++ * Register formats are frequently shared between many I2C and SPI
++ * devices. In order to promote code reuse the ASoC core provides
++ * some standard implementations of CODEC read and write operations
++ * which can be set up using this function.
++ *
++ * The caller is responsible for allocating and initialising the
++ * actual cache.
++ *
++ * Note that at present this code cannot be used by CODECs with
++ * volatile registers.
++ */
++int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
++ int addr_bits, int data_bits,
++ enum snd_soc_control_type control)
++{
++ struct regmap_config config;
++ int ret;
++
++ memset(&config, 0, sizeof(config));
++ codec->write = hw_write;
++ codec->read = hw_read;
++
++ config.reg_bits = addr_bits;
++ config.val_bits = data_bits;
++
++ switch (control) {
++#if IS_ENABLED(CONFIG_REGMAP_I2C)
++ case SND_SOC_I2C:
++ codec->control_data = regmap_init_i2c(to_i2c_client(codec->dev),
++ &config);
++ break;
++#endif
++
++#if IS_ENABLED(CONFIG_REGMAP_SPI)
++ case SND_SOC_SPI:
++ codec->control_data = regmap_init_spi(to_spi_device(codec->dev),
++ &config);
++ break;
++#endif
++
++ case SND_SOC_REGMAP:
++ /* Device has made its own regmap arrangements */
++ codec->using_regmap = true;
++ if (!codec->control_data)
++ codec->control_data = dev_get_regmap(codec->dev, NULL);
++
++ if (codec->control_data) {
++ ret = regmap_get_val_bytes(codec->control_data);
++ /* Errors are legitimate for non-integer byte
++ * multiples */
++ if (ret > 0)
++ codec->val_bytes = ret;
++ }
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ return PTR_ERR_OR_ZERO(codec->control_data);
++}
++EXPORT_SYMBOL_GPL(snd_soc_codec_set_cache_io);
++#else
++int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
++ int addr_bits, int data_bits,
++ enum snd_soc_control_type control)
++{
++ return -ENOTSUPP;
++}
++EXPORT_SYMBOL_GPL(snd_soc_codec_set_cache_io);
++#endif